diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index b92b6db..4030438 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -16,7 +16,7 @@ Provide any relevant links here.
## Testing and Verification
-Please describe the tests that you ran to verify your changes. Please also provide instructions, ROS packages, and Unity project files as appropriate so we can reproduce the test environment.
+Please describe the tests that you ran to verify your changes. Please also provide instructions, ROS packages, and Unity project files as appropriate so we can reproduce the test environment.
### Test Configuration:
- Unity Version: [e.g. Unity 2020.2.0f1]
diff --git a/.github/workflows/jira-link.yaml b/.github/workflows/jira-link.yaml
new file mode 100644
index 0000000..18240fb
--- /dev/null
+++ b/.github/workflows/jira-link.yaml
@@ -0,0 +1,22 @@
+name: jira-link
+
+on:
+ pull_request:
+ types: [opened, edited, reopened, synchronize]
+
+jobs:
+ jira-link:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: check pull request title and source branch name
+ run: |
+ echo "Checking pull request with title ${{ github.event.pull_request.title }} from source branch ${{ github.event.pull_request.head.ref }}"
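+          # The same pattern can be checked locally in any bash shell, for example:
+          #   [[ "AIRO-1024-my-branch" =~ ^AIRO-[0-9]+.*$ ]] && echo "branch name OK"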
+ if ! [[ "${{ github.event.pull_request.title }}" =~ ^AIRO-[0-9]+[[:space:]].*$ ]] && ! [[ "${{ github.event.pull_request.head.ref }}" =~ ^AIRO-[0-9]+.*$ ]]
+ then
+ echo -e "Please make sure one of the following is true:\n \
+ 1. the pull request title starts with 'AIRO-xxxx ', e.g. 'AIRO-1024 My Pull Request'\n \
+          2. the source branch starts with 'AIRO-xxxx', e.g. 'AIRO-1024-my-branch'"
+ exit 1
+ else
+ echo "Completed checking"
+ fi
diff --git a/.github/workflows/pre-commit.yaml b/.github/workflows/pre-commit.yaml
new file mode 100644
index 0000000..4c4c3df
--- /dev/null
+++ b/.github/workflows/pre-commit.yaml
@@ -0,0 +1,20 @@
+name: pre-commit
+
+on:
+ pull_request:
+ push:
+ branches: [dev]
+
+jobs:
+ pre-commit:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions/setup-python@v2
+ with:
+ python-version: 3.7.x
+ - uses: actions/setup-dotnet@v1
+ with:
+ dotnet-version: '6.0.x'
+ include-prerelease: true
+ - uses: pre-commit/action@v2.0.0
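+      # The pre-commit action runs the hooks declared in .pre-commit-config.yaml;
+      # Python and .NET are installed above so hooks such as dotnet-format can run.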
diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml
new file mode 100644
index 0000000..a77c69e
--- /dev/null
+++ b/.github/workflows/stale.yaml
@@ -0,0 +1,27 @@
+name: 'Stale issue handler'
+on:
+ workflow_dispatch:
+ schedule:
+ - cron: '0 17 * * *' # 17:00 UTC; 10:00 PDT
+
+permissions:
+ issues: write
+
+jobs:
+ stale:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/stale@v4.0.0
+ id: stale
+ with:
+ stale-issue-label: 'stale'
+ stale-issue-message: 'This issue has been marked stale because it has been open for 14 days with no activity. Please remove the stale label or comment on this issue, or the issue will be automatically closed in the next 14 days.'
+ days-before-stale: 14
+ days-before-pr-stale: -1
+ days-before-close: 14
+ days-before-pr-close: -1
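+        # A value of -1 disables stale/close handling for pull requests, so only issues are processed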
+ exempt-issue-labels: 'blocked,must,should,keep,pinned,work-in-progress,request,announcement'
+ close-issue-message: 'This issue has been marked stale for 14 days and will now be closed. If this issue is still valid, please ping a maintainer.'
+ - name: Print outputs
+ run: echo ${{ join(steps.stale.outputs.*, ',') }}
+
\ No newline at end of file
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..045a3b3
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,33 @@
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.0.1
+ hooks:
+ - id: mixed-line-ending
+ exclude: >
+ (?x)^(
+ .*cs.meta|
+ .*.css|
+ .*.meta|
+ .*.mat|
+ .*.preset|
+ .*.lighting|
+ .*.dae
+ )$
+ args: [--fix=lf]
+
+ - id: trailing-whitespace
+ name: trailing-whitespace-markdown
+ types: [markdown]
+ - id: check-merge-conflict
+ args: [--assume-in-merge]
+ - id: check-yaml
+ # Won't handle the templating in yamato
+ exclude: \.yamato/.*
+
+
+ - repo: https://github.com/dotnet/format
+ rev: v5.1.225507
+ hooks:
+ - id: dotnet-format
+ entry: dotnet-format whitespace
+ args: [--folder, --include]
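+
+# Note: these hooks can also be run locally, for example:
+#   pip install pre-commit
+#   pre-commit run --all-files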
diff --git a/.yamato/sonar.yml b/.yamato/sonar.yml
new file mode 100644
index 0000000..1c08040
--- /dev/null
+++ b/.yamato/sonar.yml
@@ -0,0 +1,52 @@
+csharp:
+ name: Sonarqube C# Scan
+ agent:
+ type: Unity::metal::macmini
+ image: package-ci/mac
+ flavor: m1.mac
+ variables:
+ PROJECT_PATH: PoseEstimationDemoProject
+ SONARQUBE_PROJECT_KEY: ai-robotics-object-pose-estimation-csharp
+ SONARQUBE_PROJECT_BASE_DIR: /Users/bokken/build/output/Unity-Technologies/Robotics-Object-Pose-Estimation/PoseEstimationDemoProject
+ MSBUILD_SLN_PATH: ./PoseEstimationDemoProject/PoseEstimationDemoProject.sln
+ PROJECT_ROOT: /Users/bokken/build/output/Unity-Technologies/Robotics-Object-Pose-Estimation/
+ UNITY_VERSION: 2020.2.6f1
+ commands:
+ - npm install upm-ci-utils@stable -g --registry https://artifactory.prd.it.unity3d.com/artifactory/api/npm/upm-npm
+ - unity-downloader-cli --wait -u $UNITY_VERSION -c Editor
+ - brew install mono corretto
+ - curl https://github.com/SonarSource/sonar-scanner-msbuild/releases/download/5.2.1.31210/sonar-scanner-msbuild-5.2.1.31210-net46.zip -o sonar-scanner-msbuild-net46.zip -L
+ - unzip sonar-scanner-msbuild-net46.zip -d ~/sonar-scanner-msbuild
+ - chmod a+x ~/sonar-scanner-msbuild/sonar-scanner-4.6.1.2450/bin/sonar-scanner
+ - .Editor/Unity.app/Contents/MacOS/Unity -projectPath $PROJECT_PATH -batchmode -quit -nographics -logFile - -executeMethod "UnityEditor.SyncVS.SyncSolution"
+ - command: |
+ cd $PROJECT_PATH
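+      # Flip ReferenceOutputAssembly entries from false to true in each generated .csproj
+      # (the .backup copies created by sed are removed right after); this lets MSBuild build
+      # the referenced projects for the Sonar analysis.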
+      for file in *.csproj; do sed -i.backup "s/^[[:blank:]]*<ReferenceOutputAssembly>false<\/ReferenceOutputAssembly>/<ReferenceOutputAssembly>true<\/ReferenceOutputAssembly>/g" $file; rm $file.backup; done
+ cd $PROJECT_ROOT
+ - mono ~/sonar-scanner-msbuild/SonarScanner.MSBuild.exe begin /k:$SONARQUBE_PROJECT_KEY /d:sonar.host.url=$SONARQUBE_ENDPOINT_URL_PRD /d:sonar.login=$SONARQUBE_TOKEN_PRD /d:sonar.projectBaseDir=$SONARQUBE_PROJECT_BASE_DIR
+ - msbuild $MSBUILD_SLN_PATH
+ - mono ~/sonar-scanner-msbuild/SonarScanner.MSBuild.exe end /d:sonar.login=$SONARQUBE_TOKEN_PRD
+ triggers:
+ cancel_old_ci: true
+ expression: |
+ ((pull_request.target eq "main" OR pull_request.target eq "dev")
+ AND NOT pull_request.push.changes.all match "**/*.md") OR
+ (push.branch eq "main" OR push.branch eq "dev")
+standard:
+ name: Sonarqube Standard Scan
+ agent:
+ type: Unity::metal::macmini
+ image: package-ci/mac
+ flavor: m1.mac
+ variables:
+ SONARQUBE_PROJECT_KEY: ai-robotics-object-pose-estimation-standard
+ commands:
+ - curl https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-4.6.2.2472-macosx.zip -o sonar-scanner-macosx.zip -L
+ - unzip sonar-scanner-macosx.zip -d ~/sonar-scanner
+ - ~/sonar-scanner/sonar-scanner-4.6.2.2472-macosx/bin/sonar-scanner -Dsonar.projectKey=$SONARQUBE_PROJECT_KEY -Dsonar.sources=. -Dsonar.host.url=$SONARQUBE_ENDPOINT_URL_PRD -Dsonar.login=$SONARQUBE_TOKEN_PRD
+ triggers:
+ cancel_old_ci: true
+ expression: |
+ ((pull_request.target eq "main" OR pull_request.target eq "dev")
+ AND NOT pull_request.push.changes.all match "**/*.md") OR
+ (push.branch eq "main" OR push.branch eq "dev")
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 014292c..2db74b8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,7 +7,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) a
## Unreleased
### Upgrade Notes
-Add collision ignorance to the gripper inner knuckles and switch the PGS solver to the TGS solver
### Known Issues
@@ -19,5 +18,33 @@ Add collision ignorance to the gripper inner knuckles and switch the PGS solver
### Removed
+### Fixed
+
+## v0.0.2
+
+### Upgrade Notes
+
+Fixed CUDA-device support
+
+Collisions are now ignored between the gripper's inner knuckles, and the PGS solver has been switched to the TGS solver
+
+### Known Issues
+
+### Added
+
+Added Sonarqube Scanner
+
+Added the [Close Stale Issues](https://github.com/marketplace/actions/close-stale-issues) action
+
+Added linter
+
+### Changed
+
+Linting and style fixes
+
+### Deprecated
+
+### Removed
+
### Fixed
Update key fetching from Ubuntu keyserver when building the ROS docker image
\ No newline at end of file
diff --git a/Documentation/0_ros_setup.md b/Documentation/0_ros_setup.md
index f1832cd..39f0a25 100644
--- a/Documentation/0_ros_setup.md
+++ b/Documentation/0_ros_setup.md
@@ -15,16 +15,16 @@ sudo pip3 install rospkg numpy jsonpickle scipy easydict torch==1.7.1+cu101 torc
```
> Note: If you encounter errors installing Pytorch via the above `pip3` command, try the following instead:
-> ```bash
+> ```bash
> sudo pip3 install rospkg numpy jsonpickle scipy easydict torch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
> ```
Most of the ROS setup has been provided via the `ur3_moveit` package. This section will describe the provided files.
-4. If you have not already built and sourced the ROS workspace since importing the new ROS packages, navigate to your ROS workplace, and run:
+4. If you have not already built and sourced the ROS workspace since importing the new ROS packages, navigate to your ROS workspace, and run:
-```bash
+```bash
catkin_make -DCATKIN_WHITELIST_PACKAGES="moveit_msgs;ros_tcp_endpoint;ur3_moveit;robotiq_2f_140_gripper_visualization;ur_description;ur_gazebo"
source devel/setup.bash
```
@@ -33,7 +33,7 @@ source devel/setup.bash
Ensure there are no unexpected errors.
-The ROS parameters will need to be set to your configuration in order to allow the server endpoint to fetch values for the TCP connection.
+The ROS parameters will need to be set to match your configuration so that the server endpoint can fetch values for the TCP connection.
5. Navigate to your ROS workspace (e.g. `~/catkin_ws`). Assign the ROS IP in the `params.yaml` file as follows:
diff --git a/Documentation/1_set_up_the_scene.md b/Documentation/1_set_up_the_scene.md
index d902910..a56d864 100644
--- a/Documentation/1_set_up_the_scene.md
+++ b/Documentation/1_set_up_the_scene.md
@@ -1,6 +1,6 @@
# Object Pose Estimation Tutorial: Part 1
-In this first part of the tutorial, we will start by downloading and installing the Unity Editor. We will install our project's dependencies: the Perception, URDF, and TCP Connector packages. We will then use a set of provided prefabs to easily prepare a simulated environment containing a table, a cube, and a working robot arm.
+In this first part of the tutorial, we will start by downloading and installing the Unity Editor. We will install our project's dependencies: the Perception, URDF, and TCP Connector packages. We will then use a set of provided prefabs to easily prepare a simulated environment containing a table, a cube, and a working robot arm.
**Table of Contents**
@@ -14,14 +14,14 @@ In this first part of the tutorial, we will start by downloading and installing
### Requirements
-To follow this tutorial you need to **clone** this repository even if you want to create your Unity project from scratch.
+To follow this tutorial you need to **clone** this repository even if you want to create your Unity project from scratch.
->Note: This project uses Git Submodules to grab the ROS package dependencies for the [`universal_robot`](https://github.com/ros-industrial/universal_robot), [`moveit_msgs`](https://github.com/ros-planning/moveit_msgs), [`ros_tcp_endpoint`](https://github.com/Unity-Technologies/ROS-TCP-Endpoint), and the [`robotiq`](https://github.com/JStech/robotiq/tree/noetic-mods) folders.
+>Note: This project uses Git Submodules to grab the ROS package dependencies for the [`universal_robot`](https://github.com/ros-industrial/universal_robot), [`moveit_msgs`](https://github.com/ros-planning/moveit_msgs), [`ros_tcp_endpoint`](https://github.com/Unity-Technologies/ROS-TCP-Endpoint), and the [`robotiq`](https://github.com/JStech/robotiq/tree/noetic-mods) folders.
>Note: The [`ros-industrial/robotiq`](https://github.com/ros-industrial/robotiq) repository does not currently support ROS Noetic. The [`JSTech/robotiq#noetic-mods`](https://github.com/JStech/robotiq/tree/noetic-mods) fork, which has been updated to use ROS Noetic, is used instead.
-1. Open a terminal and navigate to the folder where you want to host the repository.
+1. Open a terminal and navigate to the folder where you want to host the repository.
```bash
git clone --recurse-submodules https://github.com/Unity-Technologies/Robotics-Object-Pose-Estimation.git
```
@@ -29,7 +29,7 @@ git clone --recurse-submodules https://github.com/Unity-Technologies/Robotics-Ob
2. [Install Unity `2020.2.*`.](install_unity.md)
-### Create a New Project
+### Create a New Project
When you first run Unity, you will be asked to open an existing project, or create a new one.
1. Open Unity and create a new project using the **Universal Render Pipeline**. Name your new project _**Pose Estimation Tutorial**_, and specify a desired location as shown below.
@@ -53,10 +53,10 @@ We will need to download and install several packages. In general, packages can
- From the top menu bar, open _**Window**_ -> _**Package Manager**_. As the name suggests, the _**Package Manager**_ is where you can download new packages, update or remove existing ones, and access a variety of information and additional actions for each package.
-- Click on the _**+**_ sign at the top-left corner of the _**Package Manager**_ window and then choose the option _**Add package from git URL...**_.
+- Click on the _**+**_ sign at the top-left corner of the _**Package Manager**_ window and then choose the option _**Add package from git URL...**_.
+
+- Enter the package address and click _**Add**_.
-- Enter the package address and click _**Add**_.
-
It can take a few minutes for the manager to download and import packages.
@@ -68,13 +68,13 @@ It can take a few minutes for the manager to download and import packages.
Install the following packages with the provided git URLs:
1. [Perception package](https://github.com/Unity-Technologies/com.unity.perception) - `com.unity.perception@0.8.0-preview.3`
- * This will help us collect training data for our machine learning model.
+ * This will help us collect training data for our machine learning model.
2. [URDF Importer package](https://github.com/Unity-Technologies/URDF-Importer) - `https://github.com/Unity-Technologies/URDF-Importer.git?path=/com.unity.robotics.urdf-importer#v0.2.0-light`
* This package will help us import a robot into our scene from a file in the [Unified Robot Description Format (URDF)](http://wiki.ros.org/urdf).
3. [TCP Connector package](https://github.com/Unity-Technologies/ROS-TCP-Connector) - `https://github.com/Unity-Technologies/ROS-TCP-Connector.git?path=/com.unity.robotics.ros-tcp-connector#v0.2.0-light`
- * This package will enable a connection between ROS and Unity.
+ * This package will enable a connection between ROS and Unity.
>Note: If you encounter a Package Manager issue, check the [Troubleshooting Guide](troubleshooting.md) for potential solutions.
@@ -107,7 +107,7 @@ The Perception package relies on a "Ground Truth Renderer Feature" to output lab
#### The Scene
Simply put in Unity, a Scene contains any object that exists in the world. This world can be a game, or in this case, a data-collection-oriented simulation. Every new project contains a Scene named `SampleScene`, which is automatically opened when the project is created. This Scene comes with several objects and settings that we do not need, so let's create a new one.
-1. In the _**Project**_ tab, right-click on the `Assets/Scenes` folder and click _**Create -> Scene**_. Name this new Scene `TutorialPoseEstimation` and double-click on it to open it.
+1. In the _**Project**_ tab, right-click on the `Assets/Scenes` folder and click _**Create -> Scene**_. Name this new Scene `TutorialPoseEstimation` and double-click on it to open it.
The _**Hierarchy**_ tab of the editor displays all the Scenes currently loaded, and all the objects currently present in each loaded Scene, as shown below:
@@ -116,33 +116,33 @@ The _**Hierarchy**_ tab of the editor displays all the Scenes currently loaded,
As seen above, the new Scene already contains a camera (`Main Camera`) and a light (`Directional Light`). We will now modify the camera's field of view and position to prepare it for the tutorial.
-2. Still in the _**Inspector**_ tab of the `Main Camera`, modify the camera's `Position` and `Rotation` to match the values shown below. This orients the camera so that it will have a good view of the objects we are about to add to the scene.
+2. Still in the _**Inspector**_ tab of the `Main Camera`, modify the camera's `Position` and `Rotation` to match the values shown below. This orients the camera so that it will have a good view of the objects we are about to add to the scene.
-3. Click on `Directional Light` and in the _**Inspector**_ tab, modify the light's `Position` and `Rotation` to match the screenshot below.
+3. Click on `Directional Light` and in the _**Inspector**_ tab, modify the light's `Position` and `Rotation` to match the screenshot below.
#### Adding Tutorial Files
-Now it is time to add some more objects to our scene. Before doing so, we need to import some folders containing the required assets.
+Now it is time to add some more objects to our scene. Before doing so, we need to import some folders containing the required assets.
4. Download [TutorialAssets.zip](https://github.com/Unity-Technologies/Robotics-Object-Pose-Estimation/releases/download/v0.0.1/TutorialAssets.zip), and unzip it. It should contain the following subfolders: `Materials`, `Prefabs`, `RosMessages`, `Scripts`, `URDFs`.
-5. Drag and Drop the `TutorialAssets` folder from your operating system's file explorer onto the `Assets` folder in the _**Project**_ tab of the editor.
+5. Drag and Drop the `TutorialAssets` folder from your operating system's file explorer onto the `Assets` folder in the _**Project**_ tab of the editor.
-Your `Assets` folder should like this:
+Your `Assets` folder should look like this:
#### Using Prefabs
-Unity’s [Prefab](https://docs.unity3d.com/2020.2/Documentation/Manual/Prefabs.html) system allows you to create, configure, and store a GameObject complete with all its components, property values, and child GameObjects as a reusable Unity Asset. It is a convenient way to store complex objects.
+Unity’s [Prefab](https://docs.unity3d.com/2020.2/Documentation/Manual/Prefabs.html) system allows you to create, configure, and store a GameObject complete with all its components, property values, and child GameObjects as a reusable Unity Asset. It is a convenient way to store complex objects.
A Prefab is just a file, and you can easily create an instance of the object in the scene from a Prefab by dragging it into the _**Hierarchy**_ tab.
@@ -150,7 +150,7 @@ For your convenience, we have provided Prefabs for most of the components of the
6. In the _**Project**_ tab, go to `Assets/TutorialAssets/Prefabs/Part1` and drag and drop the `Cube` Prefab into the _**Hierarchy**_ tab.
-7. Repeat the above action with the `Goal`, `Table` and `Floor` Prefabs.
+7. Repeat the above action with the `Goal`, `Table` and `Floor` Prefabs.
@@ -161,9 +161,9 @@ For your convenience, we have provided Prefabs for most of the components of the
#### Importing the Robot
-Finally we will add the robot and the URDF files in order to import the UR3 Robot.
+Finally, we will import the UR3 robot into the Scene from the provided URDF files.
-8. In the _**Project**_ tab, go to `Assets/TutorialAssets/URDFs/ur3_with_gripper` and right click on the `ur3_with_gripper.urdf` file and select `Import Robot From Selected URDF file`. A window will pop up, keep the default **Y Axis** type and `VHACD` **Mesh Decomposer** in the Import menu. Then, click Import URDF. These actions are shown in the video below.
+8. In the _**Project**_ tab, go to `Assets/TutorialAssets/URDFs/ur3_with_gripper`, right-click on the `ur3_with_gripper.urdf` file, and select `Import Robot From Selected URDF file`. In the Import window that pops up, keep the default **Y Axis** type and `VHACD` **Mesh Decomposer**. Then, click Import URDF. These actions are shown in the video below.
>Note: Unity uses a left-handed coordinate system in which the y-axis points up. However, many robotics packages use a right-handed coordinate system in which the z-axis or x-axis point up. For this reason, it is important to pay attention to the coordinate system when importing URDF files or interfacing with other robotics software.
diff --git a/Documentation/2_set_up_the_data_collection_scene.md b/Documentation/2_set_up_the_data_collection_scene.md
index bdfbe20..5df3cc1 100644
--- a/Documentation/2_set_up_the_data_collection_scene.md
+++ b/Documentation/2_set_up_the_data_collection_scene.md
@@ -5,9 +5,9 @@ In [Part 1](1_set_up_the_scene.md) of the tutorial, we learned:
* How to use the Package Manager to download and install Unity packages
* How to move and rotate objects in the Scene
* How to instantiate GameObjects with Prefabs
-* How to import a robot from a URDF file
-
-You should now have a table, a cube, a camera, and a robot arm in your Scene. In this part we will prepare the Scene for data collection with the Perception package.
+* How to import a robot from a URDF file
+
+You should now have a table, a cube, a camera, and a robot arm in your Scene. In this part we will prepare the Scene for data collection with the Perception package.
@@ -24,13 +24,13 @@ You should now have a table, a cube, a camera, and a robot arm in your Scene. In
The images you generate to train your deep learning model and the images you later use for inference during the pick-and-place task will need to have the same resolution. We will now set this resolution.
-1. In the ***Game*** view, click on the dropdown menu in front of `Display 1`. Then, click **+** to create a new preset. Make sure `Type` is set to `Fixed Resolution`. Set `Width` to `650` and `Height` to `400`. The gif below depicts these actions.
+1. In the ***Game*** view, click on the dropdown menu in front of `Display 1`. Then, click **+** to create a new preset. Make sure `Type` is set to `Fixed Resolution`. Set `Width` to `650` and `Height` to `400`. The gif below depicts these actions.
-We now need to add a few components to our camera in order to equip it for synthetic data generation.
+We now need to add a few components to our camera in order to equip it for synthetic data generation.
2. Select the `Main Camera` GameObject in the _**Hierarchy**_ tab and in the _**Inspector**_ tab, click on _**Add Component**_.
@@ -40,11 +40,11 @@ We now need to add a few components to our camera in order to equip it for synth
5. From the top menu bar of the editor, go to `Edit > Project Settings > Editor` and uncheck `Asynchronous Shader Compilation` under `Shader Compilation` options.
-In the ***Inspector*** view for the `Perception Camera` component, you can see an empty list (`List is Empty`). This is the list of Labelers. For each type of ground-truth you wish to generate alongside your captured frames, you will need to add a corresponding Labeler to this list. In our project we want to extract the position and orientation of an object, so we will use the `BoundingBox3DLabeler`.
+In the ***Inspector*** view for the `Perception Camera` component, you can see an empty list (`List is Empty`). This is the list of Labelers. For each type of ground-truth you wish to generate alongside your captured frames, you will need to add a corresponding Labeler to this list. In our project we want to extract the position and orientation of an object, so we will use the `BoundingBox3DLabeler`.
There are several other types of Labelers available, and you can even write your own. If you want more information on Labelers, you can consult the [Perception package documentation](https://github.com/Unity-Technologies/com.unity.perception).
-6. In the _**Inspector**_ view of the `Perception Camera` component, click on the _**+**_ button at the bottom right corner of the `List is Empty` field, and select `BoundingBox3DLabeler`.
+6. In the _**Inspector**_ view of the `Perception Camera` component, click on the _**+**_ button at the bottom right corner of the `List is Empty` field, and select `BoundingBox3DLabeler`.
This Labeler will annotate the captured output with 3D bounding boxes of GameObjects in the Scene that are labelled. If the `Perception Camera`'s `Show Labeler Visualizations` option is enabled, these bounding boxes will also be visualized in real-time in the ***Scene*** view as data is generated. We will next learn how to set up this Labeler.
@@ -59,7 +59,7 @@ Once you add the Labeler, the ***Inspector*** view of the `Perception Camera` co
Our work above prepares us to collect RGB images from the camera and associated 3D bounding boxes for objects in our Scene. However, we still need to specify _which_ objects we'd like to collect poses for using the Labeler we added. In this tutorial, we will only collect the pose of the cube, but you can add more objects if you'd like.
-You will notice that the `BoundingBox3DLabeler` component has a field named `Id Label Config`. The label configuration we link here will determine which objects' poses get saved in our dataset.
+You will notice that the `BoundingBox3DLabeler` component has a field named `Id Label Config`. The label configuration we link here will determine which objects' poses get saved in our dataset.
1. In the _**Project**_ tab, right-click the `Assets` folder, then click `Create -> Perception -> Id Label Config`.
@@ -67,7 +67,7 @@ This will create a new asset file named `IdLabelConfig` inside the `Assets` fold
This type of label configuration includes a list of labels, each with a numerical ID. By assigning this configuration to a Labeler, we tell the Labeler to only capture objects that carry labels that are included in the configuration's list of labels, and ignore the rest of the objects in the Scene. We will now assign this configuration the `BoundingBox3DLabeler` we just added to the `Perception Camera` component.
-2. Select the `Main Camera` object from the _**Hierarchy**_ tab, and in the _**Inspector**_ tab, assign the newly created `IdLabelConfig` asset to the `Id Label Config` field. To do so, you can either drag and drop the former into the corresponding field of the Labeler, or click on the small circular button in front of the `Id Label Config` field, which brings up an asset selection window filtered to only show compatible assets.
+2. Select the `Main Camera` object from the _**Hierarchy**_ tab, and in the _**Inspector**_ tab, assign the newly created `IdLabelConfig` asset to the `Id Label Config` field. To do so, you can either drag and drop the former into the corresponding field of the Labeler, or click on the small circular button in front of the `Id Label Config` field, which brings up an asset selection window filtered to only show compatible assets.
The `Perception Camera` component will now look like the image below:
@@ -75,13 +75,13 @@ The `Perception Camera` component will now look like the image below:
-Now we need to assign a label to the `Cube` object, and add the same label to `IdLabelConfig`, since it is the pose of the cube we wish to collect.
+Now we need to assign a label to the `Cube` object, and add the same label to `IdLabelConfig`, since it is the pose of the cube we wish to collect.
3. Select the `Cube` GameObject and in the _**Inspector**_ tab, click on the _**Add Component**_ button.
-4. Start typing `Labeling` in the search bar that appears, until the `Labeling` script is found, with a **#** icon to the left. Click on this script.
+4. Start typing `Labeling` in the search bar that appears, until the `Labeling` script is found, with a **#** icon to the left. Click on this script.
-5. In the UI that appears, click the **Add New Label** button and change `New Label` to `cube_position`. Then, click on `Add to Label Config...`, and below `Other Label Configs in Project` there should be `IdLabelConfig`. Click on `Add Label` and then close the window.
+5. In the UI that appears, click the **Add New Label** button and change `New Label` to `cube_position`. Then, click on `Add to Label Config...`, and below `Other Label Configs in Project` there should be `IdLabelConfig`. Click on `Add Label` and then close the window.
The `cube_position` label is now added to both the `Cube` object and the `IdLabelConfig` label configuration.
@@ -95,7 +95,7 @@ The _**Inspector**_ view of the `Cube` should look like the following:
### Add and Set Up Randomizers
#### Domain Randomization
-We will be collecting training data from a simulation, but most real perception use-cases occur in the real world.
+We will be collecting training data from a simulation, but most real perception use-cases occur in the real world.
To train a model to be robust enough to generalize to the real domain, we rely on a technique called [Domain Randomization](https://arxiv.org/pdf/1703.06907.pdf). Instead of training a model in a single, fixed environment, we _randomize_ aspects of the environment during training in order to introduce sufficient variation into the generated data. This forces the machine learning model to handle many small visual variations, making it more robust.
In this tutorial, we will randomize the position and the orientation of the cube on the table, and also the color, intensity, and position of the light. Note that the Randomizers in the Perception package can be extended to many other aspects of the environment as well.
@@ -106,21 +106,21 @@ To start randomizing your simulation, you will first need to add a **Scenario**
1. In the _**Hierarchy**_, click the **+** button and select `Create Empty`. Rename the newly created GameObject `Simulation Scenario`.
-2. Select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, click on the _**Add Component**_ button. Start typing `Pose Estimation Scenario` in the search bar that appears, until the `Pose Estimation Scenario` script is found, with a **#** icon to the left. Click on the script.
-
+2. Select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, click on the _**Add Component**_ button. Start typing `Pose Estimation Scenario` in the search bar that appears, until the `Pose Estimation Scenario` script is found, with a **#** icon to the left. Click on the script.
+
3. Still in the _**Inspector**_ tab of the `Simulation Scenario` GameObject, ensure the `Automatic Iteration` flag is enabled.
-Each Scenario executes a number of Iterations, and each Iteration carries on for a number of frames. These are timing elements you can leverage in order to customize your Scenarios and the timing of your randomizations.
+Each Scenario executes a number of Iterations, and each Iteration carries on for a number of frames. These are timing elements you can leverage in order to customize your Scenarios and the timing of your randomizations.
#### Writing our Custom Object Rotation Randomizer
The randomization workflow involves two types of C# classes: Randomizers and RandomizerTags. Randomizers are added to the Scenario and perform the actual randomization tasks, while RandomizerTags are attached to objects in the scene, so that Randomizers know which objects to target. One Randomizer can target many different RandomizerTags.
-First, we will write a Randomizer to randomly rotate the cube around its y-axis on each Iteration of the Scenario.
+First, we will write a Randomizer to randomly rotate the cube around its y-axis on each Iteration of the Scenario.
4. In the _**Project**_ tab, right-click on the **Scripts** folder and select `Create -> C# Script`. Name your new script file `YRotationRandomizer`.
@@ -163,15 +163,15 @@ public class YRotationRandomizer : Randomizer
```
-The purpose of this piece of code is to rotate a set of objects randomly about their y-axes every Iteration. In Unity, the y-axis points "up".
+The purpose of this piece of code is to rotate a set of objects randomly about their y-axes every Iteration. In Unity, the y-axis points "up".
>Note: If you look at the ***Console*** tab of the editor now, you will see an error regarding `YRotationRandomizerTag` not being found. This is to be expected, since we have not yet created this class; the error will go away once we create the class later.
Let's go through the code above and understand each part:
-* Near the top, you'll notice the line `[AddRandomizerMenu("Perception/Y Rotation Randomizer")]`. This will give the Randomizer a name in the UI which will be used when we add the Randomizer to our `Pose Estimation Scenario`.
+* Near the top, you'll notice the line `[AddRandomizerMenu("Perception/Y Rotation Randomizer")]`. This will give the Randomizer a name in the UI which will be used when we add the Randomizer to our `Pose Estimation Scenario`.
* The `YRotationRandomizer` class extends `Randomizer`, which is the base class for all Randomizers that can be added to a Scenario. This base class provides a plethora of useful functions and properties that can help catalyze the process of creating new Randomizers.
-* The `FloatParameter` field contains a seeded random number generator. We can set the sampling range and the distribution of this value in the editor UI for the Randomizer.
-* The `OnIterationStart()` function is a life-cycle method on all `Randomizer`s. It is called by the Scenario every Iteration (e.g. once per frame, if each Iteration runs for one frame).
+* The `FloatParameter` field contains a seeded random number generator. We can set the sampling range and the distribution of this value in the editor UI for the Randomizer.
+* The `OnIterationStart()` function is a life-cycle method on all `Randomizer`s. It is called by the Scenario every Iteration (e.g. once per frame, if each Iteration runs for one frame).
* The `tagManager` is an object available to every `Randomizer` which helps us find GameObjects tagged with a given `RandomizerTag`. In our case, we query the `tagManager` to gather references to all the `YRotationRandomizerTag`s currently present in the Scene.
* We then loop through these `tags` to rotate the object that each tag is attached to:
* `rotationRange.Sample()` gives us a random float in the specified range.
@@ -199,7 +199,7 @@ public class YRotationRandomizerTag : RandomizerTag
}
```
-The `Start` method is automatically called once, at runtime, before the first frame. Here, we use the `Start` method to save this object's original rotation in a variable. When `SetYRotation` is called by the Randomizer every Iteration, it updates the rotation around the y-axis, but keeps the x and z components of the rotation the same.
+The `Start` method is automatically called once, at runtime, before the first frame. Here, we use the `Start` method to save this object's original rotation in a variable. When `SetYRotation` is called by the Randomizer every Iteration, it updates the rotation around the y-axis, but keeps the x and z components of the rotation the same.
#### Adding our Custom Object Rotation Randomizer
@@ -212,7 +212,7 @@ If you return to your list of Randomizers in the Inspector view of `Simulation S
-10. Select the `Cube` GameObject and in the _**Inspector**_ tab, add a `YRotationRandomizerTag` component.
+10. Select the `Cube` GameObject and in the _**Inspector**_ tab, add a `YRotationRandomizerTag` component.
@@ -227,23 +227,23 @@ If you return to your list of Randomizers in the Inspector view of `Simulation S
#### Randomizing Object Positions
-It is great that we can now rotate the cube, but we also want to move it around the table. However, not all positions on the table are valid - we also need it to be within the robot arm's reach.
+It is great that we can now rotate the cube, but we also want to move it around the table. However, not all positions on the table are valid - we also need it to be within the robot arm's reach.
-To save time, we have provided a pre-written custom Randomizer to do this.
+To save time, we have provided a pre-written custom Randomizer to do this.
12. Select the `Simulation Scenario` GameObject, and do the following:
- * In the _**Inspector**_ tab, on the `Pose Estimation Scenario` component, click `Add Randomizer` and start typing `RobotArmObjectPositionRandomizer`.
- * Set `Min Robot Reachability` to `0.2` and `Max Robot Reachability` to `0.4`.
- * On the `Plane` field, click on the circular button to the right side and start typing `ObjectPlacementPlane` and then double click on the GameObject that appears.
+ * In the _**Inspector**_ tab, on the `Pose Estimation Scenario` component, click `Add Randomizer` and start typing `RobotArmObjectPositionRandomizer`.
+ * Set `Min Robot Reachability` to `0.2` and `Max Robot Reachability` to `0.4`.
+ * On the `Plane` field, click on the circular button to the right side and start typing `ObjectPlacementPlane` and then double click on the GameObject that appears.
* Drag and drop the base of the robot from the ***Hierarchy*** (the `ur3_with_gripper/world/base_link/base` object) to the `Robot Base` field.
-13. Now we need to add the corresponding RandomizerTag to the cube.
- * Select the `Cube` GameObject and in the _**Inspector**_ tab, click on the _**Add Component**_ button. Start typing `RobotArmObjectPositionRandomizerTag` in the search bar that appears, until the `RobotArmObjectPositionRandomizerTag` script is found, with a **#** icon to the left. Click on the script.
- * In the UI for this new component, enable the `Must Be Reachable` property.
+13. Now we need to add the corresponding RandomizerTag to the cube.
+ * Select the `Cube` GameObject and in the _**Inspector**_ tab, click on the _**Add Component**_ button. Start typing `RobotArmObjectPositionRandomizerTag` in the search bar that appears, until the `RobotArmObjectPositionRandomizerTag` script is found, with a **#** icon to the left. Click on the script.
+ * In the UI for this new component, enable the `Must Be Reachable` property.
The `RobotArmObjectPositionRandomizerTag` component should now look like this:
@@ -259,15 +259,15 @@ If you press **▷** (play) now, you should see the `Cube` and `Goal` objects mo
#### Light Randomizer
-Now we will add another Randomizer to introduce some variation into the Scene's lighting.
+Now we will add another Randomizer to introduce some variation into the Scene's lighting.
-14. Select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, on the `Pose Estimation Scenario` component, click on `Add Randomizer` and start typing `LightRandomizer`.
- * For the range parameter of `Light Intensity Parameter`, set `Min` to `0.9` and `Max` to `1.1`.
- * For the range parameter of `Rotation X`, set `Min` to `40` and `Max` to `80`.
- * For the range parameter of `Rotation Y`, set `Min` to `-180` and `Max` to `180`.
- * For the range parameters of `Red`, `Green` and `Blue` inside of `Light Color Parameter`, set `Min` to `0.5`.
-
-The Randomizer should now look like this:
+14. Select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, on the `Pose Estimation Scenario` component, click on `Add Randomizer` and start typing `LightRandomizer`.
+ * For the range parameter of `Light Intensity Parameter`, set `Min` to `0.9` and `Max` to `1.1`.
+ * For the range parameter of `Rotation X`, set `Min` to `40` and `Max` to `80`.
+ * For the range parameter of `Rotation Y`, set `Min` to `-180` and `Max` to `180`.
+ * For the range parameters of `Red`, `Green` and `Blue` inside of `Light Color Parameter`, set `Min` to `0.5`.
+
+The Randomizer should now look like this:
@@ -275,7 +275,7 @@ The Randomizer should now look like this:
15. Now we need to add a RandomizerTag to the light. Select the `Directional Light` GameObject and in the _**Inspector**_ tab, click on the _**Add Component**_ button. Start typing `LightRandomizerTag` in the search bar that appears, until the `LightRandomizerTag` script is found, with a **#** icon to the left. Click the script.
-To view this script, you can right click on the three dots at the right end and select `Edit Script`.
+To view this script, you can right-click on the three dots at the right end of the component and select `Edit Script`.
This Randomizer is a bit different from the previous ones. The line `[RequireComponent(typeof(Light))]` makes it so that you can only add the `LightRandomizerTag` component to an object that already has a **Light** component attached. This way, the Randomizers that query for this tag can be confident that the found objects have a **Light** component.
If you press play, you should see that the color, direction, and intensity of the lighting now change with each frame.
diff --git a/Documentation/3_data_collection_model_training.md b/Documentation/3_data_collection_model_training.md
index 762e165..7e6f06d 100644
--- a/Documentation/3_data_collection_model_training.md
+++ b/Documentation/3_data_collection_model_training.md
@@ -5,8 +5,8 @@ In [Part 1](1_set_up_the_scene.md) of the tutorial, we learned how to create our
In [Part 2](2_set_up_the_data_collection_scene.md) of the tutorial, we learned:
* How to equip the camera for the data collection
* How to set up labelling and label configurations
-* How to create your own Randomizer
-* How to add our custom Randomizer
+* How to create your own Randomizer
+* How to add our custom Randomizer
In this part, we will be collecting a large dataset of RGB images of the Scene, and the corresponding pose of the cube. We will then use this data to train a machine learning model to predict the cube's position and rotation from images taken by our camera. We will then be ready to use the trained model for our pick-and-place task in [Part 4](4_pick_and_place.md).
@@ -23,11 +23,11 @@ Steps included in this part of the tutorial:
Now it is time to collect the data: a set of images with the corresponding position and orientation of the cube relative to the camera.
-We need to collect data for the training process and data for the validation one.
+We need to collect one dataset for training and another for validation.
-We have chosen a training dataset of 30,000 images and a validation dataset of 3,000 images.
+We have chosen a training dataset of 30,000 images and a validation dataset of 3,000 images.
-1. Select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, make sure `Automatic Iteration` is enabled. When this flag is enabled, our Scenario automatically proceeds through Iterations, triggering the `OnIterationStart()` method of all Randomizers on each Iteration. When this flag is disabled, the Iterations would have to be triggered manually.
+1. Select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, make sure `Automatic Iteration` is enabled. When this flag is enabled, our Scenario automatically proceeds through Iterations, triggering the `OnIterationStart()` method of all Randomizers on each Iteration. When this flag is disabled, the Iterations would have to be triggered manually.
2. In the ***Inspector*** view of `Pose Estimation Scenario`, set the `Total Frames` field under `Constants` to 30000.
@@ -41,24 +41,24 @@ We have chosen a training dataset of 30,000 images and a validation dataset of 3
5. Click _**Show Folder**_ to show and highlight the folder in your operating system's file explorer.
-6. Change this folder's name to `UR3_single_cube_training`.
+6. Change this folder's name to `UR3_single_cube_training`.
7. Enter the folder
-You should then see something similar to this:
+You should then see something similar to this:
-Now we need to collect the validation dataset.
+Now we need to collect the validation dataset.
8. Back in Unity Editor, Select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, in `Pose Estimation Scenario`, set the `Total Frames` field under `Constants` to 3000.
9. Press play and wait until the simulation is done. Once the simulation finishes, follow the same steps as before to navigate to the output folder.
-10. Change the folder name where the latest data was saved to `UR3_single_cube_validation`.
+10. Rename the folder containing the latest data to `UR3_single_cube_validation`.
-11. **(Optional)**: Move the `UR3_single_cube_training` and `UR3_single_cube_validation` folders to a directory of your choice.
+11. **(Optional)**: Move the `UR3_single_cube_training` and `UR3_single_cube_validation` folders to a directory of your choice.
## Train the Deep Learning Model
@@ -70,14 +70,14 @@ This step can take a long time if your computer doesn't have GPU support (~5 day
### Requirements
-We support two approaches for running the model: Docker (which can run anywhere) or locally with Conda.
+We support two approaches for running the model: Docker (which can run anywhere) or locally with Conda.
#### Option A: Using Docker
If you would like to run using Docker, you can follow the [Docker steps provided](../Model/documentation/running_on_docker.md) in the model documentation.
-#### Option B: Using Conda
-To run this project locally, you will need to install [Anaconda](https://docs.anaconda.com/anaconda/install/) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html).
+#### Option B: Using Conda
+To run this project locally, you will need to install [Anaconda](https://docs.anaconda.com/anaconda/install/) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html).
If running locally without Docker, we first need to create a Conda virtual environment and install the dependencies for our machine learning model. If you only have access to CPUs, install the dependencies specified in the `environment.yml` file. If your development machine has GPU support, you can choose to use the `environment-gpu.yml` file instead.
@@ -95,7 +95,7 @@ conda activate
### Updating the Model Config
-At the top of the [cli.py](../Model/pose_estimation/cli.py) file in the model code, you can see the documentation for all supported commands. Since typing these in can be laborious, we use a [config.yaml](../Model/config.yaml) file to feed in all these arguments. You can still use the command line arguments if you want - they will override the config.
+At the top of the [cli.py](../Model/pose_estimation/cli.py) file in the model code, you can see the documentation for all supported commands. Since typing these in can be laborious, we use a [config.yaml](../Model/config.yaml) file to feed in all these arguments. You can still use the command line arguments if you want - they will override the config.
There are a few settings specific to your setup that you'll need to change.
@@ -106,7 +106,7 @@ First, we need to specify the path to the folders where your training and valida
data_root: /Users//Documents/data
```
-Second, we need to modify the location where the model is going to be saved:
+Second, we need to modify the location where the model is going to be saved:
5. In the [config.yaml](../Model/config.yaml), under `system`, you need to set the argument `log_dir_system` to the full path of the output folder where your model's results will be saved. For example, I created a new directory called `models` in my Documents, and then set the following:
```bash
@@ -115,11 +115,11 @@ log_dir_system: /Users//Documents/models
### Training the model
-6. If you are not already in the `Robotics-Object-Pose-Estimation/Model` directory, navigate there.
+6. If you are not already in the `Robotics-Object-Pose-Estimation/Model` directory, navigate there.
-7. Enter the following command to start training:
-```bash
-python -m pose_estimation.cli train
+7. Enter the following command to start training:
+```bash
+python -m pose_estimation.cli train
```
>Note (Optional): If you want to override certain training hyperparameters, you can do so with additional arguments on the above command. See the documentation at the top of [cli.py](../Model/pose_estimation/cli.py) for a full list of supported arguments.
@@ -130,7 +130,7 @@ python -m pose_estimation.cli train
If you'd like to examine the results of your training run in more detail, see our guide on [viewing the Tensorboard logs](../Model/documentation/tensorboard.md).
### Evaluating the Model
-Once training has completed, we can also run our model on our validation dataset to measure its performance on data it has never seen before.
+Once training has completed, we can also run our model on our validation dataset to measure its performance on data it has never seen before.
However, first we need to specify a few settings in our config file.
@@ -138,9 +138,9 @@ However, first we need to specify a few settings in our config file.
9. If you are not already in the `Robotics-Object-Pose-Estimation/Model` directory, navigate there.
-10. To start the evaluation run, enter the following command:
-```bash
-python -m pose_estimation.cli evaluate
+10. To start the evaluation run, enter the following command:
+```bash
+python -m pose_estimation.cli evaluate
```
>Note (Optional): To override additional settings on your evaluation run, you can tag on additional arguments to the command above. See the documentation in [cli.py](../Model/pose_estimation/cli.py) for more details.
@@ -151,6 +151,6 @@ python -m pose_estimation.cli evaluate
### Proceed to [Part 4](4_pick_and_place.md).
-###
+###
### Go back to [Part 2](2_set_up_the_data_collection_scene.md)
diff --git a/Documentation/4_pick_and_place.md b/Documentation/4_pick_and_place.md
index e045fde..96b3193 100644
--- a/Documentation/4_pick_and_place.md
+++ b/Documentation/4_pick_and_place.md
@@ -2,11 +2,11 @@
In [Part 1](1_set_up_the_scene.md) of the tutorial, we learned how to create our Scene in Unity Editor. In [Part 2](2_set_up_the_data_collection_scene.md), we set up the Scene for data collection.
-In [Part 3](3_data_collection_model_training.md) we have learned:
-* How to collect the data
+In [Part 3](3_data_collection_model_training.md), we learned:
+* How to collect data
* How to train the deep learning model
-In this part, we will use our trained deep learning model to predict the pose of the cube, and pick it up with our robot arm.
+In this part, we will use our trained deep learning model to predict the pose of the cube, and pick it up with our robot arm.
@@ -22,15 +22,15 @@ In this part, we will use our trained deep learning model to predict the pose of
---
### Set up
-If you have correctly followed parts 1 and 2, whether or not you choose to use the Unity project given by us or start it from scratch, you should have cloned the repository.
+If you have correctly followed Parts 1 and 2, you should have already cloned the repository, whether you chose to use the Unity project we provide or to build it from scratch.
->Note: This project uses Git Submodules to grab the ROS package dependencies for the [`universal_robot`](https://github.com/ros-industrial/universal_robot), [`moveit_msgs`](https://github.com/ros-planning/moveit_msgs), [`ros_tcp_endpoint`](https://github.com/Unity-Technologies/ROS-TCP-Endpoint), and the [`robotiq`](https://github.com/JStech/robotiq/tree/noetic-mods)) folders. If you cloned the project and forgot to use `--recurse-submodules`, or if any submodule in this directory doesn't have content (e.g. moveit_msgs or ros_tcp_endpoint), you can run the following command to grab the Git submodules.
+>Note: This project uses Git Submodules to grab the ROS package dependencies for the [`universal_robot`](https://github.com/ros-industrial/universal_robot), [`moveit_msgs`](https://github.com/ros-planning/moveit_msgs), [`ros_tcp_endpoint`](https://github.com/Unity-Technologies/ROS-TCP-Endpoint), and the [`robotiq`](https://github.com/JStech/robotiq/tree/noetic-mods) folders. If you cloned the project and forgot to use `--recurse-submodules`, or if any submodule in this directory doesn't have content (e.g. moveit_msgs or ros_tcp_endpoint), you can run the following command to grab the Git submodules.
> ```bash
> cd /PATH/TO/Robotics-Object-Pose-Estimation &&
-> git submodule update --init --recursive
+> git submodule update --init --recursive
> ```
-In your ROS/src folder, you should now have five subdirectories: `moveit_msgs`, `robotiq`, `ros_tcp_endpoint`, `universal_robot` and `ur3_moveit`.
+In your ROS/src folder, you should now have five subdirectories: `moveit_msgs`, `robotiq`, `ros_tcp_endpoint`, `universal_robot` and `ur3_moveit`.
### Add the Pose Estimation Model
@@ -56,7 +56,7 @@ The provided ROS files require the following packages to be installed. The follo
Building this Docker container will install the necessary packages for this tutorial.
-1. Install the [Docker Engine](https://docs.docker.com/engine/install/) if not already installed. Start the Docker daemon. To check if the Docker daemon is running, when you open you Docker application you should see something similar to the following (green dot on the bottom left corner with the word running at the foot of Docker):
+1. Install the [Docker Engine](https://docs.docker.com/engine/install/) if not already installed, and start the Docker daemon. To check whether the Docker daemon is running, open your Docker application; you should see something similar to the following (a green dot in the bottom-left corner with the word running at the foot of Docker):
@@ -71,17 +71,17 @@ docker build -t unity-robotics:pose-estimation -f docker/Dockerfile .
>Note: The provided Dockerfile uses the [ROS Noetic base Image](https://hub.docker.com/_/ros/). Building the image will install the necessary packages as well as copy the [provided ROS packages and submodules](../ROS/) to the container, predownload and cache the [VGG16 model](https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16), and build the catkin workspace.
-3. Start the newly built Docker container:
+3. Start the newly built Docker container:
```docker
docker run -it --rm -p 10000:10000 -p 5005:5005 unity-robotics:pose-estimation /bin/bash
```
-When this is complete, it will print: `Successfully tagged unity-robotics:pose-estimation`. This console should open into a bash shell at the ROS workspace root, e.g. `root@8d88ed579657:/catkin_ws#`.
+When the build is complete, it prints `Successfully tagged unity-robotics:pose-estimation`. Running the container should then open a bash shell at the ROS workspace root, e.g. `root@8d88ed579657:/catkin_ws#`.
>Note: If you encounter issues with Docker, check the [Troubleshooting Guide](troubleshooting.md) for potential solutions.
-4. Source your ROS workspace:
+4. Source your ROS workspace:
```bash
source devel/setup.bash
@@ -89,7 +89,7 @@ source devel/setup.bash
The ROS workspace is now ready to accept commands!
->Note: The Docker-related files (Dockerfile, bash scripts for setup) are located in `Robotics-Object-Pose-Estimation/docker`.
+>Note: The Docker-related files (Dockerfile, bash scripts for setup) are located in `Robotics-Object-Pose-Estimation/docker`.
---
@@ -97,7 +97,7 @@ The ROS workspace is now ready to accept commands!
If your Pose Estimation Tutorial Unity project is not already open, select and open it from the Unity Hub.
-We will work on the same Scene that was created in the [Part 1](1_set_up_the_scene.md) and [Part 2](2_set_up_the_data_collection_scene.md), so if you have not already, complete Parts 1 and 2 to set up the Unity project.
+We will work in the same Scene that was created in [Part 1](1_set_up_the_scene.md) and [Part 2](2_set_up_the_data_collection_scene.md), so if you have not already done so, complete Parts 1 and 2 to set up the Unity project.
#### Connecting with ROS
@@ -105,8 +105,8 @@ Prefabs have been provided for the UI elements and Trajectory Planner for conven
1. In the ***Project*** tab, go to `Assets/TutorialAssets/Prefabs/Part4` and drag and drop the `ROSObjects` Prefab into the _**Hierarchy**_ tab.
-2. The ROS TCP connection needs to be created. In the top menu bar in Unity Editor, select `Robotics -> ROS Settings`. Find the IP address of your ROS machine.
- * If you are going to run ROS services with the Docker container introduced [above](#step-3), fill `ROS IP Address` and `Override Unity IP` with the loopback IP address `127.0.0.1`. If you will be running ROS services via a non-Dockerized setup, you will most likely want to have the `Override Unity IP` field blank, which will let the Unity IP be determined automatically.
+2. The ROS TCP connection needs to be created. In the top menu bar in Unity Editor, select `Robotics -> ROS Settings`. Find the IP address of your ROS machine.
+ * If you are going to run ROS services with the Docker container introduced [above](#step-3), fill `ROS IP Address` and `Override Unity IP` with the loopback IP address `127.0.0.1`. If you will be running ROS services via a non-Dockerized setup, you will most likely want to have the `Override Unity IP` field blank, which will let the Unity IP be determined automatically.
* If you are **not** going to run ROS services with the Docker container, e.g. if you are using a dedicated Linux machine or VM instead, open a terminal window in this ROS workspace. Set the `ROS IP Address` field in Unity Editor to the output of the following command:
@@ -155,7 +155,7 @@ void PoseEstimationCallback(PoseEstimationServiceResponse response)
{
if (response != null)
{
- // The position output by the model is the position of the cube relative to the camera so we need to extract its global position
+ // The position output by the model is the position of the cube relative to the camera so we need to extract its global position
var estimatedPosition = Camera.main.transform.TransformPoint(response.estimated_pose.position.From());
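+            // The orientation is likewise relative to the camera, so compose it with the camera's world rotation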
var estimatedRotation = Camera.main.transform.rotation * response.estimated_pose.orientation.From();
@@ -185,9 +185,9 @@ Note that the `TrajectoryPlanner` component shows its member variables in the _*
#### Switching to Inference Mode
-7. On the `Simulation Scenario` GameObject, uncheck the `Automatic Iteration` property of the `Pose Estimation Scenario`, as we are no longer in the Data Collection part. If you want to collect new data in the future, you can always enable `Automatic Iteration` and disable `ROSObjects`.
+7. On the `Simulation Scenario` GameObject, uncheck the `Automatic Iteration` property of the `Pose Estimation Scenario`, as we are no longer in the Data Collection part. If you want to collect new data in the future, you can always enable `Automatic Iteration` and disable `ROSObjects`.
-8. On the `Main Camera` GameObject, uncheck the `Perception Camera` script component, since we do not need it anymore.
+8. On the `Main Camera` GameObject, uncheck the `Perception Camera` script component, since we do not need it anymore.
Also note that UI elements have been provided in `ROSObjects/Canvas`, including the Event System that is added by default by Unity. In `ROSObjects/Canvas/ButtonPanel`, the `OnClick` callbacks have been pre-assigned in the Prefab. These buttons set the robot to its upright default position, randomize the cube position and rotation, randomize the target, and call the Pose Estimation service.
@@ -199,12 +199,12 @@ Run the following roslaunch command in order to start roscore, set the ROS param
1. In the terminal window of your ROS workspace opened in [Set up the ROS side](#step-3), run the provided launch file:
```bash
-roslaunch ur3_moveit pose_est.launch
+roslaunch ur3_moveit pose_est.launch
```
---
-This launch file also loads all relevant files and starts ROS nodes required for trajectory planning for the UR3 robot (`demo.launch`). The launch files for this project are available in the package's launch directory, i.e. `src/ur3_moveit/launch`.
+This launch file also loads all relevant files and starts ROS nodes required for trajectory planning for the UR3 robot (`demo.launch`). The launch files for this project are available in the package's launch directory, i.e. `src/ur3_moveit/launch`.
This launch will print various messages to the console, including the set parameters and the nodes launched. The final message should confirm `You can start planning now!`.
@@ -214,24 +214,24 @@ This launch will print various messages to the console, including the set parame
2. Return to Unity, and press Play.
->Note: If you encounter connection errors such as a `SocketException` or don't see a completed TCP handshake between ROS and Unity in the ***Console*** window, return to the [Connecting with ROS](#connecting-with-ros) section above to update the ROS Settings and generate the ROSConnectionPrefab.
+>Note: If you encounter connection errors such as a `SocketException` or don't see a completed TCP handshake between ROS and Unity in the ***Console*** window, return to the [Connecting with ROS](#connecting-with-ros) section above to update the ROS Settings and generate the ROSConnectionPrefab.
>Note: If you encounter a `SocketException` on Ubuntu, check the [Troubleshooting Guide](troubleshooting.md) for potential solutions.
Note that the robot arm must be in its default position, i.e. standing upright, to perform Pose Estimation. This is done by simply clicking the `Reset Robot Position` button after each run.
-3. Press the `Pose Estimation` button to send the image to ROS.
+3. Press the `Pose Estimation` button to send the image to ROS.
This will grab the current camera view, generate a [sensor_msgs/Image](http://docs.ros.org/en/noetic/api/sensor_msgs/html/msg/Image.html) message, and send a new Pose Estimation Service Request to the ROS node running `pose_estimation_service.py`. This will run the trained model and return a Pose Estimation Service Response containing an estimated pose, which is subsequently converted and sent as a new Mover Service Request to the `mover.py` ROS node. Finally, MoveIt calculates and returns a list of trajectories to Unity, and the poses are executed to pick up and place the cube.
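+To make this exchange more concrete, below is a minimal, illustrative sketch of the ROS side of the Pose Estimation service. It is **not** the repository's `pose_estimation_service.py`: the node and service names, the `ur3_moveit.srv` import path, and the `run_model_inference` helper are assumptions used only to show the shape of the request/response handling.
+
+```python
+#!/usr/bin/env python3
+"""Illustrative sketch of a pose estimation ROS service (names are assumptions)."""
+import numpy as np
+import rospy
+from geometry_msgs.msg import Point, Pose, Quaternion
+from ur3_moveit.srv import PoseEstimationService, PoseEstimationServiceResponse
+
+
+def run_model_inference(image):
+    # Placeholder for loading the trained VGG-based model and running a forward pass;
+    # it should return an (x, y, z) position and a unit quaternion (qx, qy, qz, qw).
+    return (0.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0)
+
+
+def handle_pose_estimation(request):
+    # Reinterpret the sensor_msgs/Image payload as an H x W x C array (rgb8 assumed).
+    image = np.frombuffer(request.image.data, dtype=np.uint8).reshape(
+        request.image.height, request.image.width, -1)
+    position, orientation = run_model_inference(image)
+    response = PoseEstimationServiceResponse()
+    response.estimated_pose = Pose(position=Point(*position),
+                                   orientation=Quaternion(*orientation))
+    return response
+
+
+if __name__ == "__main__":
+    rospy.init_node("pose_estimation_server")
+    rospy.Service("pose_estimation_srv", PoseEstimationService, handle_pose_estimation)
+    rospy.spin()
+```
+
+In the actual project, the service and message types are generated from the package's `.srv` definitions, so the exact type and field names come from the ROS workspace rather than from this sketch.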
-The target object and empty goal object can be moved around during runtime for different trajectory calculations, or can be randomized using the `Randomize Cube` button.
+The target object and empty goal object can be moved around during runtime for different trajectory calculations, or can be randomized using the `Randomize Cube` button.
>Note: You may encounter a `UserWarning: CUDA initialization: Found no NVIDIA driver on your system.` error upon the first image prediction attempt. This warning can be safely ignored.
>Note: If you encounter issues with the connection between Unity and ROS, check the [Troubleshooting Guide](troubleshooting.md) for potential solutions.
-You should see the following:
+You should see the following:
diff --git a/Documentation/5_more_randomizers.md b/Documentation/5_more_randomizers.md
index 1fecca0..2025eb2 100644
--- a/Documentation/5_more_randomizers.md
+++ b/Documentation/5_more_randomizers.md
@@ -2,12 +2,12 @@
In the main tutorial, we randomized the position and rotation of the cube. However, the Perception Package supports much more sophisticated environment randomization. In this (optional) section we will create a richer and more varied environment by adding one more Randomizer to our scene.
-In addition to the `YRotationRandomizer` and the `RobotArmObjectPositionRandomizer`, we have designed one more Randomizer:
+In addition to the `YRotationRandomizer` and the `RobotArmObjectPositionRandomizer`, we have designed one more Randomizer:
* The `UniformPoseRandomizer` randomizes an object's position and rotation relative to a fixed starting pose, over a specified range. We will apply this to the camera, to make our trained model more robust to small inaccuracies in placing the real camera.
### Randomizing the Camera Pose
-1. Select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, on the `Pose Estimation Scenario` component, add a `Uniform Pose Randomizer`. For the `Random` parameter, set the minimum value of the Range to `-1`. We do this because we want to randomize the position and rotation in both directions for a given axis. The Randomizer's UI snippet should look like the following image:
+1. Select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, on the `Pose Estimation Scenario` component, add a `Uniform Pose Randomizer`. For the `Random` parameter, set the minimum value of the Range to `-1`. We do this because we want to randomize the position and rotation in both directions for a given axis. The Randomizer's UI snippet should look like the following image:
@@ -26,4 +26,4 @@ If you press play, you should see the cube moving around the robot and rotating,
You have now learned how to create a Randomizer, and seen how multiple Randomizers can be used together to create a rich, varied scene. Now it is time to create your own Randomizer by yourself! How could this Scene be further improved?
-Good luck and have fun!
+Good luck and have fun!
diff --git a/Documentation/install_unity.md b/Documentation/install_unity.md
index ab01768..46c889a 100644
--- a/Documentation/install_unity.md
+++ b/Documentation/install_unity.md
@@ -2,7 +2,7 @@
_**Unity Version**_: if you want to use the Unity project provided by the repository, you need to use a Unity version of at least `2020.2`. The easiest way to install Unity is through Unity Hub.
-1. Navigate to [this](https://unity3d.com/get-unity/download) page to download Unity Hub
+1. Navigate to [this](https://unity3d.com/get-unity/download) page to download Unity Hub
2. In Unity Hub, go to the `Installs` panel, then click `Add` and select the latest release of `Unity 2020.2`.
diff --git a/Documentation/quick_demo_full.md b/Documentation/quick_demo_full.md
index 47a4e57..261c4bf 100644
--- a/Documentation/quick_demo_full.md
+++ b/Documentation/quick_demo_full.md
@@ -15,9 +15,9 @@ If you just want to run the completed project, this section can help you get up
## Prerequisites
-You will first need to **clone** this repository.
+You will first need to **clone** this repository.
-1. Open a terminal and navigate to the folder where you want to host the repository.
+1. Open a terminal and navigate to the folder where you want to host the repository.
```bash
git clone --recurse-submodules https://github.com/Unity-Technologies/Robotics-Object-Pose-Estimation.git
```
@@ -26,9 +26,9 @@ git clone --recurse-submodules https://github.com/Unity-Technologies/Robotics-Ob
## Setup
-1. Open the completed project. In the Unity Hub, click the `Add` button, and select `Robotics-Object-Pose-Estimation/PoseEstimationDemoProject` from inside the file location where you cloned the repo.
+1. Open the completed project. In the Unity Hub, click the `Add` button, and select `Robotics-Object-Pose-Estimation/PoseEstimationDemoProject` from inside the file location where you cloned the repo.
-2. Open the scene. Go to `Assets/Scenes` and double click on `TutorialPoseEstimation`.
+2. Open the scene. Go to `Assets/Scenes` and double click on `TutorialPoseEstimation`.
3. We now need to set the size of the images used. In the ***Game*** view, click on the dropdown menu in front of `Display 1`. Then, click **+** to create a new preset. Make sure `Type` is set to `Fixed Resolution`. Set `Width` to `650` and `Height` to `400`. The gif below depicts these actions.
@@ -38,7 +38,7 @@ git clone --recurse-submodules https://github.com/Unity-Technologies/Robotics-Ob
## Add the Pose Estimation Model
-In your root `Robotics-Object-Pose-Estimation` folder, you should have a `ROS` folder. Inside that folder you should have a `src` folder and inside that one 5 folders: `moveit_msgs`, `robotiq`, `ros_tcp_endpoint`, `universal_robot` and `ur3_moveit`.
+In your root `Robotics-Object-Pose-Estimation` folder, you should have a `ROS` folder. Inside that folder you should have a `src` folder containing 5 folders: `moveit_msgs`, `robotiq`, `ros_tcp_endpoint`, `universal_robot` and `ur3_moveit`.
1. Download the [pose estimation model](https://github.com/Unity-Technologies/Robotics-Object-Pose-Estimation/releases/download/v0.0.1/UR3_single_cube_model.tar) we have trained.
@@ -49,9 +49,9 @@ In your root `Robotics-Object-Pose-Estimation` folder, you should have a `ROS` f
>Note: This project has been developed with Python 3 and ROS Noetic.
-We have provided a Docker container to get you up and running quickly.
+We have provided a Docker container to get you up and running quickly.
-1. Install the [Docker Engine](https://docs.docker.com/engine/install/) if not already installed. Start the Docker daemon. To check if the Docker daemon is running, when you open you Docker application you should see something similar to the following (green dot on the bottom left corner with the word running at the foot of Docker):
+1. Install the [Docker Engine](https://docs.docker.com/engine/install/) if not already installed, and start the Docker daemon. To check that the daemon is running, open your Docker application: you should see something similar to the following (a green dot in the bottom left corner with the word "running" at the foot of the Docker window):
@@ -65,17 +65,17 @@ docker build -t unity-robotics:pose-estimation -f docker/Dockerfile .
>Note: The provided Dockerfile uses the [ROS Noetic base Image](https://hub.docker.com/_/ros/). Building the image will install the necessary packages as well as copy the [provided ROS packages and submodules](../ROS/) to the container, predownload and cache the [VGG16 model](https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16), and build the catkin workspace.
-3. Start the newly built Docker container:
+3. Start the newly built Docker container:
```docker
docker run -it --rm -p 10000:10000 -p 5005:5005 unity-robotics:pose-estimation /bin/bash
```
-When this is complete, it will print: `Successfully tagged unity-robotics:pose-estimation`. This console should open into a bash shell at the ROS workspace root, e.g. `root@8d88ed579657:/catkin_ws#`.
+The earlier `docker build` step, once complete, prints `Successfully tagged unity-robotics:pose-estimation`. Running the container should open a bash shell at the ROS workspace root, e.g. `root@8d88ed579657:/catkin_ws#`.
>Note: If you encounter issues with Docker, check the [Troubleshooting Guide](troubleshooting.md) for potential solutions.
-4. Source your ROS workspace:
+4. Source your ROS workspace:
```bash
source devel/setup.bash
@@ -86,7 +86,7 @@ The ROS workspace is now ready to accept commands!
## Set Up the Unity Side
-1. At the top of your screen, open the ROS settings by selecting `Robotics/ROS Settings`. Fill `ROS IP Address` and `Override Unity IP` with the loopback IP address `127.0.0.1`.
+1. At the top of your screen, open the ROS settings by selecting `Robotics/ROS Settings`. Fill `ROS IP Address` and `Override Unity IP` with the loopback IP address `127.0.0.1`.
2. Ensure that `ROS Port` is set to `10000` and `Unity Port` is set to `5005`.
@@ -101,10 +101,10 @@ Run the following `roslaunch` command in order to start roscore, set the ROS par
1. In the terminal window of your ROS workspace opened above, run the provided launch file:
```bash
-roslaunch ur3_moveit pose_est.launch
+roslaunch ur3_moveit pose_est.launch
```
-This launch file also loads all relevant files and starts ROS nodes required for trajectory planning for the UR3 robot. The launch files for this project are available in the package's launch directory, i.e. `src/ur3_moveit/launch`.
+This launch file also loads all relevant files and starts ROS nodes required for trajectory planning for the UR3 robot. The launch files for this project are available in the package's launch directory, i.e. `src/ur3_moveit/launch`.
This launch will print various messages to the console, including the set parameters and the nodes launched. The final message should confirm `You can start planning now!`.
@@ -119,17 +119,17 @@ This launch will print various messages to the console, including the set parame
Note that the robot arm must be in its default position, i.e. standing upright, to perform Pose Estimation. This is done by simply clicking the `Reset Robot Position` button after each run.
-3. Press the `Pose Estimation` button to send the image to ROS.
+3. Press the `Pose Estimation` button to send the image to ROS.
This will grab the current camera view, generate a [sensor_msgs/Image](http://docs.ros.org/en/noetic/api/sensor_msgs/html/msg/Image.html) message, and send a new Pose Estimation Service Request to the ROS node running `pose_estimation_service.py`. This will run the trained model and return a Pose Estimation Service Response containing an estimated pose, which is subsequently converted and sent as a new Mover Service Request to the `mover.py` ROS node. Finally, MoveIt calculates and returns a list of trajectories to Unity, and the poses are executed to pick up and place the cube.
-The target object and goal object can be moved around during runtime for different trajectory calculations, or the target can be randomized using the `Randomize Cube` button.
+The target object and goal object can be moved around during runtime for different trajectory calculations, or the target can be randomized using the `Randomize Cube` button.
>Note: You may encounter a `UserWarning: CUDA initialization: Found no NVIDIA driver on your system.` error upon the first image prediction attempt. This warning can be safely ignored.
>Note: If you encounter issues with the connection between Unity and ROS, check the [Troubleshooting Guide](troubleshooting.md) for potential solutions.
-You should see the following:
+You should see the following:
diff --git a/Documentation/quick_demo_train.md b/Documentation/quick_demo_train.md
index 808d664..e08a6cc 100644
--- a/Documentation/quick_demo_train.md
+++ b/Documentation/quick_demo_train.md
@@ -1,6 +1,6 @@
# Data Collection: Quick Demo
-If you just want to run the completed project in order to collect your training and validation data this section can help do it.
+If you just want to run the completed project in order to collect your training and validation data, this section can help you do so.
To learn how to build something like this from scratch, see [Part 1](1_set_up_the_scene.md) and [Part 2](2_set_up_the_data_collection_scene.md) of our tutorial.
@@ -12,20 +12,20 @@ To learn how to build something like this from scratch, see [Part 1](1_set_up_th
## Prerequisites
-To follow this tutorial you need to **clone** this repository even if you want to create your Unity project from scratch.
+To follow this tutorial you need to **clone** this repository even if you want to create your Unity project from scratch.
-1. Open a terminal and navigate to the folder where you want to host the repository.
+1. Open a terminal and navigate to the folder where you want to host the repository.
```bash
git clone --recurse-submodules https://github.com/Unity-Technologies/Robotics-Object-Pose-Estimation.git
```
2. [Install Unity `2020.2.*`.](install_unity.md)
-3. Open the completed project. To do so, open Unity Hub, click the `Add` button, and select `PoseEstimationDemoProject` from the root `Robotics-Object-Pose-Estimation` folder.
+3. Open the completed project. To do so, open Unity Hub, click the `Add` button, and select `PoseEstimationDemoProject` from the root `Robotics-Object-Pose-Estimation` folder.
## Setup
-1. Once the project is opened, in the ***Project*** tab, go to `Assets > Scenes` and double click on `TutorialPoseEstimation` to open the Scene created for this tutorial.
+1. Once the project is opened, in the ***Project*** tab, go to `Assets > Scenes` and double click on `TutorialPoseEstimation` to open the Scene created for this tutorial.
2. We now need to set the size of the images used. In the ***Game*** view, click on the dropdown menu in front of `Display 1`. Then, click **+** to create a new preset. Make sure `Type` is set to `Fixed Resolution`. Set `Width` to `650` and `Height` to `400`. The gif below depicts these actions.
@@ -43,8 +43,8 @@ The completed project is set up for inference mode by default, so we must switch
3. On the `Main Camera` GameObject, check the `Perception Camera (Script)` component to enable it.
## Data Collection
-To get started with the data collection, follow the instructions in [Part 3: Collect the Training and Validation Data](3_data_collection_model_training.md#step-1) of the tutorial. This section will explain how to set the a random seed for the environment, choose how many training data examples you'd like to collect, and get things running.
+To get started with the data collection, follow the instructions in [Part 3: Collect the Training and Validation Data](3_data_collection_model_training.md#step-1) of the tutorial. This section will explain how to set a random seed for the environment, choose how many training data examples you'd like to collect, and get things running.
-If you'd like to move on to training a pose estimation model on the data you've collected, navigate to [Part 3: Train the Deep Learning Model](3_data_collection_model_training.md#step-2).
+If you'd like to move on to training a pose estimation model on the data you've collected, navigate to [Part 3: Train the Deep Learning Model](3_data_collection_model_training.md#step-2).
Have fun!
diff --git a/Documentation/troubleshooting.md b/Documentation/troubleshooting.md
index 6ad8247..46bafca 100644
--- a/Documentation/troubleshooting.md
+++ b/Documentation/troubleshooting.md
@@ -17,29 +17,29 @@
### Package Installation
- If you are receiving a `[Package Manager Window] Unable to add package ... xcrun: error: invalid developer path...`, you may need to install the [Command Line Tools](https://developer.apple.com/library/archive/technotes/tn2339/_index.html) package for macOS via `xcode-select --install`.
- If receiving `[Package Manager] Done resolving packages in ... seconds ... An error occurred while resolving packages: Project has invalid dependencies: ... Error when executing git command. fatal: update_ref failed for ref 'HEAD': cannot update ref 'refs/heads/master'` or similar git-related Package Manager errors, please note that this is a known issue that is being tracked on the [Issue Tracker](https://issuetracker.unity3d.com/issues/package-resolution-error-when-using-a-git-dependency-referencing-an-annotated-tag-in-its-git-url). The current workaround is to use a lightweight tag for the git URLs, i.e. `https://github.com/...#v0.2.0-light`. This workaround is reflected in the current version of the tutorial.
-
+
### Assets, Materials
- Upon import, the cube and floor materials may appear to be bright pink (i.e. missing texture).
- Cube: Go to `Assets/TutorialAssets/Materials`. Select the `AlphabetCubeMaterial`. There is a section called `Surface Inputs`. If the Base Map is not assigned, select the circle next to this field. Click on it and start typing `NonsymmetricCubeTexture` and select it when it appears. Apply this updated `AlphabetCubeMaterial` to the Cube. Your Inspector view of the Material should look like the following:
![](Images/1_alphabet_material.png)
- Floor: Assign the `NavyFloor` material to the Floor object.
- If all of the project materials appear to have missing textures, ensure you have created the project using the Universal Render Pipeline.
-- If the UR3 arm's base has some missing textures (e.g. pink ovals), in the Project window, navigate to `Assets/TutorialAssets/URDFs/ur3_with_gripper/ur_description/meshes/ur3/visual >base.dae`. Select the base, and in the ***Inspector*** window, open the ***Materials*** tab. If the `Material_001` and `_002` fields are blank, assign them to `Assets/TutorialAssets/URDFs/ ur3_with_gripper/ur_description/Materials/Material_001` and `_002`, respectively.
-
+- If the UR3 arm's base has some missing textures (e.g. pink ovals), in the Project window, navigate to `Assets/TutorialAssets/URDFs/ur3_with_gripper/ur_description/meshes/ur3/visual/base.dae`. Select the base, and in the ***Inspector*** window, open the ***Materials*** tab. If the `Material_001` and `_002` fields are blank, assign them to `Assets/TutorialAssets/URDFs/ur3_with_gripper/ur_description/Materials/Material_001` and `_002`, respectively.
+
![](Images/faq_base_mat.png)
### URDF Importer
-- If you are not seeing `Import Robot from URDF` in the `Assets` menu, check the ***Console*** for compile errors. The project must compile correctly before the editor tools become available.
-- If the robot appears loose/wiggly or is not moving with no console errors, ensure on the `Controller` script of the `ur3_with_gripper` that the `Stiffness` is **10000**, the `Damping` is **1000** and the `Force Limit` is **1000**.
-- Note that the world-space origin of the robot is defined in its URDF file. In this sample, we have assigned it to sit on top of the table, which is at `(0, 0.77, 0)` in Unity coordinates. Moving the robot from its root position in Unity will require a change to its URDF definition.
+- If you are not seeing `Import Robot from URDF` in the `Assets` menu, check the ***Console*** for compile errors. The project must compile correctly before the editor tools become available.
+- If the robot appears loose/wiggly or is not moving with no console errors, ensure on the `Controller` script of the `ur3_with_gripper` that the `Stiffness` is **10000**, the `Damping` is **1000** and the `Force Limit` is **1000**.
+- Note that the world-space origin of the robot is defined in its URDF file. In this sample, we have assigned it to sit on top of the table, which is at `(0, 0.77, 0)` in Unity coordinates. Moving the robot from its root position in Unity will require a change to its URDF definition.
- ```xml
-
-
-
-
-
- ```
+ ```xml
+
+
+
+
+
+ ```
**Note**: Going from Unity world space to ROS world space requires a conversion. Unity's `(x,y,z)` is equivalent to the ROS `(z,-x,y)` coordinate.
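+As a quick, illustrative check of this conversion (not code from the project), the table height used in this tutorial maps as follows:
+
+```python
+def unity_to_ros(x, y, z):
+    """Convert a Unity (x, y, z) position to the ROS (z, -x, y) convention."""
+    return (z, -x, y)
+
+
+# The robot root sits on the table at (0, 0.77, 0) in Unity coordinates,
+# which corresponds to 0.77 along the ROS z axis.
+print(unity_to_ros(0.0, 0.77, 0.0))  # (0.0, -0.0, 0.77)
+```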
@@ -48,7 +48,7 @@
### Docker, Environment
- If you are using a Docker container to train your model but it is killed shortly after starting, you may need to increase the memory allocated to Docker. In the Docker Dashboard, navigate to Settings (via the gear icon) > Resources. The suggested minimum memory is 4.00 GB, but you may need to modify this for your particular needs.
- If you encounter errors installing Pytorch via the instructed `pip3` command, try the following instead:
- ```bash
+ ```bash
sudo pip3 install rospkg numpy jsonpickle scipy easydict torch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
```
@@ -58,21 +58,21 @@
- `Error processing request: invalid load key...` This has most likely occurred due to the downloaded model's `.tar` file being corrupted, e.g. caused by an unstable connection, or otherwise interrupted download process. Please try redownloading the [UR3_single_cube_model.tar](https://github.com/Unity-Technologies/Robotics-Object-Pose-Estimation/releases/download/v0.0.1/UR3_single_cube_model.tar) file and try the process again.
### Unity Scene
-- The buttons might appear oversized compared to the rest of the objects in the scene view, this is a normal behavior. If you zoom out from the table you should see something similar to the following:
+- The buttons might appear oversized compared to the rest of the objects in the Scene view; this is normal behavior. If you zoom out from the table, you should see something similar to the following:
### Docker, ROS-TCP Connection
-- Building the Docker image may throw an `Could not find a package configuration file provided by...` exception if one or more of the directories in ROS/ appears empty. This project uses Git Submodules to grab the ROS package dependencies. If you cloned the project and forgot to use `--recurse-submodules`, or if any submodule in this directory doesn't have content, you can run the `git submodule update --init --recursive` to grab the Git submodules.
+- Building the Docker image may throw a `Could not find a package configuration file provided by...` exception if one or more of the directories in ROS/ appear empty. This project uses Git Submodules to grab the ROS package dependencies. If you cloned the project and forgot to use `--recurse-submodules`, or if any submodule in this directory doesn't have content, you can run `git submodule update --init --recursive` to grab the Git submodules.
- `...failed because unknown error handler name 'rosmsg'` This is due to a bug in an outdated package version. Try running `sudo apt-get update && sudo apt-get upgrade` to upgrade packages.
-- `Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?` The system-independent `docker info` command can verify whether or not Docker is running. This command will throw a `Server: ERROR` if the Docker daemon is not currently running, and will print the appropriate [system-wide information](https://docs.docker.com/engine/reference/commandline/info/) otherwise.
-- Occasionally, not having enough memory allocated to the Docker container can cause the `server_endpoint` to fail. This may cause unexpected behavior during the pick-and-place task, such as constantly predicting the same pose. If this occurs, check your Docker settings. You may need to increase the `Memory` to 8GB.
- - This can be found in Docker Desktop settings, under the gear icon.
+- `Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?` The system-independent `docker info` command can verify whether or not Docker is running. This command will throw a `Server: ERROR` if the Docker daemon is not currently running, and will print the appropriate [system-wide information](https://docs.docker.com/engine/reference/commandline/info/) otherwise.
+- Occasionally, not having enough memory allocated to the Docker container can cause the `server_endpoint` to fail. This may cause unexpected behavior during the pick-and-place task, such as constantly predicting the same pose. If this occurs, check your Docker settings. You may need to increase the `Memory` to 8GB.
+ - This can be found in Docker Desktop settings, under the gear icon.
- `Exception Raised: unpack requires a buffer of 4 bytes`: This may be caused by a mismatch in the expected Service Request formatting. Ensure that the [srv definition](../ROS/src/ur3_moveit/srv/MoverService.srv) matches the [generated C# script](../PoseEstimationDemoProject/Assets/TutorialAssets/RosMessages/Ur3Moveit/srv/MoverServiceRequest.cs), and that you have not modified these files since the last push to your ROS workspace.
### ROS Workspace
-- If the `catkin_make` command is failing, ensure you are specifying which packages to build (i.e. `catkin_make -DCATKIN_WHITELIST_PACKAGES="moveit_msgs;ros_tcp_endpoint;ur3_moveit;robotiq_2f_140_gripper_visualization;ur_description;ur_gazebo"`).
+- If the `catkin_make` command is failing, ensure you are specifying which packages to build (i.e. `catkin_make -DCATKIN_WHITELIST_PACKAGES="moveit_msgs;ros_tcp_endpoint;ur3_moveit;robotiq_2f_140_gripper_visualization;ur_description;ur_gazebo"`).
- If the problem persists, add the `-j1` flag to the `catkin_make` command.
### Ubuntu
diff --git a/Model/README.md b/Model/README.md
index c5302f1..be3dfcd 100644
--- a/Model/README.md
+++ b/Model/README.md
@@ -1,8 +1,8 @@
Object Pose Estimation Model
=====================
-This section contains code for training and evaluating a deep neural network to predict the pose of a single object from RGB images. We provide support for running both locally and with Docker.
+This section contains code for training and evaluating a deep neural network to predict the pose of a single object from RGB images. We provide support for running both locally and with Docker.
-This model is a modified implementation of [Domain Randomization for Transferring Deep Neural Networks from Simulation to the Real World](https://arxiv.org/pdf/1703.06907.pdf), by Tobin et. al. It is based on the classic VGG-16 backbone architecture, and initialized with weights pre-trained on the ImageNet dataset. The head of the network is replaced with a 3D position prediction head that outputs (x, y, z), and an orientation predicton head that outputs a quaternion (qx, qy, qz, qw).
+This model is a modified implementation of [Domain Randomization for Transferring Deep Neural Networks from Simulation to the Real World](https://arxiv.org/pdf/1703.06907.pdf), by Tobin et al. It is based on the classic VGG-16 backbone architecture, and initialized with weights pre-trained on the ImageNet dataset. The head of the network is replaced with a 3D position prediction head that outputs (x, y, z), and an orientation prediction head that outputs a quaternion (qx, qy, qz, qw).
@@ -23,19 +23,19 @@ We've provided a pre-trained model, which can be downloaded [here](https://githu
This model supports a `train` and an `evaluate` command. Both of these have many arguments, which you can examine in `cli.py`. They will default to the values in `config.yaml` for convenience, but can be overridden via the command line.
The most important `train` arguments to be aware of are:
-* `--data_root`: Path to the directory containing your data folders. These directory should include `UR3_single_cube_training` and `UR3_single_cube_validation`, containing the training and validation data, respectively.
+* `--data_root`: Path to the directory containing your data folders. This directory should include `UR3_single_cube_training` and `UR3_single_cube_validation`, containing the training and validation data, respectively.
* `--log-dir-system`: Path to directory where you'd like to save Tensorboard log files and model checkpoint files.
The most important `evaluate` arguments to be aware of are:
-* `--load-dir-checkpoint`: Path to model to be evaluated.
-* `--data_root`: Path to the directory containing your data folders. These directory should include `UR3_single_cube_training` and `UR3_single_cube_validation`, containing the training and validation data, respectively.
+* `--load-dir-checkpoint`: Path to the model to be evaluated.
+* `--data_root`: Path to the directory containing your data folders. This directory should include `UR3_single_cube_training` and `UR3_single_cube_validation`, containing the training and validation data, respectively.
## Performance
Below is a description of the model's performance on predicting the pose of a cube. For the loss, we used the L2 norm for the position and orientation in each batch.
-However, we used different metrics to _evaluate_ the performance of the model.
+However, we used different metrics to _evaluate_ the performance of the model.
* To evaluate translation predictions, we measured the [average L2 norm over the dataset](pose_estimation/evaluation_metrics/translation_average_mean_square_error.py).
* To evaluate the orientation predictions, we used the angle between the orientation of the prediction and the orientation of the target, averaged over the dataset (implementation [here](pose_estimation/evaluation_metrics/orientation_average_quaternion_error.py)).
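+For reference, below is a small sketch of how metrics of this kind are typically computed. The repository's own implementations live in the files linked above and may differ in detail.
+
+```python
+import numpy as np
+
+
+def translation_error(predicted, target):
+    """Average L2 norm between predicted and target positions, arrays of shape (N, 3)."""
+    predicted, target = np.asarray(predicted), np.asarray(target)
+    return np.linalg.norm(predicted - target, axis=-1).mean()
+
+
+def orientation_error(predicted, target):
+    """Average angle (radians) between predicted and target unit quaternions, shape (N, 4)."""
+    predicted, target = np.asarray(predicted), np.asarray(target)
+    dots = np.clip(np.abs(np.sum(predicted * target, axis=-1)), 0.0, 1.0)
+    return (2.0 * np.arccos(dots)).mean()
+```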
@@ -43,9 +43,9 @@ However, we used different metrics to _evaluate_ the performance of the model.
| | Training Error | Validation Error |
|:-------------------:|:---------------------------:|:--------------------------:|
|Translation | 0.012 (12% of cube's size) | 0.01 (10% of cube's size) |
-|Orientation (radian) | 0.06 | 0.05 |
+|Orientation (radian) | 0.06 | 0.05 |
-> Note: Data for the above experiment was collected in Unity 2020.2.1f1.
+> Note: Data for the above experiment was collected in Unity 2020.2.1f1.
## Unit Testing
diff --git a/Model/documentation/codebase_structure.md b/Model/documentation/codebase_structure.md
index 94ac075..2589386 100644
--- a/Model/documentation/codebase_structure.md
+++ b/Model/documentation/codebase_structure.md
@@ -1,20 +1,20 @@
Codebase Structure
==================
-In this project, I create a network to predict the position of a cube.
+In this project, I create a network to predict the pose (position and orientation) of a cube.
### Architecture
The pose estimation project is organized as follows.
PoseEstimationModel:
* [environment-gpu.yml](../environment-gpu.yml):
- If the computer you are runnning the project from **has a gpu support**, this file sets the dependencies of the project and the different packages to install. It is meant to be used when you create your conda environment.
+  If the computer you are running the project from **has GPU support**, this file sets the dependencies of the project and the different packages to install. It is meant to be used when you create your conda environment.
* [environment.yml](../environment.yml):
- If the computer you are runnning the project from **does not have a gpu support**, this file sets the dependencies of the project and the different packages to install. It is meant to be used when you create your conda environment.
+  If the computer you are running the project from **does not have GPU support**, this file sets the dependencies of the project and the different packages to install. It is meant to be used when you create your conda environment.
* [setup.py](../setup.py):
- This file is to create a package as your project.
+  This file packages the project so that it can be installed as a Python package.
* [cli.py](../pose_estimation/cli.py):
This file contains the cli commands which are the commands to launch the different processes (either train or evaluate).
@@ -23,15 +23,15 @@ PoseEstimationModel:
This file contains the default configuration for the estimator (pose estimation model) on the single cube dataset.
* [single_cube_dataset.py](../pose_estimation/single_cube_dataset.py):
- This file contains knowledge on how the SingleCubeDataset class dataset should be loaded into memory.
+  This file defines the SingleCubeDataset class and how the dataset should be loaded into memory.
* [model.py](../pose_estimation/model.py):
- This file contains the neural network along with the custom linear activation function to perform
- the pose estimation task: predict the object's translation (coordinates x, y, z of the cube's center)
+ This file contains the neural network along with the custom linear activation function to perform
+ the pose estimation task: predict the object's translation (coordinates x, y, z of the cube's center)
  and the cube's orientation (quaternion describing the orientation of the cube) if the object is asymmetric; otherwise it will predict only the translation.
* [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py):
- This file contains the pose estimation estimator and the different methods you can apply on your model as train, evaluate, save and
+  This file contains the pose estimation estimator and the different methods you can apply to your model, such as train, evaluate, save and
load.
* [train.py](../pose_estimation/train.py):
@@ -44,7 +44,7 @@ PoseEstimationModel:
This module contains metrics used by the pose estimation estimator.
* [logger.py](../pose_estimation/logger.py):
- This module contains the logger class which is a class designed to save elements (metrics, losses) visible on tensorboard.
+  This module contains the logger class, which is designed to save elements (metrics, losses) for viewing in TensorBoard.
* [storage](../pose_estimation/storage):
This module contains functionality that relates to
@@ -54,19 +54,19 @@ PoseEstimationModel:
This module contains all the tests which you can run using the [pytest command](../README.md#unit-testing).
* [Dockerfile](../Dockerfile):
- This file is the file reponsible for the creation of the docker image.
+  This file is responsible for creating the Docker image.
* [kubeflow](../kubeflow/):
This module contains kubeflow pipelines ([.py.tar.gz](../kubeflow/train_pipeline.py) files). You can have more information on how to set up a kubeflow pipeline in the [ReadMe](../kubeflow/README.md).
-### Details of the config.py file
-In the following, I will explain what each argument in the [config.yaml](../config.yaml) means.
-There are 8 sections in the config files:
+### Details of the config.yaml file
+In the following, I will explain what each argument in the [config.yaml](../config.yaml) means.
+There are 8 sections in the config file:
* **estimator**: This will be the core name of the saved model
-* _**train**_:
+* _**train**_:
- **dataset_zip_file_name_training**: name of the training dataset file.
- **batch_training_size**: number of training samples to work through before the model’s internal parameters are updated.
@@ -75,7 +75,7 @@ There are 8 sections in the config files:
- **epochs**: number of passes of the entire training dataset the machine learning algorithm has completed.
- - **beta_loss**: beta coefficient when we add the translation and orientation losses.
+ - **beta_loss**: beta coefficient when we add the translation and orientation losses.
- **sample_size_train**: size of a dataset training sample. It is used to test operations/commands on a few examples.
@@ -84,7 +84,7 @@ There are 8 sections in the config files:
- **dataset_zip_file_name_validation**: name of the validation dataset file.
- **batch_validation_size**: number of validation samples to work through before the metrics are calculated.
-
+
  - **eval_freq**: frequency of epochs when the evaluation process is launched.
- **sample_size_val**: size of a dataset validation sample. It is used to test operations/commands on a few examples.
@@ -107,8 +107,8 @@ There are 8 sections in the config files:
- **pose_estimation_gcs_path**: path inside the gcp bucket where the datasets are located.
- - **symmetric**: Boolean. If the object is symmetric then the element is True otherwise it is False. Based on that we will only predict the translation
- or translation and orientation.
+  - **symmetric**: Boolean. True if the object is symmetric, False otherwise. If the object is symmetric we predict only the translation; otherwise we predict both the translation and the orientation.
* _**adam_optimizer**_:
@@ -119,13 +119,13 @@ There are 8 sections in the config files:
- **beta_2**: the exponential decay rate for the second-moment estimates.
-* _**checkpoint**_:
-
+* _**checkpoint**_:
+
- **load_dir_checkpoint**: path towards the saved model.
  - **save_frequency**: frequency of epochs when the model is saved. If it is set to 1, the model will be saved every epoch; if it is set to 2, the model will be saved every two epochs.
-* _**system**_:
+* _**system**_:
  - **log_dir_system**: path where the model and the metrics (.tar file that can be viewed with TensorBoard) will be saved.
@@ -133,9 +133,9 @@ There are 8 sections in the config files:
### Save and Load methods
-In the [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py) file, there is one method to save and one method to load a model.
+In the [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py) file, there is one method to save and one method to load a model.
-The save method is called in [train.py](../pose_estimation/train.py) file at the line 95 and the load method is called in the [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py) line 82. The model is saved using the save method of the checkpointer object and the model is loaded using the load method of the checkpointer object. The checkpointer object is created in [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py) file line 50. Then, to understand how the model is saved or loaded we need to look into the [checkpoint.py](../pose_estimation/storage/checkpoint.py) file.
+The save method is called in the [train.py](../pose_estimation/train.py) file at line 95, and the load method is called in [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py) at line 82. The model is saved and loaded using the save and load methods of the checkpointer object, which is created in the [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py) file at line 50. To understand how the model is saved or loaded, we need to look into the [checkpoint.py](../pose_estimation/storage/checkpoint.py) file.
But first, let's have a general overview of the [checkpoint.py](../pose_estimation/storage/checkpoint.py) file. There are three classes:
- **EstimatorCheckpoint**: it assigns `estimator checkpoint writer` according to `log_dir` which is responsible for saving estimators. The writer can be a GCS or local writer. It also assigns `loader` which is responsible for loading estimator from a given path. Loader can be a local, GCS or HTTP loader.
@@ -148,7 +148,7 @@ When the EstimatorCheckpoint object is created, the static method `_create_write
Now you have two options to save your model: either locally or on Google Cloud (you can use another cloud provider, but you will have to make the changes yourself).
Then, if we go back to the method called to save the model, it is the `save` method of the `EstimatorCheckpoint` object. This method calls the `save` method of the object created by the `_create_writer` method.
-* `local`: the class `LocalEstimatorWriter` takes as attributes a `dirname` which is the `log_dir` path, a `prefix` which is the name of the estimator (corresponds to the argument `estimator` in the [config.yaml](../config.yaml) file), and a `suffix` which is by default equal to `.tar` (type of the file) and create a directory which will host the model. Then, the method `save` calls the method `save` of the estimator which in the [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py) file.
+* `local`: the class `LocalEstimatorWriter` takes as attributes a `dirname`, which is the `log_dir` path; a `prefix`, which is the name of the estimator (corresponding to the `estimator` argument in the [config.yaml](../config.yaml) file); and a `suffix`, which is by default equal to `.tar` (the file type). It creates a directory which will host the model. Then, the `save` method calls the `save` method of the estimator, which is in the [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py) file.
* `gcp`: The class `GCSEstimatorWriter` takes as attributes `cloud_path` which is the `log_dir` path towards the gcp bucket and a `prefix` which is the name of the estimator (corresponds to the argument `estimator` in the [config.yaml](../config.yaml) file). In the method `save`, the model is saved on a temporary directory on the local computer that the cloud uses. The process used is the process I just described a little bit above in `local`. The `save` method returns the full GCS cloud path to the saved checkpoint file. Then the method `upload` from the [GCSClient()](../pose_estimation/storage/gcs.py) class is called: it is a method to upload files on Google Cloud Platform.
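+As an illustration of the dispatch described above, here is a simplified sketch (not the repository's actual code; the class and method signatures are assumptions, and GCS paths are assumed to use the usual `gs://` prefix):
+
+```python
+class LocalEstimatorWriter:
+    """Simplified stand-in: writes checkpoint files under a local directory."""
+    def __init__(self, dirname, prefix, suffix=".tar"):
+        self.dirname, self.prefix, self.suffix = dirname, prefix, suffix
+
+    def save(self, estimator, epoch):
+        ...  # write the checkpoint file under dirname, named from prefix and suffix
+
+
+class GCSEstimatorWriter:
+    """Simplified stand-in: saves to a temporary local file, then uploads it to the bucket."""
+    def __init__(self, cloud_path, prefix):
+        self.cloud_path, self.prefix = cloud_path, prefix
+
+    def save(self, estimator, epoch):
+        ...  # save locally, then upload the checkpoint to the GCS bucket
+
+
+class EstimatorCheckpoint:
+    def __init__(self, estimator_name, log_dir):
+        self._writer = self._create_writer(estimator_name, log_dir)
+
+    @staticmethod
+    def _create_writer(estimator_name, log_dir):
+        # GCS paths get a GCS writer; anything else falls back to a local writer.
+        if log_dir.startswith("gs://"):
+            return GCSEstimatorWriter(cloud_path=log_dir, prefix=estimator_name)
+        return LocalEstimatorWriter(dirname=log_dir, prefix=estimator_name)
+
+    def save(self, estimator, epoch):
+        # Delegate to whichever writer was selected at construction time.
+        return self._writer.save(estimator, epoch)
+```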
diff --git a/Model/documentation/running_on_docker.md b/Model/documentation/running_on_docker.md
index 0152ae9..695adde 100644
--- a/Model/documentation/running_on_docker.md
+++ b/Model/documentation/running_on_docker.md
@@ -1,17 +1,17 @@
-Docker
+Docker
======
-Another option to run the project is to use a Docker image. This option allows you to avoid downloading the project's libraries to your local computer, while still running the project successfully. With a Docker image, you also have the ability to train or evaluate your model on a cloud platform, such as Google Cloud Platform, AWS, Microsoft Cloud, and many others.
+Another option to run the project is to use a Docker image. This option allows you to avoid downloading the project's libraries to your local computer, while still running the project successfully. With a Docker image, you also have the ability to train or evaluate your model on a cloud platform, such as Google Cloud Platform, AWS, Microsoft Cloud, and many others.
## Docker Requirements
-You will need to have [Docker](https://docs.docker.com/get-docker/) installed on your computer.
+You will need to have [Docker](https://docs.docker.com/get-docker/) installed on your computer.
-### Running with Docker
+### Running with Docker
-* **Action**: In [config.yaml](../config.yaml), under `system`, set the argument `log_dir_system` to: `/save/single_cube`.
-* **Action**: Set the argument `data_root` under `system` to `/data`.
+* **Action**: In [config.yaml](../config.yaml), under `system`, set the argument `log_dir_system` to: `/save/single_cube`.
+* **Action**: Set the argument `data_root` under `system` to `/data`.
-Before creating the Docker image, you need to be sure your Docker settings are compatible with the project. Open Docker Desktop, click on `Settings` (the gear icon) on the top right, and go to `Resources`. Then change your settings so that it matches the following:
+Before creating the Docker image, you need to be sure your Docker settings are compatible with the project. Open Docker Desktop, click on `Settings` (the gear icon) on the top right, and go to `Resources`. Then change your settings so that they match the following:
@@ -22,11 +22,11 @@ Before creating the Docker image, you need to be sure your Docker settings are c
The first step is to build the Docker image.
* **Action**: Open a new terminal and navigate to the `Robotics-Object-Pose-Estimation/Model` folder. Then run the command to build your docker image, and name it `pose_estimation`:
-```bash
+```bash
docker build -t pose_estimation .
```
-**Note**: If you change any code in the `Model` directory, you will need to rebuild the Docker image.
+**Note**: If you change any code in the `Model` directory, you will need to rebuild the Docker image.
* **Action**: Now we need to run the Docker image. One way is to use the bash shell. Still in the same terminal, enter the following:
```bash
@@ -35,29 +35,29 @@ docker run -it -v [FULL PATH TO DATA FOLDER]:/data -v [FULL PATH TO MODEL FOLDER
The `FULL PATH TO DATA FOLDER` is the path to the upper directory of your data. As an example, I have put my `UR3_single_cube_training` and `UR3_single_cube_validation` data folder into a folder called `data` that I have created in my `Documents` folder. Thus my `FULL PATH TO DATA FOLDER` will be `/Users/jonathan.leban/Documents/data`.
-The `FULL PATH TO MODEL FOLDER` is the directory in which your models and metrics will be saved. For me, I created a folder called `save` into my Documents.
-The `/save/single_cube` directory is the directory inside the docker container. That is why in the [config.yaml](../config.yaml) file, under the argument `system` the argument `log_dir_system` is set to `/save/single_cube`.
+The `FULL PATH TO MODEL FOLDER` is the directory in which your models and metrics will be saved. In my case, I created a folder called `save` in my `Documents` folder.
+The `/save/single_cube` directory is the directory inside the Docker container. That is why, in the [config.yaml](../config.yaml) file, the `log_dir_system` argument under `system` is set to `/save/single_cube`.
-Thus, the final command for me is:
+Thus, the final command for me is:
```bash
docker run -it -v /Users/jonathan.leban/Documents/data:/data -v /Users/jonathan.leban/Documents/save:/save/single_cube pose_estimation bash
```
-### CLI
-At the top of the [cli.py](../pose_estimation/cli.py) file, you can see the documentation for all supported commands.
+### CLI
+At the top of the [cli.py](../pose_estimation/cli.py) file, you can see the documentation for all supported commands.
#### Train
To run the training command with default values:
-* **Action**:
-```bash
+* **Action**:
+```bash
python -m pose_estimation.cli train
```
-You can override many hyperparameters by adding additional arguments to this command. See [cli.py](../pose_estimation/cli.py) for a view of all supported arguments.
+You can override many hyperparameters by adding additional arguments to this command. See [cli.py](../pose_estimation/cli.py) for a view of all supported arguments.
-#### Evaluate
+#### Evaluate
To run the evaluate command:
```bash
@@ -66,8 +66,8 @@ python -m pose_estimation.cli evaluate --load-dir-checkpoint=/save/single_cube/U
Again, you can override many hyperparameters by adding additional arguments to this command. See [cli.py](../pose_estimation/cli.py) for a view of all supported arguments.
-### Copy metrics and models saved on Docker on your local machine
-Once you have trained or evaluated your model, you may want to copy the results out of the docker container, to your local computer.
+### Copy metrics and models saved in Docker to your local machine
+Once you have trained or evaluated your model, you may want to copy the results out of the Docker container to your local computer.
After building and running the docker image your terminal should look something like this:
@@ -75,26 +75,26 @@ After building and running the docker image your terminal should look something
-Here you can see on the right of `root@` the id of the docker container you are in. Copy this id.
+Here, to the right of `root@`, you can see the ID of the Docker container you are in. Copy this ID.
-As a reminder, we want to extract some files of `save/single_cube/` inside the docker container into your `save` folder you have created on your local computer.
-Open a new terminal and enter the following:
+As a reminder, we want to extract some files from `save/single_cube/` inside the Docker container into the `save` folder you created on your local computer.
+Open a new terminal and enter the following:
```bash
docker cp <container id>:<path of the file or folder inside the container> <destination path on your local machine>