Merge pull request #54 from Unity-Technologies/dev

AIRO-1654 Merge dev to main
This commit is contained in:
Amanda 2022-01-26 13:06:54 -08:00 committed by GitHub
Parents 7558d74b21 ba7e69559b
Commit 45156479be
No key found matching this signature
GPG key ID: 4AEE18F83AFDEB23
58 changed files: 1972 additions and 1787 deletions

2
.github/PULL_REQUEST_TEMPLATE.md vendored
View File

@ -16,7 +16,7 @@ Provide any relevant links here.
## Testing and Verification
Please describe the tests that you ran to verify your changes. Please also provide instructions, ROS packages, and Unity project files as appropriate so we can reproduce the test environment.
### Test Configuration:
- Unity Version: [e.g. Unity 2020.2.0f1]

22
.github/workflows/jira-link.yaml vendored Normal file
View File

@ -0,0 +1,22 @@
name: jira-link
on:
pull_request:
types: [opened, edited, reopened, synchronize]
jobs:
jira-link:
runs-on: ubuntu-20.04
steps:
- name: check pull request title and source branch name
run: |
echo "Checking pull request with title ${{ github.event.pull_request.title }} from source branch ${{ github.event.pull_request.head.ref }}"
if ! [[ "${{ github.event.pull_request.title }}" =~ ^AIRO-[0-9]+[[:space:]].*$ ]] && ! [[ "${{ github.event.pull_request.head.ref }}" =~ ^AIRO-[0-9]+.*$ ]]
then
echo -e "Please make sure one of the following is true:\n \
1. the pull request title starts with 'AIRO-xxxx ', e.g. 'AIRO-1024 My Pull Request'\n \
2. the source branch starts with 'AIRO-xxx', e.g. 'AIRO-1024-my-branch'"
exit 1
else
echo "Completed checking"
fi

20
.github/workflows/pre-commit.yaml vendored Normal file
View File

@ -0,0 +1,20 @@
name: pre-commit
on:
pull_request:
push:
branches: [dev]
jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
with:
python-version: 3.7.x
- uses: actions/setup-dotnet@v1
with:
dotnet-version: '6.0.x'
include-prerelease: true
- uses: pre-commit/action@v2.0.0

27
.github/workflows/stale.yaml vendored Normal file
View File

@ -0,0 +1,27 @@
name: 'Stale issue handler'
on:
workflow_dispatch:
schedule:
- cron: '0 17 * * *' # 17:00 UTC; 10:00 PDT
permissions:
issues: write
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4.0.0
id: stale
with:
stale-issue-label: 'stale'
stale-issue-message: 'This issue has been marked stale because it has been open for 14 days with no activity. Please remove the stale label or comment on this issue, or the issue will be automatically closed in the next 14 days.'
days-before-stale: 14
days-before-pr-stale: -1
days-before-close: 14
days-before-pr-close: -1
exempt-issue-labels: 'blocked,must,should,keep,pinned,work-in-progress,request,announcement'
close-issue-message: 'This issue has been marked stale for 14 days and will now be closed. If this issue is still valid, please ping a maintainer.'
- name: Print outputs
run: echo ${{ join(steps.stale.outputs.*, ',') }}

33
.pre-commit-config.yaml Normal file
View File

@ -0,0 +1,33 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.0.1
hooks:
- id: mixed-line-ending
exclude: >
(?x)^(
.*cs.meta|
.*.css|
.*.meta|
.*.mat|
.*.preset|
.*.lighting|
.*.dae
)$
args: [--fix=lf]
- id: trailing-whitespace
name: trailing-whitespace-markdown
types: [markdown]
- id: check-merge-conflict
args: [--assume-in-merge]
- id: check-yaml
# Won't handle the templating in yamato
exclude: \.yamato/.*
- repo: https://github.com/dotnet/format
rev: v5.1.225507
hooks:
- id: dotnet-format
entry: dotnet-format whitespace
args: [--folder, --include]

52
.yamato/sonar.yml Normal file
View File

@ -0,0 +1,52 @@
csharp:
name: Sonarqube C# Scan
agent:
type: Unity::metal::macmini
image: package-ci/mac
flavor: m1.mac
variables:
PROJECT_PATH: PoseEstimationDemoProject
SONARQUBE_PROJECT_KEY: ai-robotics-object-pose-estimation-csharp
SONARQUBE_PROJECT_BASE_DIR: /Users/bokken/build/output/Unity-Technologies/Robotics-Object-Pose-Estimation/PoseEstimationDemoProject
MSBUILD_SLN_PATH: ./PoseEstimationDemoProject/PoseEstimationDemoProject.sln
PROJECT_ROOT: /Users/bokken/build/output/Unity-Technologies/Robotics-Object-Pose-Estimation/
UNITY_VERSION: 2020.2.6f1
commands:
- npm install upm-ci-utils@stable -g --registry https://artifactory.prd.it.unity3d.com/artifactory/api/npm/upm-npm
- unity-downloader-cli --wait -u $UNITY_VERSION -c Editor
- brew install mono corretto
- curl https://github.com/SonarSource/sonar-scanner-msbuild/releases/download/5.2.1.31210/sonar-scanner-msbuild-5.2.1.31210-net46.zip -o sonar-scanner-msbuild-net46.zip -L
- unzip sonar-scanner-msbuild-net46.zip -d ~/sonar-scanner-msbuild
- chmod a+x ~/sonar-scanner-msbuild/sonar-scanner-4.6.1.2450/bin/sonar-scanner
- .Editor/Unity.app/Contents/MacOS/Unity -projectPath $PROJECT_PATH -batchmode -quit -nographics -logFile - -executeMethod "UnityEditor.SyncVS.SyncSolution"
- command: |
cd $PROJECT_PATH
for file in *.csproj; do sed -i.backup "s/^[[:blank:]]*<ReferenceOutputAssembly>false<\/ReferenceOutputAssembly>/<ReferenceOutputAssembly>true<\/ReferenceOutputAssembly>/g" $file; rm $file.backup; done
cd $PROJECT_ROOT
- mono ~/sonar-scanner-msbuild/SonarScanner.MSBuild.exe begin /k:$SONARQUBE_PROJECT_KEY /d:sonar.host.url=$SONARQUBE_ENDPOINT_URL_PRD /d:sonar.login=$SONARQUBE_TOKEN_PRD /d:sonar.projectBaseDir=$SONARQUBE_PROJECT_BASE_DIR
- msbuild $MSBUILD_SLN_PATH
- mono ~/sonar-scanner-msbuild/SonarScanner.MSBuild.exe end /d:sonar.login=$SONARQUBE_TOKEN_PRD
triggers:
cancel_old_ci: true
expression: |
((pull_request.target eq "main" OR pull_request.target eq "dev")
AND NOT pull_request.push.changes.all match "**/*.md") OR
(push.branch eq "main" OR push.branch eq "dev")
standard:
name: Sonarqube Standard Scan
agent:
type: Unity::metal::macmini
image: package-ci/mac
flavor: m1.mac
variables:
SONARQUBE_PROJECT_KEY: ai-robotics-object-pose-estimation-standard
commands:
- curl https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-4.6.2.2472-macosx.zip -o sonar-scanner-macosx.zip -L
- unzip sonar-scanner-macosx.zip -d ~/sonar-scanner
- ~/sonar-scanner/sonar-scanner-4.6.2.2472-macosx/bin/sonar-scanner -Dsonar.projectKey=$SONARQUBE_PROJECT_KEY -Dsonar.sources=. -Dsonar.host.url=$SONARQUBE_ENDPOINT_URL_PRD -Dsonar.login=$SONARQUBE_TOKEN_PRD
triggers:
cancel_old_ci: true
expression: |
((pull_request.target eq "main" OR pull_request.target eq "dev")
AND NOT pull_request.push.changes.all match "**/*.md") OR
(push.branch eq "main" OR push.branch eq "dev")

View File

@ -7,7 +7,6 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) a
## Unreleased
### Upgrade Notes
Add collision ignorance to the gripper inner knuckles and switch the PGS solver to the TGS solver
### Known Issues
@ -19,5 +18,33 @@ Add collision ignorance to the gripper inner knuckles and switch the PGS solver
### Removed
### Fixed
## v0.0.2
### Upgrade Notes
Fixed CUDA-device support
Add collision ignorance to the gripper inner knuckles and switch the PGS solver to the TGS solver
### Known Issues
### Added
Added Sonarqube Scanner
Add the [Close Stale Issues](https://github.com/marketplace/actions/close-stale-issues) action
Added linter
### Changed
Linting and style fixes
### Deprecated
### Removed
### Fixed
Update key fetching from Ubuntu keyserver when building the ROS docker image

View File

@ -15,16 +15,16 @@ sudo pip3 install rospkg numpy jsonpickle scipy easydict torch==1.7.1+cu101 torc
```
> Note: If you encounter errors installing Pytorch via the above `pip3` command, try the following instead:
> ```bash
> sudo pip3 install rospkg numpy jsonpickle scipy easydict torch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
> ```
Most of the ROS setup has been provided via the `ur3_moveit` package. This section will describe the provided files.
4. If you have not already built and sourced the ROS workspace since importing the new ROS packages, navigate to your ROS workspace, and run:
```bash
catkin_make -DCATKIN_WHITELIST_PACKAGES="moveit_msgs;ros_tcp_endpoint;ur3_moveit;robotiq_2f_140_gripper_visualization;ur_description;ur_gazebo"
source devel/setup.bash
```
@ -33,7 +33,7 @@ source devel/setup.bash
Ensure there are no unexpected errors.
The ROS parameters will need to be set to your configuration in order to allow the server endpoint to fetch values for the TCP connection.
5. Navigate to your ROS workspace (e.g. `~/catkin_ws`). Assign the ROS IP in the `params.yaml` file as follows:

View File

@ -1,6 +1,6 @@
# Object Pose Estimation Tutorial: Part 1
In this first part of the tutorial, we will start by downloading and installing the Unity Editor. We will install our project's dependencies: the Perception, URDF, and TCP Connector packages. We will then use a set of provided prefabs to easily prepare a simulated environment containing a table, a cube, and a working robot arm.
**Table of Contents**
@ -14,14 +14,14 @@ In this first part of the tutorial, we will start by downloading and installing
### <a name="reqs">Requirements</a>
To follow this tutorial you need to **clone** this repository even if you want to create your Unity project from scratch.
>Note: This project uses Git Submodules to grab the ROS package dependencies for the [`universal_robot`](https://github.com/ros-industrial/universal_robot), [`moveit_msgs`](https://github.com/ros-planning/moveit_msgs), [`ros_tcp_endpoint`](https://github.com/Unity-Technologies/ROS-TCP-Endpoint), and the [`robotiq`](https://github.com/JStech/robotiq/tree/noetic-mods) folders.
>Note: The [`ros-industrial/robotiq`](https://github.com/ros-industrial/robotiq) repository does not currently support ROS Noetic. The [`JSTech/robotiq#noetic-mods`](https://github.com/JStech/robotiq/tree/noetic-mods) fork, which has been updated to use ROS Noetic, is used instead.
1. Open a terminal and navigate to the folder where you want to host the repository.
```bash
git clone --recurse-submodules https://github.com/Unity-Technologies/Robotics-Object-Pose-Estimation.git
```
@ -29,7 +29,7 @@ git clone --recurse-submodules https://github.com/Unity-Technologies/Robotics-Ob
2. [Install Unity `2020.2.*`.](install_unity.md)
### <a name="step-1">Create a New Project</a>
When you first run Unity, you will be asked to open an existing project, or create a new one.
1. Open Unity and create a new project using the **Universal Render Pipeline**. Name your new project _**Pose Estimation Tutorial**_, and specify a desired location as shown below.
@ -53,10 +53,10 @@ We will need to download and install several packages. In general, packages can
- From the top menu bar, open _**Window**_ -> _**Package Manager**_. As the name suggests, the _**Package Manager**_ is where you can download new packages, update or remove existing ones, and access a variety of information and additional actions for each package.
- Click on the _**+**_ sign at the top-left corner of the _**Package Manager**_ window and then choose the option _**Add package from git URL...**_.
- Enter the package address and click _**Add**_.
It can take a few minutes for the manager to download and import packages.
<p align="center">
@ -68,13 +68,13 @@ It can take a few minutes for the manager to download and import packages.
Install the following packages with the provided git URLs:
1. [Perception package](https://github.com/Unity-Technologies/com.unity.perception) - `com.unity.perception@0.8.0-preview.3`
* This will help us collect training data for our machine learning model.
2. [URDF Importer package](https://github.com/Unity-Technologies/URDF-Importer) - `https://github.com/Unity-Technologies/URDF-Importer.git?path=/com.unity.robotics.urdf-importer#v0.2.0-light`
* This package will help us import a robot into our scene from a file in the [Unified Robot Description Format (URDF)](http://wiki.ros.org/urdf).
3. [TCP Connector package](https://github.com/Unity-Technologies/ROS-TCP-Connector) - `https://github.com/Unity-Technologies/ROS-TCP-Connector.git?path=/com.unity.robotics.ros-tcp-connector#v0.2.0-light`
* This package will enable a connection between ROS and Unity.
>Note: If you encounter a Package Manager issue, check the [Troubleshooting Guide](troubleshooting.md) for potential solutions.
@ -107,7 +107,7 @@ The Perception package relies on a "Ground Truth Renderer Feature" to output lab
#### The Scene
Simply put, in Unity a Scene contains any object that exists in the world. This world can be a game, or in this case, a data-collection-oriented simulation. Every new project contains a Scene named `SampleScene`, which is automatically opened when the project is created. This Scene comes with several objects and settings that we do not need, so let's create a new one.
1. In the _**Project**_ tab, right-click on the `Assets/Scenes` folder and click _**Create -> Scene**_. Name this new Scene `TutorialPoseEstimation` and double-click on it to open it.
The _**Hierarchy**_ tab of the editor displays all the Scenes currently loaded, and all the objects currently present in each loaded Scene, as shown below:
<p align="center">
@ -116,33 +116,33 @@ The _**Hierarchy**_ tab of the editor displays all the Scenes currently loaded,
As seen above, the new Scene already contains a camera (`Main Camera`) and a light (`Directional Light`). We will now modify the camera's field of view and position to prepare it for the tutorial.
2. Still in the _**Inspector**_ tab of the `Main Camera`, modify the camera's `Position` and `Rotation` to match the values shown below. This orients the camera so that it will have a good view of the objects we are about to add to the scene.
<p align="center">
<img src="Images/1_camera_settings.png" height=117 width=500/>
</p>
3. Click on `Directional Light` and in the _**Inspector**_ tab, modify the light's `Position` and `Rotation` to match the screenshot below.
<p align="center">
<img src="Images/1_directional_light.png" height=217 width=500/>
</p>
#### Adding Tutorial Files
Now it is time to add some more objects to our scene. Before doing so, we need to import some folders containing the required assets.
4. Download [TutorialAssets.zip](https://github.com/Unity-Technologies/Robotics-Object-Pose-Estimation/releases/download/v0.0.1/TutorialAssets.zip), and unzip it. It should contain the following subfolders: `Materials`, `Prefabs`, `RosMessages`, `Scripts`, `URDFs`.
5. Drag and Drop the `TutorialAssets` folder from your operating system's file explorer onto the `Assets` folder in the _**Project**_ tab of the editor.
Your `Assets` folder should look like this:
<p align="center">
<img src="Images/1_assets_preview.png"/>
</p>
#### Using Prefabs
Unity's [Prefab](https://docs.unity3d.com/2020.2/Documentation/Manual/Prefabs.html) system allows you to create, configure, and store a GameObject complete with all its components, property values, and child GameObjects as a reusable Unity Asset. It is a convenient way to store complex objects.
A Prefab is just a file, and you can easily create an instance of the object in the scene from a Prefab by dragging it into the _**Hierarchy**_ tab.
@ -150,7 +150,7 @@ For your convenience, we have provided Prefabs for most of the components of the
6. In the _**Project**_ tab, go to `Assets/TutorialAssets/Prefabs/Part1` and drag and drop the `Cube` Prefab into the _**Hierarchy**_ tab.
7. Repeat the above action with the `Goal`, `Table` and `Floor` Prefabs.
<p align="center">
@ -161,9 +161,9 @@ For your convenience, we have provided Prefabs for most of the components of the
#### Importing the Robot
Finally, we will add the robot to the Scene by importing the UR3 from the provided URDF files.
8. In the _**Project**_ tab, go to `Assets/TutorialAssets/URDFs/ur3_with_gripper` and right click on the `ur3_with_gripper.urdf` file and select `Import Robot From Selected URDF file`. A window will pop up, keep the default **Y Axis** type and `VHACD` **Mesh Decomposer** in the Import menu. Then, click Import URDF. These actions are shown in the video below.
>Note: Unity uses a left-handed coordinate system in which the y-axis points up. However, many robotics packages use a right-handed coordinate system in which the z-axis or x-axis point up. For this reason, it is important to pay attention to the coordinate system when importing URDF files or interfacing with other robotics software.

View File

@ -5,9 +5,9 @@ In [Part 1](1_set_up_the_scene.md) of the tutorial, we learned:
* How to use the Package Manager to download and install Unity packages
* How to move and rotate objects in the Scene
* How to instantiate GameObjects with Prefabs
* How to import a robot from a URDF file
You should now have a table, a cube, a camera, and a robot arm in your Scene. In this part we will prepare the Scene for data collection with the Perception package.
<p align="center">
<img src="Images/2_Pose_Estimation_Data_Collection.png" width="680" height="520"/>
@ -24,13 +24,13 @@ You should now have a table, a cube, a camera, and a robot arm in your Scene. In
The images you generate to train your deep learning model and the images you later use for inference during the pick-and-place task will need to have the same resolution. We will now set this resolution.
1. In the ***Game*** view, click on the dropdown menu in front of `Display 1`. Then, click **+** to create a new preset. Make sure `Type` is set to `Fixed Resolution`. Set `Width` to `650` and `Height` to `400`. The gif below depicts these actions.
<p align="center">
<img src="Gifs/2_aspect_ratio.gif"/>
</p>
We now need to add a few components to our camera in order to equip it for synthetic data generation.
2. Select the `Main Camera` GameObject in the _**Hierarchy**_ tab and in the _**Inspector**_ tab, click on _**Add Component**_.
@ -40,11 +40,11 @@ We now need to add a few components to our camera in order to equip it for synth
5. From the top menu bar of the editor, go to `Edit > Project Settings > Editor` and uncheck `Asynchronous Shader Compilation` under `Shader Compilation` options.
In the ***Inspector*** view for the `Perception Camera` component, you can see an empty list (`List is Empty`). This is the list of Labelers. For each type of ground-truth you wish to generate alongside your captured frames, you will need to add a corresponding Labeler to this list. In our project we want to extract the position and orientation of an object, so we will use the `BoundingBox3DLabeler`.
There are several other types of Labelers available, and you can even write your own. If you want more information on Labelers, you can consult the [Perception package documentation](https://github.com/Unity-Technologies/com.unity.perception).
6. In the _**Inspector**_ view of the `Perception Camera` component, click on the _**+**_ button at the bottom right corner of the `List is Empty` field, and select `BoundingBox3DLabeler`.
This Labeler will annotate the captured output with 3D bounding boxes of GameObjects in the Scene that are labelled. If the `Perception Camera`'s `Show Labeler Visualizations` option is enabled, these bounding boxes will also be visualized in real-time in the ***Scene*** view as data is generated. We will next learn how to set up this Labeler.
@ -59,7 +59,7 @@ Once you add the Labeler, the ***Inspector*** view of the `Perception Camera` co
Our work above prepares us to collect RGB images from the camera and associated 3D bounding boxes for objects in our Scene. However, we still need to specify _which_ objects we'd like to collect poses for using the Labeler we added. In this tutorial, we will only collect the pose of the cube, but you can add more objects if you'd like.
You will notice that the `BoundingBox3DLabeler` component has a field named `Id Label Config`. The label configuration we link here will determine which objects' poses get saved in our dataset.
1. In the _**Project**_ tab, right-click the `Assets` folder, then click `Create -> Perception -> Id Label Config`.
@ -67,7 +67,7 @@ This will create a new asset file named `IdLabelConfig` inside the `Assets` fold
This type of label configuration includes a list of labels, each with a numerical ID. By assigning this configuration to a Labeler, we tell the Labeler to only capture objects that carry labels that are included in the configuration's list of labels, and ignore the rest of the objects in the Scene. We will now assign this configuration to the `BoundingBox3DLabeler` we just added to the `Perception Camera` component.
2. Select the `Main Camera` object from the _**Hierarchy**_ tab, and in the _**Inspector**_ tab, assign the newly created `IdLabelConfig` asset to the `Id Label Config` field. To do so, you can either drag and drop the former into the corresponding field of the Labeler, or click on the small circular button in front of the `Id Label Config` field, which brings up an asset selection window filtered to only show compatible assets.
The `Perception Camera` component will now look like the image below:
@ -75,13 +75,13 @@ The `Perception Camera` component will now look like the image below:
<img src="Images/2_final_perception_script.png" height=450/>
</p>
Now we need to assign a label to the `Cube` object, and add the same label to `IdLabelConfig`, since it is the pose of the cube we wish to collect.
3. Select the `Cube` GameObject and in the _**Inspector**_ tab, click on the _**Add Component**_ button.
4. Start typing `Labeling` in the search bar that appears, until the `Labeling` script is found, with a **#** icon to the left. Click on this script.
5. In the UI that appears, click the **Add New Label** button and change `New Label` to `cube_position`. Then, click on `Add to Label Config...`, and below `Other Label Configs in Project` there should be `IdLabelConfig`. Click on `Add Label` and then close the window.
The `cube_position` label is now added to both the `Cube` object and the `IdLabelConfig` label configuration.
@ -95,7 +95,7 @@ The _**Inspector**_ view of the `Cube` should look like the following:
### <a name="step-3">Add and Set Up Randomizers</a>
#### Domain Randomization
We will be collecting training data from a simulation, but most real perception use-cases occur in the real world.
To train a model to be robust enough to generalize to the real domain, we rely on a technique called [Domain Randomization](https://arxiv.org/pdf/1703.06907.pdf). Instead of training a model in a single, fixed environment, we _randomize_ aspects of the environment during training in order to introduce sufficient variation into the generated data. This forces the machine learning model to handle many small visual variations, making it more robust.
In this tutorial, we will randomize the position and the orientation of the cube on the table, and also the color, intensity, and position of the light. Note that the Randomizers in the Perception package can be extended to many other aspects of the environment as well.
@ -106,21 +106,21 @@ To start randomizing your simulation, you will first need to add a **Scenario**
1. In the _**Hierarchy**_, click the **+** button and select `Create Empty`. Rename the newly created GameObject `Simulation Scenario`.
2. Select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, click on the _**Add Component**_ button. Start typing `Pose Estimation Scenario` in the search bar that appears, until the `Pose Estimation Scenario` script is found, with a **#** icon to the left. Click on the script.
3. Still in the _**Inspector**_ tab of the `Simulation Scenario` GameObject, ensure the `Automatic Iteration` flag is enabled.
<p align="center">
<img src="Images/2_scenario_auto.png" height=500/>
</p>
Each Scenario executes a number of Iterations, and each Iteration carries on for a number of frames. These are timing elements you can leverage in order to customize your Scenarios and the timing of your randomizations.
#### Writing our Custom Object Rotation Randomizer
The randomization workflow involves two types of C# classes: Randomizers and RandomizerTags. Randomizers are added to the Scenario and perform the actual randomization tasks, while RandomizerTags are attached to objects in the scene, so that Randomizers know which objects to target. One Randomizer can target many different RandomizerTags.
First, we will write a Randomizer to randomly rotate the cube around its y-axis on each Iteration of the Scenario.
4. In the _**Project**_ tab, right-click on the **Scripts** folder and select `Create -> C# Script`. Name your new script file `YRotationRandomizer`.
@ -163,15 +163,15 @@ public class YRotationRandomizer : Randomizer
```
The purpose of this piece of code is to rotate a set of objects randomly about their y-axes every Iteration. In Unity, the y-axis points "up".
>Note: If you look at the ***Console*** tab of the editor now, you will see an error regarding `YRotationRandomizerTag` not being found. This is to be expected, since we have not yet created this class; the error will go away once we create the class later.
Let's go through the code above and understand each part:
* Near the top, you'll notice the line `[AddRandomizerMenu("Perception/Y Rotation Randomizer")]`. This will give the Randomizer a name in the UI which will be used when we add the Randomizer to our `Pose Estimation Scenario`.
* The `YRotationRandomizer` class extends `Randomizer`, which is the base class for all Randomizers that can be added to a Scenario. This base class provides a plethora of useful functions and properties that can help catalyze the process of creating new Randomizers.
* The `FloatParameter` field contains a seeded random number generator. We can set the sampling range and the distribution of this value in the editor UI for the Randomizer.
* The `OnIterationStart()` function is a life-cycle method on all `Randomizer`s. It is called by the Scenario every Iteration (e.g. once per frame, if each Iteration runs for one frame).
* The `tagManager` is an object available to every `Randomizer` which helps us find GameObjects tagged with a given `RandomizerTag`. In our case, we query the `tagManager` to gather references to all the `YRotationRandomizerTag`s currently present in the Scene.
* We then loop through these `tags` to rotate the object that each tag is attached to:
* `rotationRange.Sample()` gives us a random float in the specified range.
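For reference, a minimal sketch of such a Randomizer is shown below. The full script is included in the tutorial's `TutorialAssets/Scripts` folder; the `using` directives here assume the namespace layout of `com.unity.perception@0.8.0-preview.3`, so treat this as an illustration rather than a verbatim copy of the shipped file.

```csharp
using System;
using UnityEngine;
using UnityEngine.Perception.Randomization.Parameters;
using UnityEngine.Perception.Randomization.Randomizers;
using UnityEngine.Perception.Randomization.Samplers;

[Serializable]
[AddRandomizerMenu("Perception/Y Rotation Randomizer")]
public class YRotationRandomizer : Randomizer
{
    // Seeded random number generator; its range and distribution are configured in the editor UI
    public FloatParameter rotationRange = new FloatParameter { value = new UniformSampler(0f, 360f) };

    // Called by the Scenario at the start of every Iteration
    protected override void OnIterationStart()
    {
        // Gather every object in the Scene carrying a YRotationRandomizerTag
        var tags = tagManager.Query<YRotationRandomizerTag>();
        foreach (var tag in tags)
        {
            // Sample a random angle and ask the tag to apply it around the y-axis
            tag.SetYRotation(rotationRange.Sample());
        }
    }
}
```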
@ -199,7 +199,7 @@ public class YRotationRandomizerTag : RandomizerTag
}
```
The `Start` method is automatically called once, at runtime, before the first frame. Here, we use the `Start` method to save this object's original rotation in a variable. When `SetYRotation` is called by the Randomizer every Iteration, it updates the rotation around the y-axis, but keeps the x and z components of the rotation the same.
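A matching sketch of the tag component described above might look like the following; again, this approximates the script shipped in `TutorialAssets/Scripts` rather than reproducing it exactly.

```csharp
using UnityEngine;
using UnityEngine.Perception.Randomization.Randomizers;

public class YRotationRandomizerTag : RandomizerTag
{
    Vector3 originalRotation;

    void Start()
    {
        // Runs once before the first frame: remember the object's starting rotation
        originalRotation = transform.eulerAngles;
    }

    public void SetYRotation(float yRotation)
    {
        // Replace only the y component; the original x and z rotations are preserved
        transform.eulerAngles = new Vector3(originalRotation.x, yRotation, originalRotation.z);
    }
}
```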
#### Adding our Custom Object Rotation Randomizer
@ -212,7 +212,7 @@ If you return to your list of Randomizers in the Inspector view of `Simulation S
<img src="Images/2_y_rotation_randomizer.png" height=100/>
</p>
10. Select the `Cube` GameObject and in the _**Inspector**_ tab, add a `YRotationRandomizerTag` component.
<p align="center">
<img src="Gifs/2_y_rotation_randomizer_settings.gif" height=550 width=900/>
@ -227,23 +227,23 @@ If you return to your list of Randomizers in the Inspector view of `Simulation S
#### Randomizing Object Positions
It is great that we can now rotate the cube, but we also want to move it around the table. However, not all positions on the table are valid - we also need it to be within the robot arm's reach.
To save time, we have provided a pre-written custom Randomizer to do this.
12. Select the `Simulation Scenario` GameObject, and do the following:
* In the _**Inspector**_ tab, on the `Pose Estimation Scenario` component, click `Add Randomizer` and start typing `RobotArmObjectPositionRandomizer`.
* Set `Min Robot Reachability` to `0.2` and `Max Robot Reachability` to `0.4`.
* On the `Plane` field, click on the circular button to the right side and start typing `ObjectPlacementPlane` and then double click on the GameObject that appears.
* Drag and drop the base of the robot from the ***Hierarchy*** (the `ur3_with_gripper/world/base_link/base` object) to the `Robot Base` field.
<p align="center">
<img src="Gifs/2_robot_randomizer_settings.gif" height=658 width=950/>
</p>
13. Now we need to add the corresponding RandomizerTag to the cube.
* Select the `Cube` GameObject and in the _**Inspector**_ tab, click on the _**Add Component**_ button. Start typing `RobotArmObjectPositionRandomizerTag` in the search bar that appears, until the `RobotArmObjectPositionRandomizerTag` script is found, with a **#** icon to the left. Click on the script.
* In the UI for this new component, enable the `Must Be Reachable` property.
The `RobotArmObjectPositionRandomizerTag` component should now look like this:
@ -259,15 +259,15 @@ If you press **▷** (play) now, you should see the `Cube` and `Goal` objects mo
#### Light Randomizer
Now we will add another Randomizer to introduce some variation into the Scene's lighting.
14. Select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, on the `Pose Estimation Scenario` component, click on `Add Randomizer` and start typing `LightRandomizer`.
* For the range parameter of `Light Intensity Parameter`, set `Min` to `0.9` and `Max` to `1.1`.
* For the range parameter of `Rotation X`, set `Min` to `40` and `Max` to `80`.
* For the range parameter of `Rotation Y`, set `Min` to `-180` and `Max` to `180`.
* For the range parameters of `Red`, `Green` and `Blue` inside of `Light Color Parameter`, set `Min` to `0.5`.
The Randomizer should now look like this:
<p align="center">
<img src="Images/2_light_randomizer_settings.png" height=450/>
@ -275,7 +275,7 @@ The Randomizer should now look like this:
15. Now we need to add a RandomizerTag to the light. Select the `Directional Light` GameObject and in the _**Inspector**_ tab, click on the _**Add Component**_ button. Start typing `LightRandomizerTag` in the search bar that appears, until the `LightRandomizerTag` script is found, with a **#** icon to the left. Click the script.
To view this script, you can right click on the three dots at the right end and select `Edit Script`.
This Randomizer is a bit different from the previous ones. The line `[RequireComponent(typeof(Light))]` makes it so that you can only add the `LightRandomizerTag` component to an object that already has a **Light** component attached. This way, the Randomizers that query for this tag can be confident that the found objects have a **Light** component.
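To illustrate the mechanism, a stripped-down tag using this pattern could be as small as the sketch below; the real `LightRandomizerTag` in the tutorial assets also exposes setters used by the `LightRandomizer`, so this is only an example of the `RequireComponent` idea, not the actual script.

```csharp
using UnityEngine;
using UnityEngine.Perception.Randomization.Randomizers;

// RequireComponent means Unity refuses to add this tag to a GameObject
// that does not already have a Light component attached.
[RequireComponent(typeof(Light))]
public class LightRandomizerTag : RandomizerTag
{
}
```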
If you press play, you should see that the color, direction, and intensity of the lighting now change with each frame.

View File

@ -5,8 +5,8 @@ In [Part 1](1_set_up_the_scene.md) of the tutorial, we learned how to create our
In [Part 2](2_set_up_the_data_collection_scene.md) of the tutorial, we learned:
* How to equip the camera for the data collection
* How to set up labelling and label configurations
* How to create your own Randomizer
* How to add our custom Randomizer
In this part, we will be collecting a large dataset of RGB images of the Scene, and the corresponding pose of the cube. We will then use this data to train a machine learning model to predict the cube's position and rotation from images taken by our camera. We will then be ready to use the trained model for our pick-and-place task in [Part 4](4_pick_and_place.md).
@ -23,11 +23,11 @@ Steps included in this part of the tutorial:
Now it is time to collect the data: a set of images with the corresponding position and orientation of the cube relative to the camera.
We need to collect data for the training process and data for the validation one.
We have chosen a training dataset of 30,000 images and a validation dataset of 3,000 images.
1. Select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, make sure `Automatic Iteration` is enabled. When this flag is enabled, our Scenario automatically proceeds through Iterations, triggering the `OnIterationStart()` method of all Randomizers on each Iteration. When this flag is disabled, the Iterations would have to be triggered manually.
2. In the ***Inspector*** view of `Pose Estimation Scenario`, set the `Total Frames` field under `Constants` to 30000.
@ -41,24 +41,24 @@ We have chosen a training dataset of 30,000 images and a validation dataset of 3
5. Click _**Show Folder**_ to show and highlight the folder in your operating system's file explorer.
6. Change this folder's name to `UR3_single_cube_training`.
7. Enter the folder
You should then see something similar to this:
<p align="center">
<img src="Images/3_data_logs.png" width = "800"/>
</p>
Now we need to collect the validation dataset.
8. Back in the Unity Editor, select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, in `Pose Estimation Scenario`, set the `Total Frames` field under `Constants` to 3000.
9. Press play and wait until the simulation is done. Once the simulation finishes, follow the same steps as before to navigate to the output folder.
10. Change the folder name where the latest data was saved to `UR3_single_cube_validation`.
11. **(Optional)**: Move the `UR3_single_cube_training` and `UR3_single_cube_validation` folders to a directory of your choice.
## <a name="step-2">Train the Deep Learning Model</a>
@ -70,14 +70,14 @@ This step can take a long time if your computer doesn't have GPU support (~5 day
### Requirements
We support two approaches for running the model: Docker (which can run anywhere) or locally with Conda.
#### Option A: Using Docker
If you would like to run using Docker, you can follow the [Docker steps provided](../Model/documentation/running_on_docker.md) in the model documentation.
#### Option B: Using Conda
To run this project locally, you will need to install [Anaconda](https://docs.anaconda.com/anaconda/install/) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html).
If running locally without Docker, we first need to create a Conda virtual environment and install the dependencies for our machine learning model. If you only have access to CPUs, install the dependencies specified in the `environment.yml` file. If your development machine has GPU support, you can choose to use the `environment-gpu.yml` file instead.
@ -95,7 +95,7 @@ conda activate <env-name>
### Updating the Model Config
At the top of the [cli.py](../Model/pose_estimation/cli.py) file in the model code, you can see the documentation for all supported commands. Since typing these in can be laborious, we use a [config.yaml](../Model/config.yaml) file to feed in all these arguments. You can still use the command line arguments if you want - they will override the config.
There are a few settings specific to your setup that you'll need to change.
@ -106,7 +106,7 @@ First, we need to specify the path to the folders where your training and valida
data_root: /Users/<user-name>/Documents/data
```
Second, we need to modify the location where the model is going to be saved:
5. In the [config.yaml](../Model/config.yaml), under `system`, you need to set the argument `log_dir_system` to the full path of the output folder where your model's results will be saved. For example, I created a new directory called `models` in my Documents, and then set the following:
```bash
@ -115,11 +115,11 @@ log_dir_system: /Users/<user-name>/Documents/models
### Training the model
6. If you are not already in the `Robotics-Object-Pose-Estimation/Model` directory, navigate there.
7. Enter the following command to start training:
```bash
python -m pose_estimation.cli train
```
>Note (Optional): If you want to override certain training hyperparameters, you can do so with additional arguments on the above command. See the documentation at the top of [cli.py](../Model/pose_estimation/cli.py) for a full list of supported arguments.
@ -130,7 +130,7 @@ python -m pose_estimation.cli train
If you'd like to examine the results of your training run in more detail, see our guide on [viewing the Tensorboard logs](../Model/documentation/tensorboard.md).
### Evaluating the Model
Once training has completed, we can also run our model on our validation dataset to measure its performance on data it has never seen before.
However, first we need to specify a few settings in our config file.
@ -138,9 +138,9 @@ However, first we need to specify a few settings in our config file.
9. If you are not already in the `Robotics-Object-Pose-Estimation/Model` directory, navigate there.
10. To start the evaluation run, enter the following command:
```bash
python -m pose_estimation.cli evaluate
```
>Note (Optional): To override additional settings on your evaluation run, you can tag on additional arguments to the command above. See the documentation in [cli.py](../Model/pose_estimation/cli.py) for more details.
@ -151,6 +151,6 @@ python -m pose_estimation.cli evaluate
### Proceed to [Part 4](4_pick_and_place.md).
###
### Go back to [Part 2](2_set_up_the_data_collection_scene.md)

View File

@ -2,11 +2,11 @@
In [Part 1](1_set_up_the_scene.md) of the tutorial, we learned how to create our Scene in Unity Editor. In [Part 2](2_set_up_the_data_collection_scene.md), we set up the Scene for data collection.
In [Part 3](3_data_collection_model_training.md) we have learned:
* How to collect the data
* How to train the deep learning model
In this part, we will use our trained deep learning model to predict the pose of the cube, and pick it up with our robot arm.
<p align="center">
<img src="Images/4_Pose_Estimation_ROS.png"/>
@ -22,15 +22,15 @@ In this part, we will use our trained deep learning model to predict the pose of
---
### <a name="setup">Set up</a>
If you have correctly followed Parts 1 and 2, whether you chose to use the Unity project we provide or to start from scratch, you should have cloned the repository.
>Note: This project uses Git Submodules to grab the ROS package dependencies for the [`universal_robot`](https://github.com/ros-industrial/universal_robot), [`moveit_msgs`](https://github.com/ros-planning/moveit_msgs), [`ros_tcp_endpoint`](https://github.com/Unity-Technologies/ROS-TCP-Endpoint), and the [`robotiq`](https://github.com/JStech/robotiq/tree/noetic-mods) folders. If you cloned the project and forgot to use `--recurse-submodules`, or if any submodule in this directory doesn't have content (e.g. moveit_msgs or ros_tcp_endpoint), you can run the following command to grab the Git submodules.
> ```bash
> cd /PATH/TO/Robotics-Object-Pose-Estimation &&
> git submodule update --init --recursive
> ```
In your ROS/src folder, you should now have five subdirectories: `moveit_msgs`, `robotiq`, `ros_tcp_endpoint`, `universal_robot` and `ur3_moveit`.
### <a name="step-2">Add the Pose Estimation Model</a>
@ -56,7 +56,7 @@ The provided ROS files require the following packages to be installed. The follo
Building this Docker container will install the necessary packages for this tutorial.
1. Install the [Docker Engine](https://docs.docker.com/engine/install/) if not already installed. Start the Docker daemon. To check whether the Docker daemon is running, open your Docker application; you should see something similar to the following (a green dot in the bottom left corner with the word "running" at the foot of Docker):
<p align="center">
<img src="Images/4_docker_daemon.png" height=500/>
@ -71,17 +71,17 @@ docker build -t unity-robotics:pose-estimation -f docker/Dockerfile .
>Note: The provided Dockerfile uses the [ROS Noetic base Image](https://hub.docker.com/_/ros/). Building the image will install the necessary packages as well as copy the [provided ROS packages and submodules](../ROS/) to the container, predownload and cache the [VGG16 model](https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16), and build the catkin workspace.
3. Start the newly built Docker container:
```docker
docker run -it --rm -p 10000:10000 -p 5005:5005 unity-robotics:pose-estimation /bin/bash
```
When the build completes, it will print `Successfully tagged unity-robotics:pose-estimation`. Running the container should open a bash shell at the ROS workspace root, e.g. `root@8d88ed579657:/catkin_ws#`.
>Note: If you encounter issues with Docker, check the [Troubleshooting Guide](troubleshooting.md) for potential solutions.
4. Source your ROS workspace:
```bash
source devel/setup.bash
@ -89,7 +89,7 @@ source devel/setup.bash
The ROS workspace is now ready to accept commands!
>Note: The Docker-related files (Dockerfile, bash scripts for setup) are located in `Robotics-Object-Pose-Estimation/docker`.
---
@ -97,7 +97,7 @@ The ROS workspace is now ready to accept commands!
If your Pose Estimation Tutorial Unity project is not already open, select and open it from the Unity Hub.
We will work on the same Scene that was created in [Part 1](1_set_up_the_scene.md) and [Part 2](2_set_up_the_data_collection_scene.md), so if you have not already, complete Parts 1 and 2 to set up the Unity project.
#### Connecting with ROS
@ -105,8 +105,8 @@ Prefabs have been provided for the UI elements and Trajectory Planner for conven
1. In the ***Project*** tab, go to `Assets/TutorialAssets/Prefabs/Part4` and drag and drop the `ROSObjects` Prefab into the _**Hierarchy**_ tab.
2. The ROS TCP connection needs to be created. In the top menu bar in Unity Editor, select `Robotics -> ROS Settings`. Find the IP address of your ROS machine.
* If you are going to run ROS services with the Docker container introduced [above](#step-3), fill `ROS IP Address` and `Override Unity IP` with the loopback IP address `127.0.0.1`. If you will be running ROS services via a non-Dockerized setup, you will most likely want to have the `Override Unity IP` field blank, which will let the Unity IP be determined automatically.
* If you are **not** going to run ROS services with the Docker container, e.g. if you are using a dedicated Linux machine or VM instead, open a terminal window in this ROS workspace. Set the `ROS IP Address` field in Unity Editor to the output of the following command:
@ -155,7 +155,7 @@ void PoseEstimationCallback(PoseEstimationServiceResponse response)
{
if (response != null)
{
// The position output by the model is the position of the cube relative to the camera so we need to extract its global position
var estimatedPosition = Camera.main.transform.TransformPoint(response.estimated_pose.position.From<RUF>());
var estimatedRotation = Camera.main.transform.rotation * response.estimated_pose.orientation.From<RUF>();
@ -185,9 +185,9 @@ Note that the `TrajectoryPlanner` component shows its member variables in the _*
#### Switching to Inference Mode
7. On the `Simulation Scenario` GameObject, uncheck the `Automatic Iteration` property of the `Pose Estimation Scenario`, as we are no longer in the Data Collection part. If you want to collect new data in the future, you can always enable `Automatic Iteration` and disable `ROSObjects`.
8. On the `Main Camera` GameObject, uncheck the `Perception Camera` script component, since we do not need it anymore.
Also note the UI elements that have been provided in `ROSObjects/Canvas`, including the Event System that Unity adds by default. In `ROSObjects/Canvas/ButtonPanel`, the `OnClick` callbacks have been pre-assigned in the Prefab. These buttons set the robot to its upright default position, randomize the cube position and rotation, randomize the target, and call the Pose Estimation service.
@ -199,12 +199,12 @@ Run the following roslaunch command in order to start roscore, set the ROS param
1. In the terminal window of your ROS workspace opened in [Set up the ROS side](#step-3), run the provided launch file:
```bash
roslaunch ur3_moveit pose_est.launch
```
---
This launch file also loads all relevant files and starts ROS nodes required for trajectory planning for the UR3 robot (`demo.launch`). The launch files for this project are available in the package's launch directory, i.e. `src/ur3_moveit/launch`.
This launch will print various messages to the console, including the set parameters and the nodes launched. The final message should confirm `You can start planning now!`.
@ -214,24 +214,24 @@ This launch will print various messages to the console, including the set parame
2. Return to Unity, and press Play.
>Note: If you encounter connection errors such as a `SocketException` or don't see a completed TCP handshake between ROS and Unity in the ***Console*** window, return to the [Connecting with ROS](#connecting-with-ros) section above to update the ROS Settings and generate the ROSConnectionPrefab.
>Note: If you encounter a `SocketException` on Ubuntu, check the [Troubleshooting Guide](troubleshooting.md) for potential solutions.
Note that the robot arm must be in its default position, i.e. standing upright, to perform Pose Estimation. This is done by simply clicking the `Reset Robot Position` button after each run.
3. Press the `Pose Estimation` button to send the image to ROS.
This will grab the current camera view, generate a [sensor_msgs/Image](http://docs.ros.org/en/noetic/api/sensor_msgs/html/msg/Image.html) message, and send a new Pose Estimation Service Request to the ROS node running `pose_estimation_service.py`. This will run the trained model and return a Pose Estimation Service Response containing an estimated pose, which is subsequently converted and sent as a new Mover Service Request to the `mover.py` ROS node. Finally, MoveIt calculates and returns a list of trajectories to Unity, and the poses are executed to pick up and place the cube.
The target object and empty goal object can be moved around during runtime for different trajectory calculations, or can be randomized using the `Randomize Cube` button.
>Note: You may encounter a `UserWarning: CUDA initialization: Found no NVIDIA driver on your system.` error upon the first image prediction attempt. This warning can be safely ignored.
>Note: If you encounter issues with the connection between Unity and ROS, check the [Troubleshooting Guide](troubleshooting.md) for potential solutions.
You should see the following:
<p align="center">
<img src="Gifs/0_demo.gif"/>
</p>
@ -2,12 +2,12 @@
In the main tutorial, we randomized the position and rotation of the cube. However, the Perception Package supports much more sophisticated environment randomization. In this (optional) section we will create a richer and more varied environment by adding one more Randomizer to our scene.
In addition to the `YRotationRandomizer` and the `RobotArmObjectPositionRandomizer`, we have designed one more Randomizer:
* The `UniformPoseRandomizer` randomizes an object's position and rotation relative to a fixed starting pose, over a specified range. We will apply this to the camera, to make our trained model more robust to small inaccuracies in placing the real camera.
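For intuition, here is a small, purely illustrative Python sketch of what a uniform pose randomizer does each iteration; the ranges and scales below are made up, and the real `UniformPoseRandomizer` is a C# Randomizer used by the Perception package:

```python
# Illustrative only: sample a uniform offset for each position and rotation axis
# within [-offset_range, offset_range] and apply it to a fixed starting pose.
import random

def randomize_pose(start_position, start_rotation_euler, offset_range=1.0,
                   position_scale=0.01, rotation_scale_deg=2.0):
    """Return a (position, rotation) pair perturbed uniformly around the starting pose."""
    position = [p + random.uniform(-offset_range, offset_range) * position_scale
                for p in start_position]
    rotation = [r + random.uniform(-offset_range, offset_range) * rotation_scale_deg
                for r in start_rotation_euler]
    return position, rotation

# Example: jitter a camera placed at (0, 0.6, -0.4), tilted slightly downward.
print(randomize_pose([0.0, 0.6, -0.4], [20.0, 0.0, 0.0]))
```

Setting the minimum of the `Range` to `-1`, as the next step does, corresponds to sampling offsets in both directions along each axis, exactly as in this sketch.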
### Randomizing the Camera Pose
1. Select the `Simulation Scenario` GameObject and in the _**Inspector**_ tab, on the `Pose Estimation Scenario` component, add a `Uniform Pose Randomizer`. For the `Random` parameter, set the minimum value of the Range to `-1`. We do this because we want to randomize the position and rotation in both directions for a given axis. The Randomizer's UI snippet should look like the following image:
<p align="center">
<img src="Images/5_uniform_pose_randomizer_settings.png" height=150/>
@ -26,4 +26,4 @@ If you press play, you should see the cube moving around the robot and rotating,
You have now learned how to create a Randomizer, and seen how multiple Randomizers can be used together to create a rich, varied scene. Now it is time to create your own Randomizer by yourself! How could this Scene be further improved?
Good luck and have fun!
@ -2,7 +2,7 @@
_**Unity Version**_: if you want to use the Unity project given by the repository, you need to use a version of Unity that is at least `2020.2.*`. The easiest way to install Unity is through Unity Hub.
1. Navigate to [this](https://unity3d.com/get-unity/download) page to download Unity Hub
2. In the Unity Hub, go to the `Installs` panel. Then click on `Add` and select the latest release of `Unity 2020.2`.
<p align="center">
@ -15,9 +15,9 @@ If you just want to run the completed project, this section can help you get up
## Prerequisites
You will first need to **clone** this repository.
1. Open a terminal and navigate to the folder where you want to host the repository.
```bash
git clone --recurse-submodules https://github.com/Unity-Technologies/Robotics-Object-Pose-Estimation.git
```
@ -26,9 +26,9 @@ git clone --recurse-submodules https://github.com/Unity-Technologies/Robotics-Ob
## <a name='setup'>Setup</a>
1. Open the completed project. In the Unity Hub, click the `Add` button, and select `Robotics-Object-Pose-Estimation/PoseEstimationDemoProject` from inside the file location where you cloned the repo.
2. Open the scene. Go to `Assets/Scenes` and double click on `TutorialPoseEstimation`.
3. We now need to set the size of the images used. In the ***Game*** view, click on the dropdown menu in front of `Display 1`. Then, click **+** to create a new preset. Make sure `Type` is set to `Fixed Resolution`. Set `Width` to `650` and `Height` to `400`. The gif below depicts these actions.
@ -38,7 +38,7 @@ git clone --recurse-submodules https://github.com/Unity-Technologies/Robotics-Ob
## Add the Pose Estimation Model
In your root `Robotics-Object-Pose-Estimation` folder, you should have a `ROS` folder. Inside that folder you should have a `src` folder, and inside that one, 5 folders: `moveit_msgs`, `robotiq`, `ros_tcp_endpoint`, `universal_robot` and `ur3_moveit`.
1. Download the [pose estimation model](https://github.com/Unity-Technologies/Robotics-Object-Pose-Estimation/releases/download/v0.0.1/UR3_single_cube_model.tar) we have trained.
@ -49,9 +49,9 @@ In your root `Robotics-Object-Pose-Estimation` folder, you should have a `ROS` f
>Note: This project has been developed with Python 3 and ROS Noetic.
We have provided a Docker container to get you up and running quickly.
1. Install the [Docker Engine](https://docs.docker.com/engine/install/) if not already installed. Start the Docker daemon. To check if the Docker daemon is running, open your Docker application; you should see something similar to the following (a green dot in the bottom left corner with the word `running` at the foot of the Docker window):
<p align="center">
<img src="Images/4_docker_daemon.png" height=400/>
@ -65,17 +65,17 @@ docker build -t unity-robotics:pose-estimation -f docker/Dockerfile .
>Note: The provided Dockerfile uses the [ROS Noetic base Image](https://hub.docker.com/_/ros/). Building the image will install the necessary packages as well as copy the [provided ROS packages and submodules](../ROS/) to the container, predownload and cache the [VGG16 model](https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16), and build the catkin workspace.
3. Start the newly built Docker container:
```docker
docker run -it --rm -p 10000:10000 -p 5005:5005 unity-robotics:pose-estimation /bin/bash
```
When the build is complete, it will print: `Successfully tagged unity-robotics:pose-estimation`. Starting the container should open a bash shell at the ROS workspace root, e.g. `root@8d88ed579657:/catkin_ws#`.
>Note: If you encounter issues with Docker, check the [Troubleshooting Guide](troubleshooting.md) for potential solutions.
4. Source your ROS workspace:
```bash
source devel/setup.bash
@ -86,7 +86,7 @@ The ROS workspace is now ready to accept commands!
## Set Up the Unity Side
1. At the top of your screen, open the ROS settings by selecting `Robotics/ROS Settings`. Fill `ROS IP Address` and `Override Unity IP` with the loopback IP address `127.0.0.1`.
2. Ensure that `ROS Port` is set to `10000` and `Unity Port` is set to `5005`.
@ -101,10 +101,10 @@ Run the following `roslaunch` command in order to start roscore, set the ROS par
1. In the terminal window of your ROS workspace opened above, run the provided launch file:
```bash
roslaunch ur3_moveit pose_est.launch
```
This launch file also loads all relevant files and starts ROS nodes required for trajectory planning for the UR3 robot. The launch files for this project are available in the package's launch directory, i.e. `src/ur3_moveit/launch`.
This launch will print various messages to the console, including the set parameters and the nodes launched. The final message should confirm `You can start planning now!`.
@ -119,17 +119,17 @@ This launch will print various messages to the console, including the set parame
Note that the robot arm must be in its default position, i.e. standing upright, to perform Pose Estimation. This is done by simply clicking the `Reset Robot Position` button after each run.
3. Press the `Pose Estimation` button to send the image to ROS.
This will grab the current camera view, generate a [sensor_msgs/Image](http://docs.ros.org/en/noetic/api/sensor_msgs/html/msg/Image.html) message, and send a new Pose Estimation Service Request to the ROS node running `pose_estimation_service.py`. This will run the trained model and return a Pose Estimation Service Response containing an estimated pose, which is subsequently converted and sent as a new Mover Service Request to the `mover.py` ROS node. Finally, MoveIt calculates and returns a list of trajectories to Unity, and the poses are executed to pick up and place the cube.
The target object and goal object can be moved around during runtime for different trajectory calculations, or the target can be randomized using the `Randomize Cube` button.
>Note: You may encounter a `UserWarning: CUDA initialization: Found no NVIDIA driver on your system.` error upon the first image prediction attempt. This warning can be safely ignored.
>Note: If you encounter issues with the connection between Unity and ROS, check the [Troubleshooting Guide](troubleshooting.md) for potential solutions.
You should see the following:
<p align="center">
<img src="Gifs/0_demo.gif"/>
</p>
@ -1,6 +1,6 @@
# Data Collection: Quick Demo
If you just want to run the completed project in order to collect your training and validation data, this section can help you do it.
To learn how to build something like this from scratch, see [Part 1](1_set_up_the_scene.md) and [Part 2](2_set_up_the_data_collection_scene.md) of our tutorial.
@ -12,20 +12,20 @@ To learn how to build something like this from scratch, see [Part 1](1_set_up_th
## <a name="reqs">Prerequisites</a>
To follow this tutorial you need to **clone** this repository even if you want to create your Unity project from scratch.
1. Open a terminal and navigate to the folder where you want to host the repository.
```bash
git clone --recurse-submodules https://github.com/Unity-Technologies/Robotics-Object-Pose-Estimation.git
```
2. [Install Unity `2020.2.*`.](install_unity.md)
3. Open the completed project. To do so, open Unity Hub, click the `Add` button, and select `PoseEstimationDemoProject` from the root `Robotics-Object-Pose-Estimation` folder.
## <a name='setup'>Setup</a>
1. Once the project is opened, in the ***Project*** tab, go to `Assets > Scenes` and double click on `TutorialPoseEstimation` to open the Scene created for this tutorial.
2. We now need to set the size of the images used. In the ***Game*** view, click on the dropdown menu in front of `Display 1`. Then, click **+** to create a new preset. Make sure `Type` is set to `Fixed Resolution`. Set `Width` to `650` and `Height` to `400`. The gif below depicts these actions.
@ -43,8 +43,8 @@ The completed project is set up for inference mode by default, so we must switch
3. On the `Main Camera` GameObject, check the `Perception Camera (Script)` component to enable it.
## <a name="data-collection">Data Collection</a>
To get started with the data collection, follow the instructions in [Part 3: Collect the Training and Validation Data](3_data_collection_model_training.md#step-1) of the tutorial. This section will explain how to set a random seed for the environment, choose how many training data examples you'd like to collect, and get things running.
If you'd like to move on to training a pose estimation model on the data you've collected, navigate to [Part 3: Train the Deep Learning Model](3_data_collection_model_training.md#step-2).
Have fun!
@ -17,29 +17,29 @@
### Package Installation
- If you are receiving a `[Package Manager Window] Unable to add package ... xcrun: error: invalid developer path...` error, you may need to install the [Command Line Tools](https://developer.apple.com/library/archive/technotes/tn2339/_index.html) package for macOS via `xcode-select --install`.
- If you are receiving `[Package Manager] Done resolving packages in ... seconds ... An error occurred while resolving packages: Project has invalid dependencies: ... Error when executing git command. fatal: update_ref failed for ref 'HEAD': cannot update ref 'refs/heads/master'` or similar git-related Package Manager errors, please note that this is a known issue that is being tracked on the [Issue Tracker](https://issuetracker.unity3d.com/issues/package-resolution-error-when-using-a-git-dependency-referencing-an-annotated-tag-in-its-git-url). The current workaround is to use a lightweight tag for the git URLs, i.e. `https://github.com/...#v0.2.0-light`. This workaround is reflected in the current version of the tutorial.
### Assets, Materials
- Upon import, the cube and floor materials may appear to be bright pink (i.e. missing texture).
- Cube: Go to `Assets/TutorialAssets/Materials`. Select the `AlphabetCubeMaterial`. There is a section called `Surface Inputs`. If the Base Map is not assigned, select the circle next to this field. Click on it and start typing `NonsymmetricCubeTexture` and select it when it appears. Apply this updated `AlphabetCubeMaterial` to the Cube. Your Inspector view of the Material should look like the following:
![](Images/1_alphabet_material.png)
- Floor: Assign the `NavyFloor` material to the Floor object.
- If all of the project materials appear to have missing textures, ensure you have created the project using the Universal Render Pipeline.
- If the UR3 arm's base has some missing textures (e.g. pink ovals), in the Project window, navigate to `Assets/TutorialAssets/URDFs/ur3_with_gripper/ur_description/meshes/ur3/visual > base.dae`. Select the base, and in the ***Inspector*** window, open the ***Materials*** tab. If the `Material_001` and `_002` fields are blank, assign them to `Assets/TutorialAssets/URDFs/ur3_with_gripper/ur_description/Materials/Material_001` and `_002`, respectively.
![](Images/faq_base_mat.png)
### URDF Importer
- If you are not seeing `Import Robot from URDF` in the `Assets` menu, check the ***Console*** for compile errors. The project must compile correctly before the editor tools become available.
- If the robot appears loose/wiggly or is not moving with no console errors, ensure on the `Controller` script of the `ur3_with_gripper` that the `Stiffness` is **10000**, the `Damping` is **1000** and the `Force Limit` is **1000**.
- Note that the world-space origin of the robot is defined in its URDF file. In this sample, we have assigned it to sit on top of the table, which is at `(0, 0.77, 0)` in Unity coordinates. Moving the robot from its root position in Unity will require a change to its URDF definition.
```xml
<joint name="joint_world" type="fixed">
<parent link="world" />
<child link="base_link" />
<origin rpy="0.0 0.0 0.0" xyz="0.0 0.0 0.77"/>
</joint>
```
**Note**: Going from Unity world space to ROS world space requires a conversion. Unity's `(x,y,z)` is equivalent to the ROS `(z,-x,y)` coordinate.
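For illustration, here is a minimal Python sketch of the position mapping stated in this note; in the project itself the conversion is handled on the C# side (e.g. the `From<RUF>()` calls shown in the earlier snippet), so this is only for checking coordinates by hand:

```python
# Unity (x, y, z) -> ROS (z, -x, y), and the inverse mapping, as stated in the note above.

def unity_to_ros_position(x, y, z):
    """Convert a Unity world-space position to ROS world-space coordinates."""
    return (z, -x, y)

def ros_to_unity_position(x, y, z):
    """Inverse mapping: ROS (x, y, z) back to Unity coordinates."""
    return (-y, z, x)

# Example: the robot root sits at (0, 0.77, 0) in Unity, i.e. 0.77 m up;
# in ROS this becomes 0.77 along z, which is also "up".
print(unity_to_ros_position(0.0, 0.77, 0.0))   # (0.0, -0.0, 0.77)
print(ros_to_unity_position(0.0, -0.0, 0.77))  # (0.0, 0.77, 0.0)
```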
@ -48,7 +48,7 @@
### Docker, Environment
- If you are using a Docker container to train your model but it is killed shortly after starting, you may need to increase the memory allocated to Docker. In the Docker Dashboard, navigate to Settings (via the gear icon) > Resources. The suggested minimum memory is 4.00 GB, but you may need to modify this for your particular needs.
- If you encounter errors installing Pytorch via the instructed `pip3` command, try the following instead:
```bash
sudo pip3 install rospkg numpy jsonpickle scipy easydict torch==1.7.1 torchvision==0.8.2 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html
```
@ -58,21 +58,21 @@
- `Error processing request: invalid load key...` This has most likely occurred due to the downloaded model's `.tar` file being corrupted, e.g. caused by an unstable connection, or otherwise interrupted download process. Please try redownloading the [UR3_single_cube_model.tar](https://github.com/Unity-Technologies/Robotics-Object-Pose-Estimation/releases/download/v0.0.1/UR3_single_cube_model.tar) file and try the process again.
### Unity Scene
- The buttons might appear oversized compared to the rest of the objects in the Scene view; this is normal behavior. If you zoom out from the table you should see something similar to the following:
<p align="center">
<img src="Images/button_error.png" align="center" width=950/>
</p>
### Docker, ROS-TCP Connection
- Building the Docker image may throw a `Could not find a package configuration file provided by...` exception if one or more of the directories in `ROS/` appears empty. This project uses Git Submodules to grab the ROS package dependencies. If you cloned the project and forgot to use `--recurse-submodules`, or if any submodule in this directory doesn't have content, you can run `git submodule update --init --recursive` to grab the Git submodules.
- `...failed because unknown error handler name 'rosmsg'` This is due to a bug in an outdated package version. Try running `sudo apt-get update && sudo apt-get upgrade` to upgrade packages.
- `Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?` The system-independent `docker info` command can verify whether or not Docker is running. This command will throw a `Server: ERROR` if the Docker daemon is not currently running, and will print the appropriate [system-wide information](https://docs.docker.com/engine/reference/commandline/info/) otherwise.
- Occasionally, not having enough memory allocated to the Docker container can cause the `server_endpoint` to fail. This may cause unexpected behavior during the pick-and-place task, such as constantly predicting the same pose. If this occurs, check your Docker settings. You may need to increase the `Memory` to 8GB.
- This can be found in Docker Desktop settings, under the gear icon.
- `Exception Raised: unpack requires a buffer of 4 bytes`: This may be caused by a mismatch in the expected Service Request formatting. Ensure that the [srv definition](../ROS/src/ur3_moveit/srv/MoverService.srv) matches the [generated C# script](../PoseEstimationDemoProject/Assets/TutorialAssets/RosMessages/Ur3Moveit/srv/MoverServiceRequest.cs), and that you have not modified these files since the last push to your ROS workspace.
### ROS Workspace
- If the `catkin_make` command is failing, ensure you are specifying which packages to build (i.e. `catkin_make -DCATKIN_WHITELIST_PACKAGES="moveit_msgs;ros_tcp_endpoint;ur3_moveit;robotiq_2f_140_gripper_visualization;ur_description;ur_gazebo"`).
- If the problem persists, add the `-j1` flag to the `catkin_make` command.
### Ubuntu
@ -1,8 +1,8 @@
Object Pose Estimation Model
=====================
This section contains code for training and evaluating a deep neural network to predict the pose of a single object from RGB images. We provide support for running both locally and with Docker.
This model is a modified implementation of [Domain Randomization for Transferring Deep Neural Networks from Simulation to the Real World](https://arxiv.org/pdf/1703.06907.pdf), by Tobin et al. It is based on the classic VGG-16 backbone architecture, and initialized with weights pre-trained on the ImageNet dataset. The head of the network is replaced with a 3D position prediction head that outputs (x, y, z), and an orientation prediction head that outputs a quaternion (q<sub>x</sub>, q<sub>y</sub>, q<sub>z</sub>, q<sub>w</sub>).
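As a rough, hedged illustration of this architecture (this is not the repository's actual `model.py`, and the head sizes here are arbitrary), a PyTorch sketch could look like the following:

```python
# Sketch only: VGG-16 features pre-trained on ImageNet, classifier replaced by two heads,
# one for the 3D translation (x, y, z) and one for the orientation quaternion (qx, qy, qz, qw).
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models

class PoseEstimationSketch(nn.Module):
    def __init__(self):
        super().__init__()
        # Newer torchvision versions use the `weights=` argument instead of `pretrained=`.
        vgg = models.vgg16(pretrained=True)
        self.backbone = vgg.features
        self.pool = nn.AdaptiveAvgPool2d((7, 7))
        self.translation_head = nn.Sequential(
            nn.Flatten(), nn.Linear(512 * 7 * 7, 256), nn.ReLU(), nn.Linear(256, 3))
        self.orientation_head = nn.Sequential(
            nn.Flatten(), nn.Linear(512 * 7 * 7, 256), nn.ReLU(), nn.Linear(256, 4))

    def forward(self, x):
        features = self.pool(self.backbone(x))
        translation = self.translation_head(features)
        quaternion = F.normalize(self.orientation_head(features), dim=1)  # unit quaternion
        return translation, quaternion

model = PoseEstimationSketch()
translation, quaternion = model(torch.randn(1, 3, 224, 224))
```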
<p align='center'>
<img src='documentation/docs/network.png' height=400/>
@ -23,19 +23,19 @@ We've provided a pre-trained model, which can be downloaded [here](https://githu
This model supports a `train` and an `evaluate` command. Both of these have many arguments, which you can examine in `cli.py`. They will default to the values in `config.yaml` for convenience, but can be overridden via the command line.
The most important `train` arguments to be aware of are:
* `--data_root`: Path to the directory containing your data folders. This directory should include `UR3_single_cube_training` and `UR3_single_cube_validation`, containing the training and validation data, respectively.
* `--log-dir-system`: Path to directory where you'd like to save Tensorboard log files and model checkpoint files.
The most important `evaluate` arguments to be aware of are:
* `--load-dir-checkpoint`: Path to the model to be evaluated.
* `--data_root`: Path to the directory containing your data folders. This directory should include `UR3_single_cube_training` and `UR3_single_cube_validation`, containing the training and validation data, respectively.
## Performance
Below is a description of the model's performance on predicting the pose of a cube. For the loss, we used the L2 norm for the position and orientation in each batch.
However, we used different metrics to _evaluate_ the performance of the model.
* To evaluate translation predictions, we measured the [average L2 norm over the dataset](pose_estimation/evaluation_metrics/translation_average_mean_square_error.py).
* To evaluate the orientation predictions, we used the angle between the orientation of the prediction and the orientation of the target, averaged over the dataset (implementation [here](pose_estimation/evaluation_metrics/orientation_average_quaternion_error.py)).
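For intuition, here is a minimal NumPy sketch of what these two metrics compute over a dataset of predictions; the repository's actual implementations (linked above) may differ in details:

```python
# Average L2 translation error and average quaternion angle error over N samples.
import numpy as np

def translation_error(pred_pos, true_pos):
    """Mean L2 norm between predicted and target positions; arrays of shape (N, 3)."""
    return np.mean(np.linalg.norm(pred_pos - true_pos, axis=1))

def orientation_error(pred_quat, true_quat):
    """Mean angle in radians between predicted and target unit quaternions; shape (N, 4)."""
    dots = np.abs(np.sum(pred_quat * true_quat, axis=1))   # |<q_pred, q_true>|
    return np.mean(2.0 * np.arccos(np.clip(dots, 0.0, 1.0)))

pred_p = np.array([[0.10, 0.00, 0.05]])
true_p = np.array([[0.11, 0.00, 0.05]])
print(translation_error(pred_p, true_p))  # ~0.01, which the table below reads as ~10% of the cube's size
```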
@ -43,9 +43,9 @@ However, we used different metrics to _evaluate_ the performance of the model.
| | Training Error | Validation Error |
|:-------------------:|:---------------------------:|:--------------------------:|
|Translation | 0.012 (12% of cube's size) | 0.01 (10% of cube's size) |
|Orientation (radian) | 0.06 | 0.05 |
> Note: Data for the above experiment was collected in Unity 2020.2.1f1.
## Unit Testing
@ -1,20 +1,20 @@
Codebase Structure
==================
In this project, I create a network to predict the pose of a cube.
### Architecture
The pose estimation project is organized as follows.
PoseEstimationModel:
* [environment-gpu.yml](../environment-gpu.yml):
If the computer you are running the project from **has GPU support**, this file sets the dependencies of the project and the different packages to install. It is meant to be used when you create your conda environment.
* [environment.yml](../environment.yml):
If the computer you are running the project from **does not have GPU support**, this file sets the dependencies of the project and the different packages to install. It is meant to be used when you create your conda environment.
* [setup.py](../setup.py):
This file is used to package the project so it can be installed.
* [cli.py](../pose_estimation/cli.py):
This file contains the CLI commands used to launch the different processes (either train or evaluate).
@ -23,15 +23,15 @@ PoseEstimationModel:
This file contains the default configuration for the estimator (pose estimation model) on the single cube dataset.
* [single_cube_dataset.py](../pose_estimation/single_cube_dataset.py):
This file describes how the SingleCubeDataset class loads the dataset into memory.
* [model.py](../pose_estimation/model.py):
This file contains the neural network, along with the custom linear activation function, used to perform the pose estimation task: predicting the object's translation (the x, y, z coordinates of the cube's center) and, if the object is asymmetric, the cube's orientation (a quaternion describing the orientation of the cube); for a symmetric object only the translation is predicted.
* [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py):
This file contains the pose estimation estimator and the different methods you can apply to your model, such as train, evaluate, save and load.
* [train.py](../pose_estimation/train.py):
@ -44,7 +44,7 @@ PoseEstimationModel:
This module contains metrics used by the pose estimation estimator.
* [logger.py](../pose_estimation/logger.py):
This module contains the logger class, which is designed to save elements (metrics, losses) that are visible on TensorBoard.
* [storage](../pose_estimation/storage):
This module contains functionality that relates to
@ -54,19 +54,19 @@ PoseEstimationModel:
This module contains all the tests which you can run using the [pytest command](../README.md#unit-testing).
* [Dockerfile](../Dockerfile):
This file is responsible for the creation of the Docker image.
* [kubeflow](../kubeflow/):
This module contains Kubeflow pipelines ([.py.tar.gz](../kubeflow/train_pipeline.py) files). You can find more information on how to set up a Kubeflow pipeline in the [ReadMe](../kubeflow/README.md).
### Details of the config.yaml file
In the following, I will explain what each argument in the [config.yaml](../config.yaml) file means.
There are 8 sections in the config file:
* **estimator**: This will be the core name of the saved model
* _**train**_:
- **dataset_zip_file_name_training**: name of the training dataset file.
- **batch_training_size**: number of training samples to work through before the model's internal parameters are updated.
@ -75,7 +75,7 @@ There are 8 sections in the config files:
- **epochs**: number of passes of the entire training dataset the machine learning algorithm has completed.
- **beta_loss**: beta coefficient used to weight the orientation loss when it is added to the translation loss.
- **sample_size_train**: size of a dataset training sample. It is used to test operations/commands on a few examples.
@ -84,7 +84,7 @@ There are 8 sections in the config files:
- **dataset_zip_file_name_validation**: name of the validation dataset file.
- **batch_validation_size**: number of validation samples to work through before the metrics are calculated.
- **eval_freq**: frequency of epochs at which the evaluation process is launched.
- **sample_size_val**: size of a dataset validation sample. It is used to test operations/commands on a few examples.
@ -107,8 +107,8 @@ There are 8 sections in the config files:
- **pose_estimation_gcs_path**: path inside the gcp bucket where the datasets are located.
- **symmetric**: Boolean. True if the object is symmetric, False otherwise. Based on this, the model predicts either only the translation, or the translation and orientation.
* _**adam_optimizer**_:
@ -119,13 +119,13 @@ There are 8 sections in the config files:
- **beta_2**: the exponential decay rate for the second-moment estimates.
* _**checkpoint**_:
- **load_dir_checkpoint**: path towards the saved model.
- **save_frequency**: frequency of epochs at which the model is saved. If it is set to 1 the model will be saved every epoch, and if it is set to 2 it will be saved every two epochs.
* _**system**_:
- **log_dir_system**: path where the model (a `.tar` file) and the metrics (event files that can be viewed with TensorBoard) will be saved.
@ -133,9 +133,9 @@ There are 8 sections in the config files:
### Save and Load methods
In the [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py) file, there is one method to save and one method to load a model.
The save method is called in the [train.py](../pose_estimation/train.py) file at line 95, and the load method is called in [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py) at line 82. The model is saved using the save method of the checkpointer object, and loaded using its load method. The checkpointer object is created in the [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py) file at line 50. To understand how the model is saved or loaded, we then need to look into the [checkpoint.py](../pose_estimation/storage/checkpoint.py) file.
But first, let's have a general overview of the [checkpoint.py](../pose_estimation/storage/checkpoint.py) file. There are three classes:
- **EstimatorCheckpoint**: it assigns an `estimator checkpoint writer` according to `log_dir`, which is responsible for saving estimators. The writer can be a GCS or local writer. It also assigns a `loader`, which is responsible for loading an estimator from a given path. The loader can be a local, GCS or HTTP loader.
@ -148,7 +148,7 @@ When the EstimatorCheckpoint object is created, the static method `_create_write
Now you have two options to save your model: locally, or on Google Cloud (you can use another cloud provider, but you will have to make the changes yourself).
Then, if we go back to the method called to save the model, it is the `save` method of the `EstimatorCheckpoint` object. This method calls the `save` method of the object created by the `_create_writer` method.
* `local`: the class `LocalEstimatorWriter` takes as attributes a `dirname` which is the `log_dir` path, a `prefix` which is the name of the estimator (corresponding to the `estimator` argument in the [config.yaml](../config.yaml) file), and a `suffix` which by default is `.tar` (the file type), and it creates a directory which will host the model. Then the `save` method calls the `save` method of the estimator, which is in the [pose_estimation_estimator.py](../pose_estimation/pose_estimation_estimator.py) file.
* `gcp`: the class `GCSEstimatorWriter` takes as attributes a `cloud_path`, which is the `log_dir` path to the GCP bucket, and a `prefix`, which is the name of the estimator (corresponding to the `estimator` argument in the [config.yaml](../config.yaml) file). In the `save` method, the model is first saved to a temporary local directory, using the same process described above for `local`. The `save` method returns the full GCS cloud path to the saved checkpoint file. Then the `upload` method from the [GCSClient()](../pose_estimation/storage/gcs.py) class is called: it is a method to upload files to Google Cloud Platform.
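To summarize the dispatch just described, here is a small, purely illustrative sketch (assumed logic, not the real `EstimatorCheckpoint` code, which also stages GCS uploads through a temporary local directory):

```python
# Choose a writer backend from the log_dir path: gs:// paths go to GCS, anything else is local.
import os

def describe_writer(log_dir: str, prefix: str, suffix: str = ".tar") -> str:
    """Return a description of where a checkpoint named <prefix><suffix> would be written."""
    filename = prefix + suffix
    if log_dir.startswith("gs://"):
        return "GCS upload: " + log_dir.rstrip("/") + "/" + filename
    return "local write: " + os.path.join(log_dir, filename)

print(describe_writer("/save/single_cube", "UR3_single_cube_model"))
print(describe_writer("gs://thea-dev/runs/20200328_221415", "UR3_single_cube_model"))
```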

@ -1,17 +1,17 @@
Docker
======
Another option to run the project is to use a Docker image. This option allows you to avoid downloading the project's libraries to your local computer, while still running the project successfully. With a Docker image, you also have the ability to train or evaluate your model on a cloud platform, such as Google Cloud Platform, AWS, Microsoft Cloud, and many others.
## Docker Requirements
You will need to have [Docker](https://docs.docker.com/get-docker/) installed on your computer.
### Running with Docker
* **Action**: In [config.yaml](../config.yaml), under `system`, set the argument `log_dir_system` to: `/save/single_cube`.
* **Action**: Set the argument `data_root` under `system` to `/data`.
Before creating the Docker image, you need to be sure your Docker settings are compatible with the project. Open Docker Desktop, click on `Settings` (the gear icon) on the top right, and go to `Resources`. Then change your settings so that they match the following:
<p align="center">
<img src="docs/docker_settings.png" height=400/>
@ -22,11 +22,11 @@ Before creating the Docker image, you need to be sure your Docker settings are c
The first step is to build the Docker image.
* **Action**: Open a new terminal and navigate to the `Robotics-Object-Pose-Estimation/Model` folder. Then run the command to build your docker image, and name it `pose_estimation`:
```bash
docker build -t pose_estimation .
```
**Note**: If you change any code in the `Model` directory, you will need to rebuild the Docker image.
* **Action**: Now we need to run the Docker image. One way is to use the bash shell. Still in the same terminal, enter the following:
```bash
@ -35,29 +35,29 @@ docker run -it -v [FULL PATH TO DATA FOLDER]:/data -v [FULL PATH TO MODEL FOLDER
The `FULL PATH TO DATA FOLDER` is the path to the upper directory of your data. As an example, I have put my `UR3_single_cube_training` and `UR3_single_cube_validation` data folders into a folder called `data` that I have created in my `Documents` folder. Thus my `FULL PATH TO DATA FOLDER` will be `/Users/jonathan.leban/Documents/data`.
The `FULL PATH TO MODEL FOLDER` is the directory in which your models and metrics will be saved. For me, I created a folder called `save` in my `Documents` folder.
The `/save/single_cube` directory is the directory inside the Docker container. That is why, in the [config.yaml](../config.yaml) file, the argument `log_dir_system` under `system` is set to `/save/single_cube`.
Thus, the final command for me is:
```bash
docker run -it -v /Users/jonathan.leban/Documents/data:/data -v /Users/jonathan.leban/Documents/save:/save/single_cube pose_estimation bash
```
### CLI
At the top of the [cli.py](../pose_estimation/cli.py) file, you can see the documentation for all supported commands.
#### Train
To run the training command with default values:
* **Action**:
```bash
python -m pose_estimation.cli train
```
You can override many hyperparameters by adding additional arguments to this command. See [cli.py](../pose_estimation/cli.py) for a view of all supported arguments.
#### Evaluate
To run the evaluate command:
```bash
@ -66,8 +66,8 @@ python -m pose_estimation.cli evaluate --load-dir-checkpoint=/save/single_cube/U
Again, you can override many hyperparameters by adding additional arguments to this command. See [cli.py](../pose_estimation/cli.py) for a view of all supported arguments.
### Copy metrics and models saved on Docker to your local machine
Once you have trained or evaluated your model, you may want to copy the results out of the Docker container to your local computer.
After building and running the docker image your terminal should look something like this:
@ -75,26 +75,26 @@ After building and running the docker image your terminal should look something
<img src="docs/docker_id_image.png" height=40/>
</p>
Here, to the right of `root@`, you can see the id of the Docker container you are in. Copy this id.
As a reminder, we want to extract some files from `save/single_cube/` inside the Docker container into the `save` folder you created on your local computer.
Open a new terminal and enter the following:
```bash
docker cp <containerId>:<source path> <destination path>
```
As an example, I will enter the following:
```bash
docker cp 48a81368b095:/save/single_cube/UR3_single_cube_model_ep120.tar /Users/jonathan.leban/Documents/save
```
To copy my metrics data out of docker, I will enter the following:
```bash
docker cp 48a81368b095:/save/single_cube/events.out.tfevents.1612402202.48a81368b095 /Users/jonathan.leban/Documents/save
```
The metrics file should have a name of the format **events.out.tfevents.<`number`>.<`number`>**.
### Troubleshooting
If you see a `Killed` message when you launch the training, try increasing the `Memory` allowance in your Docker settings.
@ -1,22 +1,22 @@
## Running on the Cloud
Instead of training or evaluating your model on your local computer, you can use the cloud. The advantages of using the cloud are:
- Speed
- No local storage problems
- No need to install packages or software on your computer
- Can run on any computer and at any time without needing monitoring
To run the project on the cloud, you will need to change a few parameters in the [config.yaml](../config.yaml) file. The steps are described in the section below, [Google Cloud Platform](#google-cloud-platform).
### Google Cloud Platform
Instead of extracting the data from your local computer, you can also download it from the cloud. In that case, you have two options:
- If you want to access the cloud for your data in the Docker image, you will need to change the [config.yaml](../config.yaml) file.
- Under `dataset`, set `download_data_gcp` to `True`
- Specify the string value for `gcs_bucket` and `pose_estimation_gcs_path`, where `pose_estimation_gcs_path` is the path under the `gcs_bucket`.
- For example, if you have called your gcs_bucket `pose-estimation` and you have created a new folder inside `pose-estimation` named `dataset`, then pose_estimation_gcs_path will be equal to `dataset`.
- If you want to use the Kubeflow pipeline, you will only need to fill out the respective arguments when you create the pipeline, as you can see in the picture below:
![](docs/kubeflow_details_pipeline.png)
However, please note that using a cloud computing platform (Google Cloud, AWS, Azure) incurs charges.
This project provides the code necessary to run your project on [Kubeflow](https://www.kubeflow.org/), where you can run [machine learning pipelines](https://www.kubeflow.org/docs/pipelines/overview/pipelines-overview/). You will just need to follow the instructions in the [Kubeflow Pipeline](../kubeflow/README.md) ReadMe.
@ -1,13 +1,13 @@
# Visualizing Training Results with Tensorboard
To view the training or evaluation logs you can use TensorBoard. The logs are saved in the same directory as the model.
You need to run the following command:
* **Action**:
```bash
tensorboard --logdir=[LOG DIRECTORY]
```
You should see something similar to this:
<p align="center">
<img src="../../Documentation/Images/3_tensorboard.png" height=40/>
</p>
@ -19,7 +19,7 @@ The result of this command will show you the port on which Tensorboard is now av
localhost:[PORT_NUMBER]
```
Once you navigate to that location in your browser, you should see something like this:
<p align="center">
<img src="../../Documentation/Images/3_performance_model.png"/>
</p>
@ -1,11 +1,11 @@
# Kubeflow pipelines
The Kubeflow pipelines are located inside the [kubeflow/](kubeflow/) folder, where you can find one pipeline for training the model, and one for the evaluation.
Train and Evaluate Model using Kubeflow
=======================================
## Create New Pipeline
You will need a Python environment with [kfp==0.5.1](https://pypi.org/project/kfp/) installed.
### Compile Kubeflow pipeline
@ -14,7 +14,7 @@ cd kubeflow
python pose_estimation_train_pipeline.py
```
This will create a file `pose_estimation_train_pipeline.py.tar.gz`, which can be uploaded to Kubeflow Pipelines for execution.
Next, go to the Kubeflow dashboard, upload the compiled file, and create a new pipeline from it. You should be able to create a new parameterized experiment to run a Kubeflow pipeline by following [this tutorial](https://www.kubeflow.org/docs/pipelines/pipelines-quickstart).
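The exact run parameters depend on how `pose_estimation_train_pipeline.py` defines its pipeline function, but based on the arguments discussed in this guide, a parameterized run will typically ask for values along these lines (the names below are illustrative, not authoritative):

```yaml
# Hypothetical run parameters for the training pipeline -- check the pipeline
# definition for the authoritative parameter names and defaults.
docker_image: gcr.io/unity-ai-thea-test/datasetinsights:<git-commit-sha>
gcs_bucket: pose-estimation
pose_estimation_gcs_path: dataset
logdir: gs://your/log/directory
```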
@ -23,11 +23,11 @@ Go to [this pipeline](https://www.kubeflow.org/docs/gke/deploy/) and follow the
1. Set `docker_image` to `gcr.io/unity-ai-thea-test/datasetinsights:<git-commit-sha>`, where `<git-commit-sha>` is the SHA of the latest version of master in the thea repo. It should be the latest commit in the history: [link](https://gitlab.internal.unity3d.com/machine-learning/thea/commits/master).
2. To check the progress of your model, run `docker run -p 6006:6006 -v $HOME/.config:/root/.config:ro tensorflow/tensorflow tensorboard --host=0.0.0.0 --logdir gs://your/log/directory`. Open `http://localhost:6006` to view the dashboard.
This command assumes you have already run `gcloud auth login` and that the local credentials are stored in `$HOME/.config`, which is mounted into the home directory inside Docker. The credentials must have read permission on `gs://your/log/directory`.
3. If the validation mAP and mAR are leveling off, you can terminate the run early; it is unlikely that the model's performance will improve further.
4. The model will save checkpoints after every epoch to the logdir with the format `gs://logdir/ep#.estimator`, e.g.
`gs://thea-dev/runs/20200328_221415/FasterRCNN.ep24.estimator`

Просмотреть файл

@ -1,50 +1,50 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!114 &11400000
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 0}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: fcf7219bab7fe46a1ad266029b2fee19, type: 3}
m_Name: Readme
m_EditorClassIdentifier:
icon: {fileID: 2800000, guid: 7801804018a7dcf42abb827444e18660, type: 3}
title: Universal Render Pipeline Template
sections:
- heading: Universal Render Pipeline
text: 'The Universal Project Template configures Project settings for Projects where performance, wide platform support, and ease of customizing graphics are the primary considerations.'
linkText:
url:
- heading:
text: 'This Template uses the Universal Render Pipeline (URP) and Shader Graph.'
linkText:
url:
- heading:
text: 'URP is prebuilt Scriptable Render Pipeline that is quick and easy to customize, and lets you create optimized graphics across a wide range of platforms. URP also includes an optimized 2D renderer complete with 2D lights and pixel perfect rendering, and an integrated post-processing solution.'
linkText:
url:
- heading:
text: 'Shader Graph is a tool that allows you to create shaders using a visual node editor instead of writing code.'
linkText:
url:
- heading:
text: 'This template contains a sample Scene that contains examples of how to configure lighting settings, Materials, Shaders, and post-processing effects in URP, several preconfigured Universal Render Pipeline Assets that let you quickly swap between graphics quality levels, and Presets that have been optimized for use with URP.'
linkText:
url:
- heading:
text: 'This template contains a sample Scene that contains examples of how to configure lighting settings, Materials, Shaders, and post-processing effects in URP, several preconfigured Universal Render Pipeline Assets that let you quickly swap between graphics quality levels, and Presets that have been optimized for use with URP.'
linkText:
url:
- heading:
text: 'To read more about URP and its built-in features, see the '
linkText: URP documentation.
url: https://docs.unity3d.com/Packages/com.unity.render-pipelines.universal@latest/index.html
- heading:
text: 'For more information about Shader Graph, see the '
linkText: Shader Graph documentation
url: https://docs.unity3d.com/Packages/com.unity.shadergraph@latest
loadedLayout: 1

Просмотреть файл

@ -1,118 +1,118 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!114 &-7893295128165547882
MonoBehaviour:
m_ObjectHideFlags: 3
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 0}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 0b2db86121404754db890f4c8dfe81b2, type: 3}
m_Name: Bloom
m_EditorClassIdentifier:
active: 1
m_AdvancedMode: 0
threshold:
m_OverrideState: 1
m_Value: 1
min: 0
intensity:
m_OverrideState: 1
m_Value: 1
min: 0
scatter:
m_OverrideState: 0
m_Value: 0.7
min: 0
max: 1
clamp:
m_OverrideState: 0
m_Value: 65472
min: 0
tint:
m_OverrideState: 0
m_Value: {r: 1, g: 1, b: 1, a: 1}
hdr: 0
showAlpha: 0
showEyeDropper: 1
highQualityFiltering:
m_OverrideState: 0
m_Value: 0
dirtTexture:
m_OverrideState: 0
m_Value: {fileID: 0}
dirtIntensity:
m_OverrideState: 0
m_Value: 0
min: 0
--- !u!114 &-7011558710299706105
MonoBehaviour:
m_ObjectHideFlags: 3
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 0}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 899c54efeace73346a0a16faa3afe726, type: 3}
m_Name: Vignette
m_EditorClassIdentifier:
active: 1
m_AdvancedMode: 0
color:
m_OverrideState: 0
m_Value: {r: 0, g: 0, b: 0, a: 1}
hdr: 0
showAlpha: 0
showEyeDropper: 1
center:
m_OverrideState: 0
m_Value: {x: 0.5, y: 0.5}
intensity:
m_OverrideState: 1
m_Value: 0.25
min: 0
max: 1
smoothness:
m_OverrideState: 1
m_Value: 0.4
min: 0.01
max: 1
rounded:
m_OverrideState: 0
m_Value: 0
--- !u!114 &11400000
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 0}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: d7fd9488000d3734a9e00ee676215985, type: 3}
m_Name: SampleSceneProfile
m_EditorClassIdentifier:
components:
- {fileID: 849379129802519247}
- {fileID: -7893295128165547882}
- {fileID: -7011558710299706105}
--- !u!114 &849379129802519247
MonoBehaviour:
m_ObjectHideFlags: 3
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 0}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: 97c23e3b12dc18c42a140437e53d3951, type: 3}
m_Name: Tonemapping
m_EditorClassIdentifier:
active: 1
m_AdvancedMode: 0
mode:
m_OverrideState: 1
m_Value: 2

Просмотреть файл

@ -1,53 +1,53 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!114 &11400000
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 0}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: bf2edee5c58d82540a51f03df9d42094, type: 3}
m_Name: UniversalRP-HighQuality
m_EditorClassIdentifier:
k_AssetVersion: 5
k_AssetPreviousVersion: 5
m_RendererType: 1
m_RendererData: {fileID: 0}
m_RendererDataList:
- {fileID: 11400000, guid: 4a8e21d5c33334b11b34a596161b9360, type: 2}
m_DefaultRendererIndex: 0
m_RequireDepthTexture: 0
m_RequireOpaqueTexture: 0
m_OpaqueDownsampling: 1
m_SupportsHDR: 1
m_MSAA: 2
m_RenderScale: 1
m_MainLightRenderingMode: 1
m_MainLightShadowsSupported: 1
m_MainLightShadowmapResolution: 2048
m_AdditionalLightsRenderingMode: 1
m_AdditionalLightsPerObjectLimit: 4
m_AdditionalLightShadowsSupported: 1
m_AdditionalLightsShadowmapResolution: 512
m_ShadowDistance: 50
m_ShadowCascades: 1
m_Cascade2Split: 0.25
m_Cascade4Split: {x: 0.067, y: 0.2, z: 0.467}
m_ShadowDepthBias: 1
m_ShadowNormalBias: 1
m_SoftShadowsSupported: 1
m_UseSRPBatcher: 1
m_SupportsDynamicBatching: 0
m_MixedLightingSupported: 1
m_DebugLevel: 0
m_ColorGradingMode: 0
m_ColorGradingLutSize: 32
m_ShadowType: 1
m_LocalShadowsSupported: 0
m_LocalShadowsAtlasResolution: 256
m_MaxPixelLights: 0
m_ShadowAtlasResolution: 256
m_ShaderVariantLogLevel: 0

Просмотреть файл

@ -1,53 +1,53 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!114 &11400000
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 0}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: bf2edee5c58d82540a51f03df9d42094, type: 3}
m_Name: UniversalRP-LowQuality
m_EditorClassIdentifier:
k_AssetVersion: 5
k_AssetPreviousVersion: 5
m_RendererType: 1
m_RendererData: {fileID: 0}
m_RendererDataList:
- {fileID: 11400000, guid: 4a8e21d5c33334b11b34a596161b9360, type: 2}
m_DefaultRendererIndex: 0
m_RequireDepthTexture: 0
m_RequireOpaqueTexture: 0
m_OpaqueDownsampling: 1
m_SupportsHDR: 0
m_MSAA: 1
m_RenderScale: 1
m_MainLightRenderingMode: 1
m_MainLightShadowsSupported: 0
m_MainLightShadowmapResolution: 2048
m_AdditionalLightsRenderingMode: 0
m_AdditionalLightsPerObjectLimit: 4
m_AdditionalLightShadowsSupported: 0
m_AdditionalLightsShadowmapResolution: 512
m_ShadowDistance: 50
m_ShadowCascades: 0
m_Cascade2Split: 0.25
m_Cascade4Split: {x: 0.067, y: 0.2, z: 0.467}
m_ShadowDepthBias: 1
m_ShadowNormalBias: 1
m_SoftShadowsSupported: 0
m_UseSRPBatcher: 1
m_SupportsDynamicBatching: 0
m_MixedLightingSupported: 1
m_DebugLevel: 0
m_ColorGradingMode: 0
m_ColorGradingLutSize: 16
m_ShadowType: 1
m_LocalShadowsSupported: 0
m_LocalShadowsAtlasResolution: 256
m_MaxPixelLights: 0
m_ShadowAtlasResolution: 256
m_ShaderVariantLogLevel: 0

Просмотреть файл

@ -1,53 +1,53 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!114 &11400000
MonoBehaviour:
m_ObjectHideFlags: 0
m_CorrespondingSourceObject: {fileID: 0}
m_PrefabInstance: {fileID: 0}
m_PrefabAsset: {fileID: 0}
m_GameObject: {fileID: 0}
m_Enabled: 1
m_EditorHideFlags: 0
m_Script: {fileID: 11500000, guid: bf2edee5c58d82540a51f03df9d42094, type: 3}
m_Name: UniversalRP-MediumQuality
m_EditorClassIdentifier:
k_AssetVersion: 5
k_AssetPreviousVersion: 5
m_RendererType: 1
m_RendererData: {fileID: 0}
m_RendererDataList:
- {fileID: 11400000, guid: 4a8e21d5c33334b11b34a596161b9360, type: 2}
m_DefaultRendererIndex: 0
m_RequireDepthTexture: 0
m_RequireOpaqueTexture: 0
m_OpaqueDownsampling: 1
m_SupportsHDR: 0
m_MSAA: 1
m_RenderScale: 1
m_MainLightRenderingMode: 1
m_MainLightShadowsSupported: 1
m_MainLightShadowmapResolution: 2048
m_AdditionalLightsRenderingMode: 1
m_AdditionalLightsPerObjectLimit: 4
m_AdditionalLightShadowsSupported: 0
m_AdditionalLightsShadowmapResolution: 512
m_ShadowDistance: 50
m_ShadowCascades: 0
m_Cascade2Split: 0.25
m_Cascade4Split: {x: 0.067, y: 0.2, z: 0.467}
m_ShadowDepthBias: 1
m_ShadowNormalBias: 1
m_SoftShadowsSupported: 0
m_UseSRPBatcher: 1
m_SupportsDynamicBatching: 0
m_MixedLightingSupported: 1
m_DebugLevel: 0
m_ColorGradingMode: 0
m_ColorGradingLutSize: 32
m_ShadowType: 1
m_LocalShadowsSupported: 0
m_LocalShadowsAtlasResolution: 256
m_MaxPixelLights: 0
m_ShadowAtlasResolution: 256
m_ShaderVariantLogLevel: 0

Просмотреть файл

@ -25,9 +25,9 @@ namespace RosMessageTypes.Ur3Moveit
public override List<byte[]> SerializationStatements()
{
var listOfSerializations = new List<byte[]>();
listOfSerializations.Add(BitConverter.GetBytes(trajectory.Length));
foreach(var entry in trajectory)
foreach (var entry in trajectory)
listOfSerializations.Add(entry.Serialize());
return listOfSerializations;
@ -35,11 +35,11 @@ namespace RosMessageTypes.Ur3Moveit
public override int Deserialize(byte[] data, int offset)
{
var trajectoryArrayLength = DeserializeLength(data, offset);
offset += 4;
this.trajectory= new Moveit.RobotTrajectory[trajectoryArrayLength];
for(var i = 0; i < trajectoryArrayLength; i++)
this.trajectory = new Moveit.RobotTrajectory[trajectoryArrayLength];
for (var i = 0; i < trajectoryArrayLength; i++)
{
this.trajectory[i] = new Moveit.RobotTrajectory();
offset = this.trajectory[i].Deserialize(data, offset);

Просмотреть файл

@ -25,9 +25,9 @@ namespace RosMessageTypes.Ur3Moveit
public override List<byte[]> SerializationStatements()
{
var listOfSerializations = new List<byte[]>();
listOfSerializations.Add(BitConverter.GetBytes(trajectories.Length));
foreach(var entry in trajectories)
foreach (var entry in trajectories)
listOfSerializations.Add(entry.Serialize());
return listOfSerializations;
@ -35,11 +35,11 @@ namespace RosMessageTypes.Ur3Moveit
public override int Deserialize(byte[] data, int offset)
{
var trajectoriesArrayLength = DeserializeLength(data, offset);
offset += 4;
this.trajectories= new Moveit.RobotTrajectory[trajectoriesArrayLength];
for(var i = 0; i < trajectoriesArrayLength; i++)
this.trajectories = new Moveit.RobotTrajectory[trajectoriesArrayLength];
for (var i = 0; i < trajectoriesArrayLength; i++)
{
this.trajectories[i] = new Moveit.RobotTrajectory();
offset = this.trajectories[i].Deserialize(data, offset);

Просмотреть файл

@ -10,11 +10,11 @@ using UnityEngine.Perception.Randomization.Samplers;
[AddRandomizerMenu("Perception/Light Randomizer")]
public class LightRandomizer : Randomizer
{
public FloatParameter lightIntensityParameter = new FloatParameter{ value = new UniformSampler(.9f, 1.1f)};
public FloatParameter lightIntensityParameter = new FloatParameter { value = new UniformSampler(.9f, 1.1f) };
public FloatParameter rotationX = new FloatParameter { value = new UniformSampler(40, 80)};
public FloatParameter rotationX = new FloatParameter { value = new UniformSampler(40, 80) };
public FloatParameter rotationY = new FloatParameter { value = new UniformSampler(-180, 180)};
public FloatParameter rotationY = new FloatParameter { value = new UniformSampler(-180, 180) };
public ColorRgbParameter lightColorParameter = new ColorRgbParameter
{

Просмотреть файл

@ -7,5 +7,5 @@ using UnityEngine.Perception.Randomization.Randomizers;
[RequireComponent(typeof(Light))]
public class LightRandomizerTag : RandomizerTag
{
}

Просмотреть файл

@ -34,7 +34,7 @@ public class RobotArmObjectPositionRandomizer : Randomizer
public GameObject robotBase;
public float minRobotReachability;
public float maxRobotReachability;
private FloatParameter random = new FloatParameter {value = new UniformSampler(0f, 1f)};
private FloatParameter random = new FloatParameter { value = new UniformSampler(0f, 1f) };
private SurfaceObjectPlacer placer;

Просмотреть файл

@ -17,7 +17,7 @@ public class UniformPoseRandomizer : Randomizer
*/
public float positionRange = 0.005f;
public float rotationRangeDegrees = 1.0f;
public float rotationRangeDegrees = 1.0f;
public FloatParameter random; //(-1, 1)

Просмотреть файл

@ -11,7 +11,7 @@ using UnityEngine.Perception.Randomization.Samplers;
[AddRandomizerMenu("Perception/Y Rotation Randomizer")]
public class YRotationRandomizer : Randomizer
{
public FloatParameter rotationRange = new FloatParameter { value = new UniformSampler(0f, 360f)}; // in range (0, 1)
public FloatParameter rotationRange = new FloatParameter { value = new UniformSampler(0f, 360f) }; // in range (0, 1)
protected override void OnIterationStart()
{

Просмотреть файл

@ -11,7 +11,7 @@ public class PoseEstimationScenarioConstants : ScenarioConstants
public class PoseEstimationScenario : PerceptionScenario<PoseEstimationScenarioConstants>
{
public bool automaticIteration = true;
bool m_ShouldIterate;
public void Move()
@ -26,7 +26,7 @@ public class PoseEstimationScenario : PerceptionScenario<PoseEstimationScenarioC
}
protected override bool isIterationComplete => m_ShouldIterate || automaticIteration && currentIterationFrame >= 1;
protected override bool isScenarioComplete => currentIteration >= constants.totalFrames;
protected override void OnComplete()

Просмотреть файл

@ -26,7 +26,7 @@ public class TrajectoryPlanner : MonoBehaviour
// Multipliers correspond to the URDF mimic tag for each joint
private float[] multipliers = new float[] { -1f, -1f, -1f, 1f, 1f, 1f };
// Orientation is hardcoded for this example so the gripper is always directly above the placement object
private readonly Quaternion pickOrientation = new Quaternion(-0.5f,-0.5f,0.5f,-0.5f);
private readonly Quaternion pickOrientation = new Quaternion(-0.5f, -0.5f, 0.5f, -0.5f);
// Variables required for ROS communication
public string rosServiceName = "ur3_moveit";
@ -84,14 +84,16 @@ public class TrajectoryPlanner : MonoBehaviour
/// <summary>
/// Button callback for setting the robot to default position
/// </summary>
public void Initialize(){
public void Initialize()
{
StartCoroutine(MoveToInitialPosition());
}
/// <summary>
/// Button callback for the Cube Randomization
/// </summary>
public void RandomizeCube(){
public void RandomizeCube()
{
scenario.Move();
ActualPos.text = target.transform.position.ToString();
ActualRot.text = target.transform.eulerAngles.ToString();
@ -100,7 +102,8 @@ public class TrajectoryPlanner : MonoBehaviour
/// <summary>
/// Button callback for the Pose Estimation
/// </summary>
public void PoseEstimation(){
public void PoseEstimation()
{
Debug.Log("Capturing screenshot...");
InitializeButton.interactable = false;
@ -136,16 +139,16 @@ public class TrajectoryPlanner : MonoBehaviour
{
var tempXDrive = jointArticulationBodies[i].xDrive;
float currentRotation = tempXDrive.target;
float rotationChange = rotationSpeed * Time.fixedDeltaTime;
if (currentRotation > 0f) rotationChange *= -1;
if (Mathf.Abs(currentRotation) < rotationChange)
rotationChange = 0;
else
isRotationFinished = false;
// the new xDrive target is the currentRotation summed with the desired change
float rotationGoal = currentRotation + rotationChange;
tempXDrive.target = rotationGoal;
@ -170,10 +173,10 @@ public class TrajectoryPlanner : MonoBehaviour
RosMessageTypes.Sensor.Image rosImage = new RosMessageTypes.Sensor.Image(new RosMessageTypes.Std.Header(), imageWidth, imageHeight, "RGBA", isBigEndian, step, imageData);
PoseEstimationServiceRequest poseServiceRequest = new PoseEstimationServiceRequest(rosImage);
ros.SendServiceMessage<PoseEstimationServiceResponse>("pose_estimation_srv", poseServiceRequest, PoseEstimationCallback);
}
}
void PoseEstimationCallback(PoseEstimationServiceResponse response)
{
{
if (response != null)
{
// The position output by the model is the position of the cube relative to the camera so we need to extract its global position
@ -185,7 +188,8 @@ public class TrajectoryPlanner : MonoBehaviour
EstimatedPos.text = estimatedPosition.ToString();
EstimatedRot.text = estimatedRotation.eulerAngles.ToString();
}
else {
else
{
InitializeButton.interactable = true;
RandomizeButton.interactable = true;
}
@ -218,7 +222,7 @@ public class TrajectoryPlanner : MonoBehaviour
UR3MoveitJoints CurrentJointConfig()
{
UR3MoveitJoints joints = new UR3MoveitJoints();
joints.joint_00 = jointArticulationBodies[0].xDrive.target;
joints.joint_01 = jointArticulationBodies[1].xDrive.target;
joints.joint_02 = jointArticulationBodies[2].xDrive.target;
@ -286,18 +290,18 @@ public class TrajectoryPlanner : MonoBehaviour
if (response.trajectories != null)
{
// For every trajectory plan returned
for (int poseIndex = 0 ; poseIndex < response.trajectories.Length; poseIndex++)
for (int poseIndex = 0; poseIndex < response.trajectories.Length; poseIndex++)
{
// For every robot pose in trajectory plan
for (int jointConfigIndex = 0 ; jointConfigIndex < response.trajectories[poseIndex].joint_trajectory.points.Length; jointConfigIndex++)
for (int jointConfigIndex = 0; jointConfigIndex < response.trajectories[poseIndex].joint_trajectory.points.Length; jointConfigIndex++)
{
var jointPositions = response.trajectories[poseIndex].joint_trajectory.points[jointConfigIndex].positions;
float[] result = jointPositions.Select(r=> (float)r * Mathf.Rad2Deg).ToArray();
float[] result = jointPositions.Select(r => (float)r * Mathf.Rad2Deg).ToArray();
// Set the joint values for every joint
for (int joint = 0; joint < jointArticulationBodies.Length; joint++)
{
var joint1XDrive = jointArticulationBodies[joint].xDrive;
var joint1XDrive = jointArticulationBodies[joint].xDrive;
joint1XDrive.target = result[joint];
jointArticulationBodies[joint].xDrive = joint1XDrive;
}
@ -306,11 +310,13 @@ public class TrajectoryPlanner : MonoBehaviour
}
// Close the gripper if completed executing the trajectory for the Grasp pose
if (poseIndex == (int)Poses.Grasp){
if (poseIndex == (int)Poses.Grasp)
{
StartCoroutine(IterateToGrip(true));
yield return new WaitForSeconds(jointAssignmentWait);
}
else if (poseIndex == (int)Poses.Place){
else if (poseIndex == (int)Poses.Place)
{
yield return new WaitForSeconds(poseAssignmentWait);
// Open the gripper to place the target cube
StartCoroutine(IterateToGrip(false));
@ -338,16 +344,16 @@ public class TrajectoryPlanner : MonoBehaviour
string arm_link = shoulder_link + "/upper_arm_link";
jointArticulationBodies[1] = robot.transform.Find(arm_link).GetComponent<ArticulationBody>();
string elbow_link = arm_link + "/forearm_link";
jointArticulationBodies[2] = robot.transform.Find(elbow_link).GetComponent<ArticulationBody>();
string forearm_link = elbow_link + "/wrist_1_link";
jointArticulationBodies[3] = robot.transform.Find(forearm_link).GetComponent<ArticulationBody>();
string wrist_link = forearm_link + "/wrist_2_link";
jointArticulationBodies[4] = robot.transform.Find(wrist_link).GetComponent<ArticulationBody>();
string hand_link = wrist_link + "/wrist_3_link";
jointArticulationBodies[5] = robot.transform.Find(hand_link).GetComponent<ArticulationBody>();
@ -369,12 +375,12 @@ public class TrajectoryPlanner : MonoBehaviour
{
// Get ROS connection static instance
ros = ROSConnection.instance;
// Assign UI elements
InitializeButton = GameObject.Find("ROSObjects/Canvas/ButtonPanel/DefaultButton").GetComponent<Button>();
RandomizeButton = GameObject.Find("ROSObjects/Canvas/ButtonPanel/RandomButton").GetComponent<Button>();
ServiceButton = GameObject.Find("ROSObjects/Canvas/ButtonPanel/ServiceButton").GetComponent<Button>();
ActualPos = GameObject.Find("ROSObjects/Canvas/PositionPanel/ActualPosField").GetComponent<Text>();
ActualRot = GameObject.Find("ROSObjects/Canvas/PositionPanel/ActualRotField").GetComponent<Text>();
EstimatedPos = GameObject.Find("ROSObjects/Canvas/PositionPanel/EstPosField").GetComponent<Text>();

Просмотреть файл

@ -1,14 +1,14 @@
# Robotiq 140mm 2-Finger-Adaptive-Gripper
This package contains the URDF files describing the 140mm stroke gripper from Robotiq, also known as series **C3**.
To test the gripper URDF description, type:
```
roslaunch robotiq_2f_140_gripper_visualization test_2f_140_model.launch
```
## Robot Visual
![140](https://user-images.githubusercontent.com/8356912/49428409-463f8580-f7a6-11e8-8278-5246acdc5c14.png)
## Robot Collision
![1402](https://user-images.githubusercontent.com/8356912/49428407-463f8580-f7a6-11e8-9c4e-df69e478f107.png)

Просмотреть файл

@ -1,16 +1,16 @@
<?xml version="1.0"?>
<package format="2">
<name>robotiq_2f_140_gripper_visualization</name>
<version>1.0.0</version>
<description>Robotiq ARG 2-Finger 140mm model C3 description package</description>
<license>BSD</license>
<url type="website">http://ros.org/wiki/robotiq</url>
<author email="ryan@rwsinnet.com">Ryan Sinnet</author>
<maintainer email="ros@robotiq.com">Jean-Philippe Roberge</maintainer>
<maintainer email="daniels.ordoez@gmail.com">Daniel Ordonez</maintainer>
<buildtool_depend>catkin</buildtool_depend>
<exec_depend>urdf</exec_depend>
</package>

Просмотреть файл

@ -1,49 +1,49 @@
<?xml version="1.0"?>
<robot xmlns:xacro="http://ros.org/wiki/xacro">
<xacro:macro name="robotiq_arg2f_base_link" params="prefix connected_to">
<joint name="${prefix}measurement_tool_joint" type="fixed">
<!-- The parent link must be read from the robot model it is attached to. -->
<parent link="${connected_to}"/>
<child link="${prefix}robotiq_arg2f_base_link"/>
<!-- The tool is directly attached to the flange. -->
<origin rpy="0 0 0" xyz="0 0 0"/>
</joint>
<link name="${prefix}robotiq_arg2f_base_link">
<inertial>
<origin xyz="8.625E-08 -4.6583E-06 0.03145" rpy="0 0 0" />
<mass value="0.22652" />
<inertia ixx="0.00020005" ixy="-4.2442E-10" ixz="-2.9069E-10" iyy="0.00017832" iyz="-3.4402E-08" izz="0.00013478" />
</inertial>
<visual>
<origin xyz="0 0 0" rpy="0 0 0" />
<geometry>
<mesh filename="package://robotiq_2f_140_gripper_visualization/meshes/visual/robotiq_arg2f_base_link.stl" />
</geometry>
<material name="">
<color rgba="0.1 0.1 0.1 1" />
</material>
</visual>
<collision>
<origin xyz="0 0 0" rpy="0 0 0" />
<geometry>
<mesh filename="package://robotiq_2f_140_gripper_visualization/meshes/collision/robotiq_arg2f_base_link.stl" />
</geometry>
</collision>
</link>
</xacro:macro>
<xacro:macro name="finger_joints" params="prefix fingerprefix reflect">
<xacro:outer_finger_joint prefix="${prefix}" fingerprefix="${fingerprefix}"/>
<xacro:inner_knuckle_joint prefix="${prefix}" fingerprefix="${fingerprefix}" reflect="${reflect}"/>
<xacro:inner_finger_joint prefix="${prefix}" fingerprefix="${fingerprefix}"/>
<xacro:inner_finger_pad_joint prefix="${prefix}" fingerprefix="${fingerprefix}"/>
</xacro:macro>
<xacro:macro name="finger_links" params="prefix fingerprefix stroke">
<xacro:outer_knuckle prefix="${prefix}" fingerprefix="${fingerprefix}" stroke="${stroke}"/>
<xacro:outer_finger prefix="${prefix}" fingerprefix="${fingerprefix}" stroke="${stroke}"/>
<xacro:inner_finger prefix="${prefix}" fingerprefix="${fingerprefix}" stroke="${stroke}"/>
<xacro:inner_finger_pad prefix="${prefix}" fingerprefix="${fingerprefix}"/>
<xacro:inner_knuckle prefix="${prefix}" fingerprefix="${fingerprefix}" stroke="${stroke}"/>
</xacro:macro>
</robot>

Просмотреть файл

@ -1,25 +1,25 @@
<?xml version="1.0"?>
<robot xmlns:xacro="http://ros.org/wiki/xacro" name="ur3_with_gripper">
<!-- ur3 -->
<xacro:include filename="$(find ur_description)/urdf/ur3.urdf.xacro" />
<!-- end-effector -->
<xacro:include filename="$(find robotiq_2f_140_gripper_visualization)/urdf/robotiq_arg2f_140_model_macro.xacro" />
<xacro:robotiq_arg2f_140 prefix=""/>
<!-- ur3 -->
<!-- The ur3 xacro must be included with passing parameters -->
<xacro:ur3_robot prefix="" joint_limited="true"/>
<!-- end-effector -->
<!-- Here we include the end-effector by setting the parameters -->
<!-- TODO: check end-effector link name of robot -->
<xacro:measurement_tool prefix="" connected_to="tool0"/>
<!-- define the ur3's position and orientation in the world coordinate system -->
<link name="world" />
<joint name="world_joint" type="fixed">
<parent link="world" />
<child link="base_link" /> <!-- TODO: check base_link name of robot -->
<origin xyz="0.0 0.0 0.0" rpy="0.0 0.0 0.0" />
</joint>
</robot>

Просмотреть файл

@ -1,226 +1,226 @@
<?xml version="1.0"?>
<robot xmlns:xacro="http://ros.org/wiki/xacro">
<xacro:include filename="$(find robotiq_2f_140_gripper_visualization)/urdf/robotiq_arg2f_transmission.xacro" />
<xacro:macro name="outer_knuckle" params="prefix fingerprefix stroke">
<link name="${prefix}${fingerprefix}_outer_knuckle">
<inertial>
<origin xyz="-0.000200000000003065 0.0199435877845359 0.0292245259211331" rpy="0 0 0" />
<mass value="0.00853198276973456" />
<inertia
ixx="2.89328108496468E-06"
ixy="-1.57935047237397E-19"
ixz="-1.93980378593255E-19"
iyy="1.86719750325683E-06"
iyz="-1.21858577871576E-06"
izz="1.21905238907251E-06" />
</inertial>
<visual>
<origin xyz="0 0 0" rpy="0 0 0" />
<geometry>
<mesh filename="package://robotiq_2f_140_gripper_visualization/meshes/visual/robotiq_arg2f_${stroke}_outer_knuckle.stl" />
</geometry>
<material name="">
<color rgba="0.792156862745098 0.819607843137255 0.933333333333333 1" />
</material>
</visual>
<collision>
<origin xyz="0 0 0" rpy="0 0 0" />
<geometry>
<mesh filename="package://robotiq_2f_140_gripper_visualization/meshes/collision/robotiq_arg2f_${stroke}_outer_knuckle.stl" />
</geometry>
</collision>
</link>
</xacro:macro>
<xacro:macro name="outer_finger" params="prefix fingerprefix stroke">
<link name="${prefix}${fingerprefix}_outer_finger">
<inertial>
<origin xyz="0.00030115855001899 0.0373907951953854 -0.0208027427000385" rpy="0 0 0" />
<mass value="0.022614240507152" />
<inertia
ixx="1.52518312458174E-05"
ixy="9.76583423954399E-10"
ixz="-5.43838577022588E-10"
iyy="6.17694243867776E-06"
iyz="6.78636130740228E-06"
izz="1.16494917907219E-05" />
</inertial>
<visual>
<origin xyz="0 0 0" rpy="0 0 0" />
<geometry>
<mesh filename="package://robotiq_2f_140_gripper_visualization/meshes/visual/robotiq_arg2f_${stroke}_outer_finger.stl" />
</geometry>
<material name="">
<color rgba="0.1 0.1 0.1 1" />
</material>
</visual>
<collision>
<origin xyz="0 0 0" rpy="0 0 0" />
<geometry>
<mesh filename="package://robotiq_2f_140_gripper_visualization/meshes/collision/robotiq_arg2f_${stroke}_outer_finger.stl" />
</geometry>
</collision>
</link>
</xacro:macro>
<xacro:macro name="inner_knuckle" params="prefix fingerprefix stroke">
<link name="${prefix}${fingerprefix}_inner_knuckle">
<inertial>
<origin xyz="0.000123011831763771 0.0507850843201817 0.00103968640075166" rpy="0 0 0" />
<mass value="0.0271177346495152" />
<inertia
ixx="2.61910379223783E-05"
ixy="-2.43616858946494E-07"
ixz="-6.37789906117123E-09"
iyy="2.8270243746167E-06"
iyz="-5.37200748039765E-07"
izz="2.83695868220296E-05" />
</inertial>
<visual>
<origin xyz="0 0 0" rpy="0 0 0" />
<geometry>
<mesh filename="package://robotiq_2f_140_gripper_visualization/meshes/visual/robotiq_arg2f_${stroke}_inner_knuckle.stl" />
</geometry>
<material name="">
<color rgba="0.1 0.1 0.1 1" />
</material>
</visual>
<collision>
<origin xyz="0 0 0" rpy="0 0 0" />
<geometry>
<mesh filename="package://robotiq_2f_140_gripper_visualization/meshes/collision/robotiq_arg2f_${stroke}_inner_knuckle.stl" />
</geometry>
</collision>
</link>
</xacro:macro>
<xacro:macro name="inner_finger" params="prefix fingerprefix stroke">
<link name="${prefix}${fingerprefix}_inner_finger">
<inertial>
<origin xyz="0.000299999999999317 0.0160078233491243 -0.0136945669206257" rpy="0 0 0" />
<mass value="0.0104003125914103" />
<inertia
ixx="2.71909453810972E-06"
ixy="1.35402465472579E-21"
ixz="-7.1817349065269E-22"
iyy="7.69100314106116E-07"
iyz="6.74715432769696E-07"
izz="2.30315190420171E-06" />
</inertial>
<visual>
<origin xyz="0 0 0" rpy="0 0 0" />
<geometry>
<mesh filename="package://robotiq_2f_140_gripper_visualization/meshes/visual/robotiq_arg2f_${stroke}_inner_finger.stl" />
</geometry>
<material name="">
<color rgba="0.1 0.1 0.1 1" />
</material>
</visual>
<collision>
<origin xyz="0 0 0" rpy="0 0 0" />
<geometry>
<mesh filename="package://robotiq_2f_140_gripper_visualization/meshes/collision/robotiq_arg2f_${stroke}_inner_finger.stl" />
</geometry>
</collision>
</link>
</xacro:macro>
<!-- Finger pad link, the default are the "big pads" with rubber-->
<xacro:macro name="inner_finger_pad" params="prefix fingerprefix">
<link name="${prefix}${fingerprefix}_inner_finger_pad">
<visual>
<origin xyz="0 0 0" rpy="0 0 0" />
<geometry>
<box size="0.027 0.065 0.0075"/>
</geometry>
<material name="">
<color rgba="0.9 0.9 0.9 1" />
</material>
</visual>
<collision>
<origin xyz="0 0 0" rpy="0 0 0" />
<geometry>
<box size="0.03 0.07 0.0075"/>
</geometry>
<material name="">
<color rgba="0.9 0.0 0.0 1" />
</material>
</collision>
</link>
</xacro:macro>
<xacro:macro name="outer_finger_joint" params="prefix fingerprefix">
<joint name="${prefix}${fingerprefix}_outer_finger_joint" type="fixed">
<origin xyz="0 0.01821998610742 0.0260018192872234" rpy="0 0 0" />
<parent link="${prefix}${fingerprefix}_outer_knuckle" />
<child link="${prefix}${fingerprefix}_outer_finger" />
<axis xyz="1 0 0" />
</joint>
</xacro:macro>
<xacro:macro name="inner_knuckle_joint" params="prefix fingerprefix reflect">
<joint name="${prefix}${fingerprefix}_inner_knuckle_joint" type="revolute">
<origin xyz="0 ${reflect * -0.0127} 0.06142" rpy="${pi / 2 + .725} 0 ${(reflect - 1) * pi / 2}" />
<parent link="${prefix}robotiq_arg2f_base_link" />
<child link="${prefix}${fingerprefix}_inner_knuckle" />
<axis xyz="1 0 0" />
<limit lower="-0.8757" upper="0.8757" velocity="2.0" effort="1000" />
<mimic joint="${prefix}finger_joint" multiplier="-1" offset="0" />
</joint>
</xacro:macro>
<xacro:macro name="inner_finger_joint" params="prefix fingerprefix">
<joint name="${prefix}${fingerprefix}_inner_finger_joint" type="revolute">
<origin xyz="0 0.0817554015893473 -0.0282203446692936" rpy="-0.725 0 0" />
<parent link="${prefix}${fingerprefix}_outer_finger" />
<child link="${prefix}${fingerprefix}_inner_finger" />
<axis xyz="1 0 0" />
<limit lower="-0.8757" upper="0.8757" velocity="2.0" effort="1000" />
<mimic joint="${prefix}finger_joint" multiplier="1" offset="0" />
</joint>
</xacro:macro>
<xacro:macro name="inner_finger_pad_joint" params="prefix fingerprefix">
<joint name="${prefix}${fingerprefix}_inner_finger_pad_joint" type="fixed">
<origin xyz="0 0.0457554015893473 -0.0272203446692936" rpy="0 0 0" />
<parent link="${prefix}${fingerprefix}_inner_finger" />
<child link="${prefix}${fingerprefix}_inner_finger_pad" />
<axis xyz="0 0 1" />
</joint>
</xacro:macro>
<xacro:include filename="$(find robotiq_2f_140_gripper_visualization)/urdf/robotiq_arg2f.xacro" />
<xacro:macro name="finger_joint" params="prefix">
<joint name="${prefix}finger_joint" type="revolute">
<origin xyz="0 -0.030601 0.054905" rpy="${pi / 2 + .725} 0 0" />
<parent link="${prefix}robotiq_arg2f_base_link" />
<child link="${prefix}left_outer_knuckle" />
<axis xyz="-1 0 0" />
<limit lower="0" upper="0.7" velocity="2.0" effort="1000" />
</joint>
<xacro:finger_joints prefix="${prefix}" fingerprefix="left" reflect="1.0"/>
</xacro:macro>
<xacro:macro name="right_outer_knuckle_joint" params="prefix">
<joint name="${prefix}right_outer_knuckle_joint" type="revolute">
<origin xyz="0 0.030601 0.054905" rpy="${pi / 2 + .725} 0 ${pi}" />
<parent link="${prefix}robotiq_arg2f_base_link" />
<child link="${prefix}right_outer_knuckle" />
<axis xyz="1 0 0" />
<limit lower="-0.725" upper="0.725" velocity="2.0" effort="1000" />
<mimic joint="${prefix}finger_joint" multiplier="-1" offset="0" />
</joint>
<xacro:finger_joints prefix="${prefix}" fingerprefix="right" reflect="-1.0"/>
</xacro:macro>
<xacro:macro name="robotiq_arg2f_140" params="prefix connected_to">
<xacro:robotiq_arg2f_base_link connected_to = "${connected_to}" prefix="${prefix}"/>
<xacro:finger_links prefix="${prefix}" fingerprefix="left" stroke="140"/>
<xacro:finger_links prefix="${prefix}" fingerprefix="right" stroke="140"/>
<xacro:finger_joint prefix="${prefix}"/>
<xacro:right_outer_knuckle_joint prefix="${prefix}"/>
<xacro:robotiq_arg2f_transmission prefix="${prefix}"/>
</xacro:macro>
</robot>

Просмотреть файл

@ -1,19 +1,19 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!11 &1
AudioManager:
m_ObjectHideFlags: 0
serializedVersion: 2
m_Volume: 1
Rolloff Scale: 1
Doppler Factor: 1
Default Speaker Mode: 2
m_SampleRate: 0
m_DSPBufferSize: 1024
m_VirtualVoiceCount: 512
m_RealVoiceCount: 32
m_SpatializerPlugin:
m_AmbisonicDecoderPlugin:
m_DisableAudio: 0
m_VirtualizeEffects: 1
m_RequestedDSPBufferSize: 0

Просмотреть файл

@ -1,6 +1,6 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!236 &1
ClusterInputManager:
m_ObjectHideFlags: 0
m_Inputs: []

Просмотреть файл

@ -1,11 +1,11 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!1045 &1
EditorBuildSettings:
m_ObjectHideFlags: 0
serializedVersion: 2
m_Scenes:
- enabled: 1
path: Assets/Scenes/SampleScene.unity
guid: d1c3109bdb54ad54c8a2b2838528e640
m_configObjects: {}
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!1045 &1
EditorBuildSettings:
m_ObjectHideFlags: 0
serializedVersion: 2
m_Scenes:
- enabled: 1
path: Assets/Scenes/SampleScene.unity
guid: d1c3109bdb54ad54c8a2b2838528e640
m_configObjects: {}

View file

@ -1,68 +1,68 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!30 &1
GraphicsSettings:
m_ObjectHideFlags: 0
serializedVersion: 13
m_Deferred:
m_Mode: 1
m_Shader: {fileID: 69, guid: 0000000000000000f000000000000000, type: 0}
m_DeferredReflections:
m_Mode: 1
m_Shader: {fileID: 74, guid: 0000000000000000f000000000000000, type: 0}
m_ScreenSpaceShadows:
m_Mode: 1
m_Shader: {fileID: 64, guid: 0000000000000000f000000000000000, type: 0}
m_LegacyDeferred:
m_Mode: 1
m_Shader: {fileID: 63, guid: 0000000000000000f000000000000000, type: 0}
m_DepthNormals:
m_Mode: 1
m_Shader: {fileID: 62, guid: 0000000000000000f000000000000000, type: 0}
m_MotionVectors:
m_Mode: 1
m_Shader: {fileID: 75, guid: 0000000000000000f000000000000000, type: 0}
m_LightHalo:
m_Mode: 1
m_Shader: {fileID: 105, guid: 0000000000000000f000000000000000, type: 0}
m_LensFlare:
m_Mode: 1
m_Shader: {fileID: 102, guid: 0000000000000000f000000000000000, type: 0}
m_AlwaysIncludedShaders:
- {fileID: 7, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 15104, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 15105, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 15106, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 10753, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 10770, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 10783, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 16000, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 16001, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 17000, guid: 0000000000000000f000000000000000, type: 0}
m_PreloadedShaders: []
m_SpritesDefaultMaterial: {fileID: 10754, guid: 0000000000000000f000000000000000,
type: 0}
m_CustomRenderPipeline: {fileID: 11400000, guid: 19ba41d7c0026c3459d37c2fe90c55a0,
type: 2}
m_TransparencySortMode: 0
m_TransparencySortAxis: {x: 0, y: 0, z: 1}
m_DefaultRenderingPath: 1
m_DefaultMobileRenderingPath: 1
m_TierSettings: []
m_LightmapStripping: 0
m_FogStripping: 0
m_InstancingStripping: 0
m_LightmapKeepPlain: 1
m_LightmapKeepDirCombined: 1
m_LightmapKeepDynamicPlain: 1
m_LightmapKeepDynamicDirCombined: 1
m_LightmapKeepShadowMask: 1
m_LightmapKeepSubtractive: 1
m_FogKeepLinear: 1
m_FogKeepExp: 1
m_FogKeepExp2: 1
m_AlbedoSwatchInfos: []
m_LightsUseLinearIntensity: 1
m_LightsUseColorTemperature: 0
m_LogWhenShaderIsCompiled: 0
m_AllowEnlightenSupportForUpgradedProject: 1
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!30 &1
GraphicsSettings:
m_ObjectHideFlags: 0
serializedVersion: 13
m_Deferred:
m_Mode: 1
m_Shader: {fileID: 69, guid: 0000000000000000f000000000000000, type: 0}
m_DeferredReflections:
m_Mode: 1
m_Shader: {fileID: 74, guid: 0000000000000000f000000000000000, type: 0}
m_ScreenSpaceShadows:
m_Mode: 1
m_Shader: {fileID: 64, guid: 0000000000000000f000000000000000, type: 0}
m_LegacyDeferred:
m_Mode: 1
m_Shader: {fileID: 63, guid: 0000000000000000f000000000000000, type: 0}
m_DepthNormals:
m_Mode: 1
m_Shader: {fileID: 62, guid: 0000000000000000f000000000000000, type: 0}
m_MotionVectors:
m_Mode: 1
m_Shader: {fileID: 75, guid: 0000000000000000f000000000000000, type: 0}
m_LightHalo:
m_Mode: 1
m_Shader: {fileID: 105, guid: 0000000000000000f000000000000000, type: 0}
m_LensFlare:
m_Mode: 1
m_Shader: {fileID: 102, guid: 0000000000000000f000000000000000, type: 0}
m_AlwaysIncludedShaders:
- {fileID: 7, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 15104, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 15105, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 15106, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 10753, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 10770, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 10783, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 16000, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 16001, guid: 0000000000000000f000000000000000, type: 0}
- {fileID: 17000, guid: 0000000000000000f000000000000000, type: 0}
m_PreloadedShaders: []
m_SpritesDefaultMaterial: {fileID: 10754, guid: 0000000000000000f000000000000000,
type: 0}
m_CustomRenderPipeline: {fileID: 11400000, guid: 19ba41d7c0026c3459d37c2fe90c55a0,
type: 2}
m_TransparencySortMode: 0
m_TransparencySortAxis: {x: 0, y: 0, z: 1}
m_DefaultRenderingPath: 1
m_DefaultMobileRenderingPath: 1
m_TierSettings: []
m_LightmapStripping: 0
m_FogStripping: 0
m_InstancingStripping: 0
m_LightmapKeepPlain: 1
m_LightmapKeepDirCombined: 1
m_LightmapKeepDynamicPlain: 1
m_LightmapKeepDynamicDirCombined: 1
m_LightmapKeepShadowMask: 1
m_LightmapKeepSubtractive: 1
m_FogKeepLinear: 1
m_FogKeepExp: 1
m_FogKeepExp2: 1
m_AlbedoSwatchInfos: []
m_LightsUseLinearIntensity: 1
m_LightsUseColorTemperature: 0
m_LogWhenShaderIsCompiled: 0
m_AllowEnlightenSupportForUpgradedProject: 1

View file

@ -1,487 +1,487 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!13 &1
InputManager:
m_ObjectHideFlags: 0
serializedVersion: 2
m_Axes:
- serializedVersion: 3
m_Name: Horizontal
descriptiveName:
descriptiveNegativeName:
negativeButton: left
positiveButton: right
altNegativeButton: a
altPositiveButton: d
gravity: 3
dead: 0.001
sensitivity: 3
snap: 1
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Vertical
descriptiveName:
descriptiveNegativeName:
negativeButton: down
positiveButton: up
altNegativeButton: s
altPositiveButton: w
gravity: 3
dead: 0.001
sensitivity: 3
snap: 1
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Fire1
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: left ctrl
altNegativeButton:
altPositiveButton: mouse 0
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Fire2
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: left alt
altNegativeButton:
altPositiveButton: mouse 1
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Fire3
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: left shift
altNegativeButton:
altPositiveButton: mouse 2
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Jump
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: space
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Mouse X
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton:
altNegativeButton:
altPositiveButton:
gravity: 0
dead: 0
sensitivity: 0.1
snap: 0
invert: 0
type: 1
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Mouse Y
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton:
altNegativeButton:
altPositiveButton:
gravity: 0
dead: 0
sensitivity: 0.1
snap: 0
invert: 0
type: 1
axis: 1
joyNum: 0
- serializedVersion: 3
m_Name: Mouse ScrollWheel
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton:
altNegativeButton:
altPositiveButton:
gravity: 0
dead: 0
sensitivity: 0.1
snap: 0
invert: 0
type: 1
axis: 2
joyNum: 0
- serializedVersion: 3
m_Name: Horizontal
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton:
altNegativeButton:
altPositiveButton:
gravity: 0
dead: 0.19
sensitivity: 1
snap: 0
invert: 0
type: 2
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Vertical
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton:
altNegativeButton:
altPositiveButton:
gravity: 0
dead: 0.19
sensitivity: 1
snap: 0
invert: 1
type: 2
axis: 1
joyNum: 0
- serializedVersion: 3
m_Name: Fire1
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: joystick button 0
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Fire2
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: joystick button 1
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Fire3
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: joystick button 2
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Jump
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: joystick button 3
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Submit
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: return
altNegativeButton:
altPositiveButton: joystick button 0
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Submit
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: enter
altNegativeButton:
altPositiveButton: space
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Cancel
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: escape
altNegativeButton:
altPositiveButton: joystick button 1
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Enable Debug Button 1
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: left ctrl
altNegativeButton:
altPositiveButton: joystick button 8
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Enable Debug Button 2
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: backspace
altNegativeButton:
altPositiveButton: joystick button 9
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Reset
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: left alt
altNegativeButton:
altPositiveButton: joystick button 1
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Next
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: page down
altNegativeButton:
altPositiveButton: joystick button 5
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Previous
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: page up
altNegativeButton:
altPositiveButton: joystick button 4
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Validate
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: return
altNegativeButton:
altPositiveButton: joystick button 0
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Persistent
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: right shift
altNegativeButton:
altPositiveButton: joystick button 2
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Multiplier
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: left shift
altNegativeButton:
altPositiveButton: joystick button 3
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Horizontal
descriptiveName:
descriptiveNegativeName:
negativeButton: left
positiveButton: right
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Vertical
descriptiveName:
descriptiveNegativeName:
negativeButton: down
positiveButton: up
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Vertical
descriptiveName:
descriptiveNegativeName:
negativeButton: down
positiveButton: up
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 2
axis: 6
joyNum: 0
- serializedVersion: 3
m_Name: Debug Horizontal
descriptiveName:
descriptiveNegativeName:
negativeButton: left
positiveButton: right
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 2
axis: 5
joyNum: 0
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!13 &1
InputManager:
m_ObjectHideFlags: 0
serializedVersion: 2
m_Axes:
- serializedVersion: 3
m_Name: Horizontal
descriptiveName:
descriptiveNegativeName:
negativeButton: left
positiveButton: right
altNegativeButton: a
altPositiveButton: d
gravity: 3
dead: 0.001
sensitivity: 3
snap: 1
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Vertical
descriptiveName:
descriptiveNegativeName:
negativeButton: down
positiveButton: up
altNegativeButton: s
altPositiveButton: w
gravity: 3
dead: 0.001
sensitivity: 3
snap: 1
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Fire1
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: left ctrl
altNegativeButton:
altPositiveButton: mouse 0
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Fire2
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: left alt
altNegativeButton:
altPositiveButton: mouse 1
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Fire3
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: left shift
altNegativeButton:
altPositiveButton: mouse 2
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Jump
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: space
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Mouse X
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton:
altNegativeButton:
altPositiveButton:
gravity: 0
dead: 0
sensitivity: 0.1
snap: 0
invert: 0
type: 1
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Mouse Y
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton:
altNegativeButton:
altPositiveButton:
gravity: 0
dead: 0
sensitivity: 0.1
snap: 0
invert: 0
type: 1
axis: 1
joyNum: 0
- serializedVersion: 3
m_Name: Mouse ScrollWheel
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton:
altNegativeButton:
altPositiveButton:
gravity: 0
dead: 0
sensitivity: 0.1
snap: 0
invert: 0
type: 1
axis: 2
joyNum: 0
- serializedVersion: 3
m_Name: Horizontal
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton:
altNegativeButton:
altPositiveButton:
gravity: 0
dead: 0.19
sensitivity: 1
snap: 0
invert: 0
type: 2
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Vertical
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton:
altNegativeButton:
altPositiveButton:
gravity: 0
dead: 0.19
sensitivity: 1
snap: 0
invert: 1
type: 2
axis: 1
joyNum: 0
- serializedVersion: 3
m_Name: Fire1
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: joystick button 0
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Fire2
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: joystick button 1
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Fire3
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: joystick button 2
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Jump
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: joystick button 3
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Submit
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: return
altNegativeButton:
altPositiveButton: joystick button 0
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Submit
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: enter
altNegativeButton:
altPositiveButton: space
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Cancel
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: escape
altNegativeButton:
altPositiveButton: joystick button 1
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Enable Debug Button 1
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: left ctrl
altNegativeButton:
altPositiveButton: joystick button 8
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Enable Debug Button 2
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: backspace
altNegativeButton:
altPositiveButton: joystick button 9
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Reset
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: left alt
altNegativeButton:
altPositiveButton: joystick button 1
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Next
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: page down
altNegativeButton:
altPositiveButton: joystick button 5
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Previous
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: page up
altNegativeButton:
altPositiveButton: joystick button 4
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Validate
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: return
altNegativeButton:
altPositiveButton: joystick button 0
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Persistent
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: right shift
altNegativeButton:
altPositiveButton: joystick button 2
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Multiplier
descriptiveName:
descriptiveNegativeName:
negativeButton:
positiveButton: left shift
altNegativeButton:
altPositiveButton: joystick button 3
gravity: 0
dead: 0
sensitivity: 0
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Horizontal
descriptiveName:
descriptiveNegativeName:
negativeButton: left
positiveButton: right
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Vertical
descriptiveName:
descriptiveNegativeName:
negativeButton: down
positiveButton: up
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 0
axis: 0
joyNum: 0
- serializedVersion: 3
m_Name: Debug Vertical
descriptiveName:
descriptiveNegativeName:
negativeButton: down
positiveButton: up
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 2
axis: 6
joyNum: 0
- serializedVersion: 3
m_Name: Debug Horizontal
descriptiveName:
descriptiveNegativeName:
negativeButton: left
positiveButton: right
altNegativeButton:
altPositiveButton:
gravity: 1000
dead: 0.001
sensitivity: 1000
snap: 0
invert: 0
type: 2
axis: 5
joyNum: 0

View file

@ -1,91 +1,91 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!126 &1
NavMeshProjectSettings:
m_ObjectHideFlags: 0
serializedVersion: 2
areas:
- name: Walkable
cost: 1
- name: Not Walkable
cost: 1
- name: Jump
cost: 2
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
m_LastAgentTypeID: -887442657
m_Settings:
- serializedVersion: 2
agentTypeID: 0
agentRadius: 0.5
agentHeight: 2
agentSlope: 45
agentClimb: 0.75
ledgeDropHeight: 0
maxJumpAcrossDistance: 0
minRegionArea: 2
manualCellSize: 0
cellSize: 0.16666667
manualTileSize: 0
tileSize: 256
accuratePlacement: 0
debug:
m_Flags: 0
m_SettingNames:
- Humanoid
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!126 &1
NavMeshProjectSettings:
m_ObjectHideFlags: 0
serializedVersion: 2
areas:
- name: Walkable
cost: 1
- name: Not Walkable
cost: 1
- name: Jump
cost: 2
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
- name:
cost: 1
m_LastAgentTypeID: -887442657
m_Settings:
- serializedVersion: 2
agentTypeID: 0
agentRadius: 0.5
agentHeight: 2
agentSlope: 45
agentClimb: 0.75
ledgeDropHeight: 0
maxJumpAcrossDistance: 0
minRegionArea: 2
manualCellSize: 0
cellSize: 0.16666667
manualTileSize: 0
tileSize: 256
accuratePlacement: 0
debug:
m_Flags: 0
m_SettingNames:
- Humanoid

View file

@ -1,56 +1,56 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!19 &1
Physics2DSettings:
m_ObjectHideFlags: 0
serializedVersion: 4
m_Gravity: {x: 0, y: -9.81}
m_DefaultMaterial: {fileID: 0}
m_VelocityIterations: 8
m_PositionIterations: 3
m_VelocityThreshold: 1
m_MaxLinearCorrection: 0.2
m_MaxAngularCorrection: 8
m_MaxTranslationSpeed: 100
m_MaxRotationSpeed: 360
m_BaumgarteScale: 0.2
m_BaumgarteTimeOfImpactScale: 0.75
m_TimeToSleep: 0.5
m_LinearSleepTolerance: 0.01
m_AngularSleepTolerance: 2
m_DefaultContactOffset: 0.01
m_JobOptions:
serializedVersion: 2
useMultithreading: 0
useConsistencySorting: 0
m_InterpolationPosesPerJob: 100
m_NewContactsPerJob: 30
m_CollideContactsPerJob: 100
m_ClearFlagsPerJob: 200
m_ClearBodyForcesPerJob: 200
m_SyncDiscreteFixturesPerJob: 50
m_SyncContinuousFixturesPerJob: 50
m_FindNearestContactsPerJob: 100
m_UpdateTriggerContactsPerJob: 100
m_IslandSolverCostThreshold: 100
m_IslandSolverBodyCostScale: 1
m_IslandSolverContactCostScale: 10
m_IslandSolverJointCostScale: 10
m_IslandSolverBodiesPerJob: 50
m_IslandSolverContactsPerJob: 50
m_AutoSimulation: 1
m_QueriesHitTriggers: 1
m_QueriesStartInColliders: 1
m_CallbacksOnDisable: 1
m_ReuseCollisionCallbacks: 1
m_AutoSyncTransforms: 0
m_AlwaysShowColliders: 0
m_ShowColliderSleep: 1
m_ShowColliderContacts: 0
m_ShowColliderAABB: 0
m_ContactArrowScale: 0.2
m_ColliderAwakeColor: {r: 0.5686275, g: 0.95686275, b: 0.54509807, a: 0.7529412}
m_ColliderAsleepColor: {r: 0.5686275, g: 0.95686275, b: 0.54509807, a: 0.36078432}
m_ColliderContactColor: {r: 1, g: 0, b: 1, a: 0.6862745}
m_ColliderAABBColor: {r: 1, g: 1, b: 0, a: 0.2509804}
m_LayerCollisionMatrix: ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!19 &1
Physics2DSettings:
m_ObjectHideFlags: 0
serializedVersion: 4
m_Gravity: {x: 0, y: -9.81}
m_DefaultMaterial: {fileID: 0}
m_VelocityIterations: 8
m_PositionIterations: 3
m_VelocityThreshold: 1
m_MaxLinearCorrection: 0.2
m_MaxAngularCorrection: 8
m_MaxTranslationSpeed: 100
m_MaxRotationSpeed: 360
m_BaumgarteScale: 0.2
m_BaumgarteTimeOfImpactScale: 0.75
m_TimeToSleep: 0.5
m_LinearSleepTolerance: 0.01
m_AngularSleepTolerance: 2
m_DefaultContactOffset: 0.01
m_JobOptions:
serializedVersion: 2
useMultithreading: 0
useConsistencySorting: 0
m_InterpolationPosesPerJob: 100
m_NewContactsPerJob: 30
m_CollideContactsPerJob: 100
m_ClearFlagsPerJob: 200
m_ClearBodyForcesPerJob: 200
m_SyncDiscreteFixturesPerJob: 50
m_SyncContinuousFixturesPerJob: 50
m_FindNearestContactsPerJob: 100
m_UpdateTriggerContactsPerJob: 100
m_IslandSolverCostThreshold: 100
m_IslandSolverBodyCostScale: 1
m_IslandSolverContactCostScale: 10
m_IslandSolverJointCostScale: 10
m_IslandSolverBodiesPerJob: 50
m_IslandSolverContactsPerJob: 50
m_AutoSimulation: 1
m_QueriesHitTriggers: 1
m_QueriesStartInColliders: 1
m_CallbacksOnDisable: 1
m_ReuseCollisionCallbacks: 1
m_AutoSyncTransforms: 0
m_AlwaysShowColliders: 0
m_ShowColliderSleep: 1
m_ShowColliderContacts: 0
m_ShowColliderAABB: 0
m_ContactArrowScale: 0.2
m_ColliderAwakeColor: {r: 0.5686275, g: 0.95686275, b: 0.54509807, a: 0.7529412}
m_ColliderAsleepColor: {r: 0.5686275, g: 0.95686275, b: 0.54509807, a: 0.36078432}
m_ColliderContactColor: {r: 1, g: 0, b: 1, a: 0.6862745}
m_ColliderAABBColor: {r: 1, g: 1, b: 0, a: 0.2509804}
m_LayerCollisionMatrix: ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff

View file

@ -1,27 +1,27 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!1386491679 &1
PresetManager:
m_ObjectHideFlags: 0
m_DefaultList:
- type:
m_NativeTypeID: 108
m_ManagedTypePPtr: {fileID: 0}
m_ManagedTypeFallback:
defaultPresets:
- m_Preset: {fileID: 2655988077585873504, guid: 463065d4f17d1d94d848aa127b94dd43,
type: 2}
- type:
m_NativeTypeID: 1020
m_ManagedTypePPtr: {fileID: 0}
m_ManagedTypeFallback:
defaultPresets:
- m_Preset: {fileID: 2655988077585873504, guid: e7689051185d12f4298e1ebb2693a29f,
type: 2}
- type:
m_NativeTypeID: 1006
m_ManagedTypePPtr: {fileID: 0}
m_ManagedTypeFallback:
defaultPresets:
- m_Preset: {fileID: 2655988077585873504, guid: e8537455c6c08bd4e8bf0be3707da685,
type: 2}
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!1386491679 &1
PresetManager:
m_ObjectHideFlags: 0
m_DefaultList:
- type:
m_NativeTypeID: 108
m_ManagedTypePPtr: {fileID: 0}
m_ManagedTypeFallback:
defaultPresets:
- m_Preset: {fileID: 2655988077585873504, guid: 463065d4f17d1d94d848aa127b94dd43,
type: 2}
- type:
m_NativeTypeID: 1020
m_ManagedTypePPtr: {fileID: 0}
m_ManagedTypeFallback:
defaultPresets:
- m_Preset: {fileID: 2655988077585873504, guid: e7689051185d12f4298e1ebb2693a29f,
type: 2}
- type:
m_NativeTypeID: 1006
m_ManagedTypePPtr: {fileID: 0}
m_ManagedTypeFallback:
defaultPresets:
- m_Preset: {fileID: 2655988077585873504, guid: e8537455c6c08bd4e8bf0be3707da685,
type: 2}

View file

@ -1,9 +1,9 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!5 &1
TimeManager:
m_ObjectHideFlags: 0
Fixed Timestep: 0.02
Maximum Allowed Timestep: 0.1
m_TimeScale: 1
Maximum Particle Timestep: 0.03
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!5 &1
TimeManager:
m_ObjectHideFlags: 0
Fixed Timestep: 0.02
Maximum Allowed Timestep: 0.1
m_TimeScale: 1
Maximum Particle Timestep: 0.03

View file

@ -1,11 +1,11 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!937362698 &1
VFXManager:
m_ObjectHideFlags: 0
m_IndirectShader: {fileID: 0}
m_CopyBufferShader: {fileID: 0}
m_SortShader: {fileID: 0}
m_RenderPipeSettingsPath:
m_FixedTimeStep: 0.016666668
m_MaxDeltaTime: 0.05
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!937362698 &1
VFXManager:
m_ObjectHideFlags: 0
m_IndirectShader: {fileID: 0}
m_CopyBufferShader: {fileID: 0}
m_SortShader: {fileID: 0}
m_RenderPipeSettingsPath:
m_FixedTimeStep: 0.016666668
m_MaxDeltaTime: 0.05

View file

@ -1,8 +1,8 @@
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!890905787 &1
VersionControlSettings:
m_ObjectHideFlags: 0
m_Mode: Visible Meta Files
m_CollabEditorSettings:
inProgressEnabled: 1
%YAML 1.1
%TAG !u! tag:unity3d.com,2011:
--- !u!890905787 &1
VersionControlSettings:
m_ObjectHideFlags: 0
m_Mode: Visible Meta Files
m_CollabEditorSettings:
inProgressEnabled: 1

View file

@ -1,10 +1,10 @@
{
"m_SettingKeys": [
"VR Device Disabled",
"VR Device User Alert"
],
"m_SettingValues": [
"False",
"False"
]
}
{
"m_SettingKeys": [
"VR Device Disabled",
"VR Device User Alert"
],
"m_SettingValues": [
"False",
"False"
]
}

View file

@ -1,4 +1,4 @@
# Object Pose Estimation Demo
# Object Pose Estimation Demo
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
@ -15,7 +15,7 @@ This tutorial will go through the steps necessary to perform pose estimation wit
- [Part 2: Setting up the scene for data collection](#link-part-2)
- [Part 3: Data Collection and Model Training](#link-part-3)
- [Part 4: Pick-and-Place](#link-part-4)
---
### <a name="link-part-1">[Part 1: Create Unity Scene with Imported URDF](Documentation/1_set_up_the_scene.md)</a>
@ -23,21 +23,21 @@ This tutorial will go through the steps necessary to perform pose estimation wit
<img src="Documentation/Images/0_scene.png" width=400 />
This part includes downloading and installing the Unity Editor, setting up a basic Unity scene, and importing a robot. We will import the [UR3 robot arm](https://www.universal-robots.com/products/ur3-robot) using the [URDF Importer](https://github.com/Unity-Technologies/URDF-Importer) package.
This part includes downloading and installing the Unity Editor, setting up a basic Unity scene, and importing a robot. We will import the [UR3 robot arm](https://www.universal-robots.com/products/ur3-robot) using the [URDF Importer](https://github.com/Unity-Technologies/URDF-Importer) package.
---
### <a name="link-part-2">[Part 2: Set Up the Scene for Data Collection](Documentation/2_set_up_the_data_collection_scene.md)</a>
### <a name="link-part-2">[Part 2: Set Up the Scene for Data Collection](Documentation/2_set_up_the_data_collection_scene.md)</a>
<img src="Documentation/Images/0_data_collection_environment.png" width=400/>
This part focuses on setting up the scene for data collection using the Unity Computer Vision [Perception Package](https://github.com/Unity-Technologies/com.unity.perception). You will learn how to use Perception Package [Randomizers](https://github.com/Unity-Technologies/com.unity.perception/blob/master/com.unity.perception/Documentation~/Randomization/Index.md) to randomize aspects of the scene in order to create variety in the training data.
This part focuses on setting up the scene for data collection using the Unity Computer Vision [Perception Package](https://github.com/Unity-Technologies/com.unity.perception). You will learn how to use Perception Package [Randomizers](https://github.com/Unity-Technologies/com.unity.perception/blob/master/com.unity.perception/Documentation~/Randomization/Index.md) to randomize aspects of the scene in order to create variety in the training data.
If you would like to learn more about Randomizers, and apply domain randomization to this scene more thoroughly, check out our further exercises for the reader [here](Documentation/5_more_randomizers.md).
---
### <a name="link-part-3">[Part 3: Data Collection and Model Training](Documentation/3_data_collection_model_training.md)</a>
### <a name="link-part-3">[Part 3: Data Collection and Model Training](Documentation/3_data_collection_model_training.md)</a>
<img src="Documentation/Images/0_json_environment.png" width=400/>
@ -56,7 +56,7 @@ To measure the success of grasping in simulation using our pre-trained model for
---
### <a name="link-part-4">[Part 4: Pick-and-Place](Documentation/4_pick_and_place.md)</a>
### <a name="link-part-4">[Part 4: Pick-and-Place](Documentation/4_pick_and_place.md)</a>
<img src="Documentation/Gifs/0_demo.gif" width=400/>
@ -64,7 +64,7 @@ To measure the success of grasping in simulation using our pre-trained model for
This part includes the preparation and setup necessary to run a pick-and-place task using MoveIt. Here, the cube pose is predicted by the trained deep learning model. Steps covered include:
* Creating and invoking a motion planning service in ROS
* Sending captured RGB images from our scene to the ROS Pose Estimation node for inference
* Using a Python script to run inference on our trained deep learning model
* Using a Python script to run inference on our trained deep learning model
* Moving Unity Articulation Bodies based on a calculated trajectory
* Controlling a gripping tool to successfully grasp and drop an object.
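To make these steps concrete, the sketch below shows the general shape of the ROS 1 service call that sends a captured RGB image out for pose estimation; the package, service, and type names are illustrative placeholders rather than the demo's actual interface:

```python
#!/usr/bin/env python
# Illustrative sketch only: "pose_estimation", EstimatePose, and the ur3_moveit
# package are hypothetical placeholders for the demo's generated service types;
# the rospy calls themselves are standard ROS 1 APIs.
import rospy

from ur3_moveit.srv import EstimatePose  # hypothetical srv module and type


def request_cube_pose(rgb_image_msg):
    """Send one captured RGB image (sensor_msgs/Image) to the pose estimation
    node and return its reply, which carries the predicted cube pose."""
    rospy.wait_for_service("pose_estimation")
    estimate = rospy.ServiceProxy("pose_estimation", EstimatePose)
    return estimate(rgb_image_msg)
```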
@ -75,7 +75,7 @@ For questions or discussions about Unity Robotics package installations or how t
For feature requests, bugs, or other issues, please file a [GitHub issue](https://github.com/Unity-Technologies/Robotics-Object-Pose-Estimation/issues) using the provided templates and the Robotics team will investigate as soon as possible.
For any other questions or feedback, connect directly with the
For any other questions or feedback, connect directly with the
Robotics team at [unity-robotics@unity3d.com](mailto:unity-robotics@unity3d.com).
## More from Unity Robotics

View file

@ -1,21 +1,21 @@
<?xml version="1.0"?>
<robot xmlns:xacro="http://ros.org/wiki/xacro" name="ur3_with_gripper">
<!-- ur3 -->
<xacro:include filename="$(find ur_description)/urdf/ur3.urdf.xacro" />
<!-- end-effector -->
<xacro:include filename="$(find robotiq_2f_140_gripper_visualization)/urdf/robotiq_arg2f_140_model_macro.xacro" />
<xacro:robotiq_arg2f_140 prefix="" connected_to="tool0"/>
<!-- ur3 -->
<!-- The ur3 xacro must be included with passing parameters -->
<xacro:ur3_robot prefix="" joint_limited="true"/>
<!-- define the ur3's position and orientation in the world coordinate system -->
<link name="world" />
<joint name="world_joint" type="fixed">
<parent link="world" />
<child link="base_link" /> <!-- TODO: check base_link name of robot -->
<origin xyz="0.0 0.0 0.0" rpy="0.0 0.0 0.0" />
</joint>
</robot>
<?xml version="1.0"?>
<robot xmlns:xacro="http://ros.org/wiki/xacro" name="ur3_with_gripper">
<!-- ur3 -->
<xacro:include filename="$(find ur_description)/urdf/ur3.urdf.xacro" />
<!-- end-effector -->
<xacro:include filename="$(find robotiq_2f_140_gripper_visualization)/urdf/robotiq_arg2f_140_model_macro.xacro" />
<xacro:robotiq_arg2f_140 prefix="" connected_to="tool0"/>
<!-- ur3 -->
<!-- The ur3 xacro must be included with passing parameters -->
<xacro:ur3_robot prefix="" joint_limited="true"/>
<!-- define the ur3's position and orientation in the world coordinate system -->
<link name="world" />
<joint name="world_joint" type="fixed">
<parent link="world" />
<child link="base_link" /> <!-- TODO: check base_link name of robot -->
<origin xyz="0.0 0.0 0.0" rpy="0.0 0.0 0.0" />
</joint>
</robot>
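The xacro above attaches the Robotiq 2F-140 gripper macro to the UR3's `tool0` frame and anchors `base_link` to a fixed `world` link. For a quick check of the composed model, a file like this can be expanded to a plain URDF; a minimal sketch, assuming the ROS `xacro` Python package is installed and the included `ur_description` and `robotiq_2f_140_gripper_visualization` packages are on the ROS package path (file paths are illustrative):

```python
# Minimal sketch: expand a xacro file such as the one above into a plain URDF.
# Requires the ROS `xacro` package; all xacro:include'd packages must resolve.
import xacro

doc = xacro.process_file("ur3_with_gripper.xacro")  # path is illustrative
with open("ur3_with_gripper.urdf", "w") as f:
    f.write(doc.toprettyxml(indent="  "))
```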

View file

@ -130,12 +130,10 @@ def run_model_main(image_file_png, model_file_name):
checkpoint = torch.load(model_file_name, map_location=device)
model = PoseEstimationNetwork(is_symetric=False)
model.load_state_dict(checkpoint["model"])
model.to(device)
model.eval()
image = pre_process_image(image_file_png, device)
output_translation, output_orientation = model(torch.stack(image).reshape(-1, 3, 224, 224))
output_translation, output_orientation = output_translation.detach().numpy(), output_orientation.detach().numpy()
return output_orientation, output_translation
# def run_model_main():
# print("hi")
output_translation, output_orientation = model(torch.stack(image).reshape(-1, 3, 224, 224).to(device))
output_translation, output_orientation = output_translation.cpu().detach().numpy(), output_orientation.cpu().detach().numpy()
return output_orientation, output_translation
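Taken together, the change above moves the network and the input batch onto `device` and copies the outputs back to the CPU before the NumPy conversion, so inference also works when CUDA is available. A minimal sketch of the resulting function, reconstructed from the lines above rather than copied from the repository (`PoseEstimationNetwork` and `pre_process_image` are the project's own helpers, and the `device` selection line is assumed to match the rest of the script):

```python
import torch

def run_model_main(image_file_png, model_file_name):
    # Reconstructed sketch of the function after this change; in the actual
    # script `device` is defined earlier, and PoseEstimationNetwork /
    # pre_process_image are the project's own helpers.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    checkpoint = torch.load(model_file_name, map_location=device)
    model = PoseEstimationNetwork(is_symetric=False)
    model.load_state_dict(checkpoint["model"])
    model.to(device)  # move the network to the GPU when one is available
    model.eval()

    image = pre_process_image(image_file_png, device)
    batch = torch.stack(image).reshape(-1, 3, 224, 224).to(device)  # batch follows the model
    output_translation, output_orientation = model(batch)

    # Copy results back to the CPU before converting them to NumPy arrays.
    output_translation = output_translation.cpu().detach().numpy()
    output_orientation = output_orientation.cpu().detach().numpy()
    return output_orientation, output_translation
```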

View file

@ -5,18 +5,18 @@ License Type: BSD 3-clause<br/>
```
Copyright (c)
Redistribution and use in source and binary forms, with or without modification,
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
@ -40,18 +40,18 @@ License Type: BSD 3-clause<br/>
```
Copyright (c)
Redistribution and use in source and binary forms, with or without modification,
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
@ -107,18 +107,18 @@ License Type: BSD 3-clause<br/>
```
Copyright (c)
Redistribution and use in source and binary forms, with or without modification,
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND