Reverting Gitbook auto-commiting to master. damn...

This commit is contained in:
Mike James 2018-07-05 11:40:52 +02:00
Родитель 454e9bc948
Коммит a4a6cb3303
23 изменённых файлов: 2470 добавлений и 59 удалений

17
ARM/README.md Normal file
Просмотреть файл

@ -0,0 +1,17 @@
# What's in the template?
* Web App
* Azure Function App
* Azure Storage
* Azure Search
* Azure CosmosDB
* Computer Vision (Cognitive Service)
* Application Insights
Correct settings are automatically provided to the Function App and to the Web App
**BEFORE DEPLOYMENT, please enter desired parameter values in AzureDeploy.parameters.json**
# Next Steps
* API Management

Просмотреть файл

@ -0,0 +1,70 @@
@echo off
setlocal
rem ------------------------------------------------------------------------------------------
rem Packages the deployed site source (..\wwwroot) into a downloadable zip.
rem %1 - password substituted into the generated publish.cmd / PublishSettings files.
rem Flow: commit wwwroot into a temp git repo, push it to a local bare repo, clone a clean
rem copy, expand deployment templates via sed, zip the result, then remove all git state.
rem NOTE(review): assumes a Kudu-style environment (%HOME%, %WEBSITE_SITE_NAME%, sed, rm
rem and 7za available on the App Service instance) -- confirm before reusing elsewhere.
rem ------------------------------------------------------------------------------------------
SET password=%1
SET repoName=srcRepo
rem file:// URL built from the drive letter and home path, e.g. file:///D/home/site/srcRepo
SET repoUrl=file:///%HOMEDRIVE:~0,1%/%HOMEPATH:~1%/site/%repoName%
SET download=bot-src
echo %repoUrl%
rem cd to project root
pushd ..\wwwroot
rem init git and commit the current site content
call git init
call git config user.name "botframework"
call git config user.email "util@botframework.com"
call git add .
call git commit -m "prepare to download source"
call git remote add srcRepo %repoUrl%
popd
rem init upstream (bare repo that acts as the push target)
pushd %HOME%\site
mkdir srcRepo
cd srcRepo
call git init --bare
popd
rem push to upstream
pushd ..\wwwroot
call git push --set-upstream srcRepo master
popd
rem clone srcRepo into the download folder
pushd %HOME%\site
call git clone %repoUrl% %download%
rem delete .git so the download is plain source, not a repository
cd %download%
call rm -r -f .git
popd
rem prepare for publish: expand templates with site name / solution / password substituted
pushd %HOME%\site\%download%
mkdir Properties\PublishProfiles
pushd Properties\PublishProfiles
type ..\..\PostDeployScripts\publishProfile.xml.template | sed -e s/\{WEB_SITE_NAME\}/%WEBSITE_SITE_NAME%/g > %WEBSITE_SITE_NAME%-Web-Deploy.pubxml
popd
rem pick up the (single) solution file name in the download root
set SOLUTION_NAME=
for /f "delims=" %%a in ('dir /b *.sln') do @set SOLUTION_NAME=%%a
type PostDeployScripts\publish.cmd.template | sed -e s/\{SOLUTION_NAME\}/%SOLUTION_NAME%/g | sed -e s/\{PUBLISH_PROFILE\}/%WEBSITE_SITE_NAME%-Web-Deploy.pubxml/g | sed -e s/\{PASSWORD\}/%password%/g > publish.cmd
type PostDeployScripts\publishSettings.xml.template | sed -e s/\{WEB_SITE_NAME\}/%WEBSITE_SITE_NAME%/g | sed -e s/\{PASSWORD\}/%password%/g > PostDeployScripts\%WEBSITE_SITE_NAME%.PublishSettings
popd
rem prepare the zip file
%HOMEDRIVE%\7zip\7za a %HOME%\site\%download%.zip %HOME%\site\%download%\*
rem cleanup git stuff
pushd ..\wwwroot
call rm -r -f .git
popd
rem remove the working copies; only the zip remains
pushd %HOME%\site
call rm -r -f %download%
call rm -r -f %repoName%
popd
endlocal

Просмотреть файл

@ -0,0 +1,23 @@
@echo off
setlocal
rem Kudu post-deployment hook: in-place deployment with the actual build currently disabled.
rem Clearing DEPLOYMENT_SOURCE and setting IN_PLACE_DEPLOYMENT=1 tells Kudu to deploy
rem straight from wwwroot instead of a separate repository folder.
set DEPLOYMENT_SOURCE=
set IN_PLACE_DEPLOYMENT=1
if exist ..\wwwroot\deploy.cmd (
pushd ..\wwwroot
rem call deploy.cmd -- build intentionally disabled; remove 'rem' to re-enable
popd
)
rem kick off the csproj build (currently a no-op; only the timestamp below is recorded)
echo record deployment timestamp
date /t >> ..\deployment.log
time /t >> ..\deployment.log
echo ---------------------- >> ..\deployment.log
echo Deployment done
endlocal

Просмотреть файл

@ -0,0 +1,44 @@
@echo off
setlocal
rem ------------------------------------------------------------------------------------------
rem setupGithubRemoteRepo [remoteUser] [personalAccessToken] [projName{optional}]
rem create and populate a GitHub repository with the ABS code instance
rem
rem remoteUser:          user account name of the personal access token
rem personalAccessToken: the personal access token used to access the GitHub REST API
rem                      (requires "repo" scope)
rem projName:            the name of the repository to create (defaults to WEBSITE_SITE_NAME)
rem ------------------------------------------------------------------------------------------
set remoteUrl=https://api.github.com
set remoteUser=%1
set remotePwd=%2
set projName=%3
if '%projName%'=='' set projName=%WEBSITE_SITE_NAME%
rem push URL with embedded credentials so the git push below needs no prompt
set repoUrl=https://%remoteUser%:%remotePwd%@github.com/%remoteUser%/%projName%.git
rem use curl to create the repository through the GitHub API
pushd ..\wwwroot
type PostDeployScripts\githubProject.json.template | sed -e s/\{WEB_SITE_NAME\}/%projName%/g > %TEMP%\githubProject.json
call curl -H "Content-Type: application/json" -u %remoteUser%:%remotePwd% -d "@%TEMP%\githubProject.json" -X POST %remoteUrl%/user/repos
rem rm %TEMP%\githubProject.json
popd
rem cd to project root
pushd ..\wwwroot
rem init git, commit the site content and push it to the new repository
call git init
call git config user.name "%remoteUser%"
call git config user.password "%remotePwd%"
call git config user.email "util@botframework.com"
call git add .
call git commit -m "prepare to setup source control"
call git push %repoUrl% master
popd
rem cleanup git stuff
pushd ..\wwwroot
call rm -r -f .git
popd
endlocal

Просмотреть файл

@ -0,0 +1,50 @@
@echo off
setlocal
rem ------------------------------------------------------------------------------------------
rem setupVsoRemoteRepo [vsoRemote] [vsoUserName] [vsoPersonalAccessToken] [projName{optional}]
rem create and populate VSO git repo for the ABS code instance
rem
rem vsoRemote:              url of the VSO site (e.g. https://awesomebot.visualstudio.com )
rem vsoUserName:            user account name of the personal access token
rem vsoPersonalAccessToken: the personal access token used to access the VSO REST API
rem projName:               the name of the project to create (defaults to WEBSITE_SITE_NAME)
rem ------------------------------------------------------------------------------------------
set remoteUrl=%1
set remoteUser=%2
set remotePwd=%3
set projName=%4
if '%projName%'=='' set projName=%WEBSITE_SITE_NAME%
set vstsRoot=%remoteUrl%
rem strip the leading "https://" (8 chars) and rebuild the URLs with embedded credentials
set repoUrl=https://%remoteUser%:%remotePwd%@%remoteUrl:~8%/_git/%projName%
set vstsCreateProject=https://%remoteUser%:%remotePwd%@%remoteUrl:~8%/defaultcollection/_apis/projects?api-version=3.0
rem use curl to create project
pushd ..\wwwroot
type PostDeployScripts\vsoProject.json.template | sed -e s/\{WEB_SITE_NAME\}/%projName%/g > %TEMP%\vsoProject.json
call curl -H "Content-Type: application/json" -d "@%TEMP%\vsoProject.json" -X POST %vstsCreateProject%
rm %TEMP%\vsoProject.json
rem sleep for 15 seconds for the creation to complete, this is a wild guess
call sleep 15
popd
rem cd to project root
pushd ..\wwwroot
rem init git, commit the site content and push it to the new project repo
call git init
call git config user.name "%remoteUser%"
call git config user.password "%remotePwd%"
call git config user.email "util@botframework.com"
call git add .
call git commit -m "prepare to setup source control"
call git push %repoUrl% master
popd
rem cleanup git stuff
pushd ..\wwwroot
call rm -r -f .git
popd
endlocal

Просмотреть файл

@ -0,0 +1,14 @@
@echo off
setlocal
rem Kudu post-deployment hook: run the site's own deploy.cmd in-place, if one exists.
rem Clearing DEPLOYMENT_SOURCE and setting IN_PLACE_DEPLOYMENT=1 makes Kudu deploy
rem straight from wwwroot instead of a separate repository folder.
set DEPLOYMENT_SOURCE=
set IN_PLACE_DEPLOYMENT=1
if not exist ..\wwwroot\deploy.cmd goto :skipDeploy
pushd ..\wwwroot
call deploy.cmd
popd
:skipDeploy
endlocal

Просмотреть файл

@ -0,0 +1,116 @@
@if "%SCM_TRACE_LEVEL%" NEQ "4" @echo off
:: ----------------------
:: KUDU Deployment Script
:: Version: 1.0.15
:: Kudu-generated deploy script: restores, builds and syncs Microsoft.Bot.Sample.LuisBot.
:: ----------------------
:: Prerequisites
:: -------------
:: Verify node.js installed (needed to install/run kuduSync further down)
where node 2>nul >nul
IF %ERRORLEVEL% NEQ 0 (
echo Missing node.js executable, please install node.js, if already installed make sure it can be reached from current environment.
goto error
)
:: Setup
:: -----
:: Delayed expansion so !ERRORLEVEL! reflects the current value inside ( ) blocks.
setlocal enabledelayedexpansion
SET ARTIFACTS=%~dp0%..\artifacts
IF NOT DEFINED DEPLOYMENT_SOURCE (
SET DEPLOYMENT_SOURCE=%~dp0%.
)
IF NOT DEFINED DEPLOYMENT_TARGET (
SET DEPLOYMENT_TARGET=%ARTIFACTS%\wwwroot
)
:: Manifests let kuduSync compute what changed between deployments.
IF NOT DEFINED NEXT_MANIFEST_PATH (
SET NEXT_MANIFEST_PATH=%ARTIFACTS%\manifest
IF NOT DEFINED PREVIOUS_MANIFEST_PATH (
SET PREVIOUS_MANIFEST_PATH=%ARTIFACTS%\manifest
)
)
IF NOT DEFINED KUDU_SYNC_CMD (
:: Install kudu sync
echo Installing Kudu Sync
call npm install kudusync -g --silent
IF !ERRORLEVEL! NEQ 0 goto error
:: Locally just running "kuduSync" would also work
SET KUDU_SYNC_CMD=%appdata%\npm\kuduSync.cmd
)
IF NOT DEFINED DEPLOYMENT_TEMP (
SET DEPLOYMENT_TEMP=%temp%\___deployTemp%random%
SET CLEAN_LOCAL_DEPLOYMENT_TEMP=true
)
IF DEFINED CLEAN_LOCAL_DEPLOYMENT_TEMP (
IF EXIST "%DEPLOYMENT_TEMP%" rd /s /q "%DEPLOYMENT_TEMP%"
mkdir "%DEPLOYMENT_TEMP%"
)
:: Default to the VS2015 (MSBuild 14.0) toolchain unless the caller overrides it.
IF DEFINED MSBUILD_PATH goto MsbuildPathDefined
SET MSBUILD_PATH=%ProgramFiles(x86)%\MSBuild\14.0\Bin\MSBuild.exe
:MsbuildPathDefined
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
:: Deployment
:: ----------
echo Handling .NET Web Application deployment.
:: 1. Restore NuGet packages
:: (solution name is baked in by the script generator; the IF guards the empty case)
IF /I "Microsoft.Bot.Sample.LuisBot.sln" NEQ "" (
call :ExecuteCmd nuget restore "%DEPLOYMENT_SOURCE%\Microsoft.Bot.Sample.LuisBot.sln"
IF !ERRORLEVEL! NEQ 0 goto error
)
:: 2. Build to the temporary path
IF /I "%IN_PLACE_DEPLOYMENT%" NEQ "1" (
call :ExecuteCmd "%MSBUILD_PATH%" "%DEPLOYMENT_SOURCE%\Microsoft.Bot.Sample.LuisBot.csproj" /nologo /verbosity:m /t:Build /t:pipelinePreDeployCopyAllFilesToOneFolder /p:_PackageTempDir="%DEPLOYMENT_TEMP%";AutoParameterizationWebConfigConnectionStrings=false;Configuration=Release;UseSharedCompilation=false /p:SolutionDir="%DEPLOYMENT_SOURCE%\.\\" %SCM_BUILD_ARGS%
) ELSE (
call :ExecuteCmd "%MSBUILD_PATH%" "%DEPLOYMENT_SOURCE%\Microsoft.Bot.Sample.LuisBot.csproj" /nologo /verbosity:m /t:Build /p:AutoParameterizationWebConfigConnectionStrings=false;Configuration=Release;UseSharedCompilation=false /p:SolutionDir="%DEPLOYMENT_SOURCE%\.\\" %SCM_BUILD_ARGS%
)
IF !ERRORLEVEL! NEQ 0 goto error
:: 3. KuduSync (skipped for in-place deployment, where source and target are the same)
IF /I "%IN_PLACE_DEPLOYMENT%" NEQ "1" (
call :ExecuteCmd "%KUDU_SYNC_CMD%" -v 50 -f "%DEPLOYMENT_TEMP%" -t "%DEPLOYMENT_TARGET%" -n "%NEXT_MANIFEST_PATH%" -p "%PREVIOUS_MANIFEST_PATH%" -i ".git;.hg;.deployment;deploy.cmd"
IF !ERRORLEVEL! NEQ 0 goto error
)
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
goto end
:: Execute command routine that will echo out when error
:ExecuteCmd
setlocal
set _CMD_=%*
call %_CMD_%
if "%ERRORLEVEL%" NEQ "0" echo Failed exitCode=%ERRORLEVEL%, command=%_CMD_%
exit /b %ERRORLEVEL%
:error
endlocal
echo An error has occurred during web site deployment.
call :exitSetErrorLevel
call :exitFromFunction 2>nul
:exitSetErrorLevel
exit /b 1
:: "()" below is an intentionally invalid statement: executing it inside a called
:: context aborts the whole script immediately while keeping the errorlevel set above.
:exitFromFunction
()
:end
endlocal
echo Finished successfully.

136
README.md
Просмотреть файл

@ -1,90 +1,108 @@
# README
![Banner](Resources/WelcomeBanner.png)
![Banner](.gitbook/assets/welcomebanner.png)
<img src="Resources/Design/Design%20Board%20Final.png">
![](https://github.com/MikeCodesDotNET/Mobile-Cloud-Workshop/tree/35f9f8a6612d4432090ff39dc804ce89ffc20e36/Resources/Design/Design%20Board%20Final.png)
## Mobile Cloud Workshop
# Mobile Cloud Workshop
Welcome to the Microsoft Mobile Cloud Workshop. We've created this workshop to showcase the speed and ease at which you can leverage Microsoft's cloud platform to build Data-Driven Intelligent Apps. The workshop will give you insights and **hands-on labs** on how to leverage innovative scalable Cloud Backend Services for your applications.
**Who is it for?**
#### Who is it for?
Although you can go and explore the content and walkthrough by yourself, the richer learning experience is attending a session led by us. So let's see, if we can schedule something!
[Get in contact](https://mobilecloudworkshop.slack.com)
### Key Technologies
&nbsp;
| [App Service](https://azure.microsoft.com/en-us/services/app-service/) | ![](.gitbook/assets/architecture.png) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| [Cosmos DB](https://azure.microsoft.com/en-us/services/cosmos-db/) | |
| [API Management](https://azure.microsoft.com/en-us/services/api-management/) | |
| [Azure Storage](https://azure.microsoft.com/en-us/services/storage/) | |
| [Azure Functions](https://azure.microsoft.com/en-us/services/functions/) | |
| [Azure Search](https://azure.microsoft.com/en-us/services/search/) | |
| [Cognitive Services](https://azure.microsoft.com/en-us/services/cognitive-services/) | |
| [Bot Service](https://azure.microsoft.com/en-us/services/bot-service/) | |
| [App Center](https://appcenter.ms/) | |
| [Active Directory](https://azure.microsoft.com/en-us/services/active-directory-b2c/) | |
| [Xamarin.Forms](https://www.xamarin.com/forms/) | |
## Key Technologies
**Build Status**
<table>
<tr>
<td nowrap><a href="https://azure.microsoft.com/en-us/services/app-service/">App Service</a></td>
<td rowspan="11"><img src="Resources/Architecture.png"></td>
</tr>
<tr>
<td nowrap><a href="https://azure.microsoft.com/en-us/services/cosmos-db/">Cosmos DB</a></td>
</tr>
<tr>
<td nowrap><a href="https://azure.microsoft.com/en-us/services/api-management/">API Management</a></td>
</tr>
<tr>
<td nowrap><a href="https://azure.microsoft.com/en-us/services/storage/">Azure Storage</a></td>
</tr>
<tr>
<td nowrap><a href="https://azure.microsoft.com/en-us/services/functions/">Azure Functions</a></td>
</tr>
<tr>
<td nowrap><a href="https://azure.microsoft.com/en-us/services/search/">Azure Search</a></td>
</tr>
<tr>
<td nowrap><a href="https://azure.microsoft.com/en-us/services/cognitive-services/">Cognitive Services</a></td>
</tr>
<tr>
<td nowrap><a href="https://azure.microsoft.com/en-us/services/bot-service/">Bot Service</a></td>
</tr>
<tr>
<td nowrap><a href="https://appcenter.ms/">App Center</a></td>
</tr>
<tr>
<td nowrap><a href="https://azure.microsoft.com/en-us/services/active-directory-b2c/">Active Directory</a></td>
</tr>
<tr>
<td nowrap><a href="https://www.xamarin.com/forms/">Xamarin.Forms</a></td>
</tr>
</table>
| iOS | Android |
| :---: | :---: |
##### Build Status
| iOS | Android |
|:---:|:---:|
| [![Build status](https://build.appcenter.ms/v0.1/apps/b9f623cd-4f7d-46cb-9402-c53eeda66986/branches/master/badge)](https://appcenter.ms) | [![Build status](https://build.appcenter.ms/v0.1/apps/038d4f0c-37fc-4aa3-88c1-14ffc3d8d92a/branches/master/badge)](https://appcenter.ms) |
### Guided Walkthrough
&nbsp;
You can find a walkthrough guide for the project that outlines how you too can build and deploy your own version of the app and backend services.
## Guided Walkthrough
> **Hint:** We highly recommend you setup and configure your system _before_ attending one of the guided sessions. Although weve allowed an hour in the morning to assist in trouble shooting configurations, we wont have time to do a fresh installation.
You can find a walkthrough guide for the project that outlines how you too can build and deploy your own version of the app and backend services.
#### [1. Setup](https://github.com/MikeCodesDotNET/Mobile-Cloud-Workshop/tree/35f9f8a6612d4432090ff39dc804ce89ffc20e36/Walkthrough%20Guide/01%20Setup/README.md)
> **Hint:** We highly recommend you setup and configure your system *before* attending one of the guided sessions. Although weve allowed an hour in the morning to assist in trouble shooting configurations, we wont have time to do a fresh installation.
#### [2. Architecture Options](https://github.com/MikeCodesDotNET/Mobile-Cloud-Workshop/tree/35f9f8a6612d4432090ff39dc804ce89ffc20e36/Walkthrough%20Guide/02%20Architecture%20Options/README.md)
### [1. Setup](Walkthrough%20Guide/01%20Setup/)
### [2. Architecture Options](Walkthrough%20Guide/02%20Architecture%20Options)
### [3. Web API & App Services](Walkthrough%20Guide/03%20Web%20API)
### [4. Data Storage](Walkthrough%20Guide/04%20Data%20Storage)
### [5. Search](Walkthrough%20Guide/05%20Search)
### [6. Functions & Cognitive Services](Walkthrough%20Guide/06%20Functions%20and%20Cognitive%20Services)
### [7. API Management](Walkthrough%20Guide/07%20API%20Management)
### [8. Mobile App Overview](Walkthrough%20Guide/08%20Mobile%20Overview)
### [9. Mobile Network Services](Walkthrough%20Guide/09%20Mobile%20Network%20Services)
### [10. Chatbot](Walkthrough%20Guide/10%20Chatbot)
### [11. Authentication](Walkthrough%20Guide/11%20Authentication)
### [12. Analytics](Walkthrough%20Guide/12%20Anayltics/README.md)
#### [3. Web API & App Services](https://github.com/MikeCodesDotNET/Mobile-Cloud-Workshop/tree/35f9f8a6612d4432090ff39dc804ce89ffc20e36/Walkthrough%20Guide/03%20Web%20API/README.md)
&nbsp;
#### [4. Data Storage](https://github.com/MikeCodesDotNET/Mobile-Cloud-Workshop/tree/35f9f8a6612d4432090ff39dc804ce89ffc20e36/Walkthrough%20Guide/04%20Data%20Storage/README.md)
#### [5. Search](https://github.com/MikeCodesDotNET/Mobile-Cloud-Workshop/tree/35f9f8a6612d4432090ff39dc804ce89ffc20e36/Walkthrough%20Guide/05%20Search/README.md)
#### [6. Functions & Cognitive Services](https://github.com/MikeCodesDotNET/Mobile-Cloud-Workshop/tree/35f9f8a6612d4432090ff39dc804ce89ffc20e36/Walkthrough%20Guide/06%20Functions%20and%20Cognitive%20Services/README.md)
#### [7. API Management](https://github.com/MikeCodesDotNET/Mobile-Cloud-Workshop/tree/35f9f8a6612d4432090ff39dc804ce89ffc20e36/Walkthrough%20Guide/07%20API%20Management/README.md)
#### [8. Mobile App Overview](https://github.com/MikeCodesDotNET/Mobile-Cloud-Workshop/tree/35f9f8a6612d4432090ff39dc804ce89ffc20e36/Walkthrough%20Guide/08%20Mobile%20Overview/README.md)
#### [9. Mobile Network Services](https://github.com/MikeCodesDotNET/Mobile-Cloud-Workshop/tree/35f9f8a6612d4432090ff39dc804ce89ffc20e36/Walkthrough%20Guide/09%20Mobile%20Network%20Services/README.md)
#### [10. Chatbot](https://github.com/MikeCodesDotNET/Mobile-Cloud-Workshop/tree/35f9f8a6612d4432090ff39dc804ce89ffc20e36/Walkthrough%20Guide/10%20Chatbot/README.md)
#### [11. Authentication](https://github.com/MikeCodesDotNET/Mobile-Cloud-Workshop/tree/35f9f8a6612d4432090ff39dc804ce89ffc20e36/Walkthrough%20Guide/11%20Authentication/README.md)
#### [12. Analytics](https://github.com/MikeCodesDotNET/Mobile-Cloud-Workshop/tree/35f9f8a6612d4432090ff39dc804ce89ffc20e36/Walkthrough%20Guide/12%20Anayltics/README.md)
### Support
## Support
**Where to find help**
* Create GitHub issues and we'll respond as quickly as possible
* Ping us on [Slack](https://mobilecloudworkshop.slack.com)
### Authors
&nbsp;
| ![Photo](.gitbook/assets/mikejames.png) | ![Photo](.gitbook/assets/robinmanuelthiel.png) |
| :---: | :---: |
| **Mike James** | **Robin-Manuel Thiel** |
| [GitHub](https://github.com/MikeCodesDotNet) | [GitHub](https://github.com/MikeCodesDotNet) |
| [Twitter](https://twitter.com/MikeCodesDotNet) | [Twitter](https://twitter.com/robinmanuelt) |
| [Blog](https://mikecodes.net) | [Blog](https://pumpingco.de/) |
## Authors
#### Contributors
| ![Photo](Resources/mikejames.png) | ![Photo](Resources/robinmanuelthiel.png) |
|:----------------------------------------------:|:--------------------------------------------:|
| **Mike James** | **Robin-Manuel Thiel** |
| [GitHub](https://github.com/MikeCodesDotNet) | [GitHub](https://github.com/MikeCodesDotNet) |
| [Twitter](https://twitter.com/MikeCodesDotNet) | [Twitter](https://twitter.com/robinmanuelt) |
| [Blog](https://mikecodes.net) | [Blog](https://pumpingco.de/) |
* **Richard Erwin** \([GitHub](https://github.com/rerwinx)\)
* **Mohamed Saif** \([GitHub](https://github.com/mohamedsaif)\)
* **Michael Sivers** \([GitHub](https://github.com/msivers)\)
* **Simona Tarantola** \([LinkedIn](https://www.linkedin.com/in/simona-tarantola-a654917/)\)
### Contributors
- **Richard Erwin** ([GitHub](https://github.com/rerwinx))
- **Mohamed Saif** ([GitHub](https://github.com/mohamedsaif))
- **Michael Sivers** ([GitHub](https://github.com/msivers))
- **Simona Tarantola** ([LinkedIn](https://www.linkedin.com/in/simona-tarantola-a654917/))

Просмотреть файл

@ -0,0 +1,102 @@
![Banner](Assets/Banner.png)
# Setting Up
> **Hint:** We highly recommend you set up and configure your system *before* attending the mobile workshop. Although we've allowed an hour in the morning to assist in troubleshooting configurations, we won't have time to do a fresh installation.
## Prerequisites
Please bring your own **Windows or Mac** laptop. To participate in this workshop, some prework needs to be done. So please make sure you prepared your environment bringing the following prerequisites.
### Mandatory
- [Microsoft Azure Account with a Subscription](https://aka.ms/azft-mobile)
- [Visual Studio Code](https://code.visualstudio.com/)
- [Azure App Service Extension](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.vscode-azureappservice)
- [Azure Functions Extension](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.vscode-azurefunctions)
- [.NET Core SDK](https://www.microsoft.com/net/download/windows/build) (x64 for Windows)
- [Postman](https://www.getpostman.com/)
### Optional
You can completely follow the workshop without compiling the Mobile App on your own, as the full content is focused on architecting and creating Backends. However, if you already are a Xamarin Developer or always wanted to get started, install the Xamarin Tooling as well.
- [Visual Studio for Windows or Mac](https://www.visualstudio.com/) (Community Version or higher)
- [Xamarin Tooling](https://developer.xamarin.com/guides/cross-platform/getting_started/installation/windows/)
- Android SDK 8.1 Oreo (API Level 27)
- iOS SDK 11
## Prepare your environment
### Download the workshop
You will need some contents of this workshop, like the source code for the backend, on your local machine. So it is a good idea to download the full repository as a ZIP file and extract it somewhere locally. You can easily do this by clicking the ***Clone or download*** button on the start page and selecting ***Download ZIP***.
![Azure Subscription Overview](Assets/GitHubDownload.png)
### Microsoft Azure
Creating a Microsoft Azure Account is easy! Just head over to the [Microsoft Azure for Mobile Landingpage](https://aka.ms/azft-mobile) and create a free Account. If you already have a Microsoft, Outlook, Office 365 or Active Directory Account from you company, you can re-use it.
![Free Azure Account](Assets/FreeAzureAccount.png)
Although the free Account includes a bunch of services that you can use, in this workshop we will work with advanced resources, which we need an Azure Subscription for. An Azure Subscription is basically the way to pay for charged services and can be backed by a Credit Card or a company agreement.
You can check the Subscriptions for your account when visiting the [Azure Portal](https://portal.azure.com) and selecting ***Subscriptions*** from the side menu.
![Azure Subscription Overview](Assets/AzureSubscriptionOverview.png)
If no Subscriptions appear, visit the [Azure Subscription Portal](https://account.azure.com/Subscriptions) to add one.
## Test your configuration
### .NET Core
Most parts of this workshop are written in .NET Core 2.0 and we need to compile a few things from time to time. For this, we need the [.NET Core SDK](https://www.microsoft.com/net/download/windows/build) installed. To check if the installation has been successful, open the *Terminal* or *Command Prompt* on your machine and type in
```bash
dotnet --info
```
If the command line answers you similar like shown in the screenshot below, your machine can now run and compile .NET code.
![Visual Studio Running Xamarin iOS and Android App](Assets/DotnetInfoBash.png)
### Visual Studio Code
Open Visual Studio Code and navigate to the ***Extensions*** tab on the left (or select ***View -> Extensions*** from the menu) and make sure that you installed the **Azure App Service** and **Azure Functions** extensions at least. If you don't have them installed, you can search for them in the Extensions tab.
Note that you need to ***Reload*** Visual Studio Code after installing extensions!
![Screenshot of Visual Studio Code for Reloading Extensions](Assets/VSCodeReloadExtensions.png)
Once the extensions have been installed successfully and Visual Studio Code has been reloaded, you should see a new ***Azure*** tab on the side. Select it and make sure that you are logged in with your Azure account. Please verify that you see at least one of your subscriptions here.
![Screenshot of Visual Studio Code showing Subscriptions in the Azure Tab](Assets/VSCodeAzureSubs.png)
Now you are good to go! Open the recently downloaded and unzipped Workshop folder in Visual Studio Code by clicking ***File -> Open...*** in the menu and select the full folder. The result should look similar to this:
![Screenshot of Visual Studio Code opened the workshop folder](Assets/VSCodeOpenWSFolder.png)
### Mobile Development with Xamarin (optional)
If you want to compile the Xamarin Application on your own, you will need to install the Xamarin tooling. You don't need to have this to follow the workshop, but if you already are a Xamarin Developer or always wanted to get started, give it a go! The setup of a Xamarin development environment can get a little bit tricky and time consuming, as it has dependencies on many SDKs and technologies from different companies.
#### Windows
When working in Windows, Visual Studio will be the best IDE for you! You can check internally if you have a license for the paid versions or even go with the free Community Edition. Both will work for you.
Please [follow this guide](https://developer.xamarin.com/guides/cross-platform/getting_started/installation/windows/) to install the Xamarin Tooling for Visual Studio on Windows and make sure, you have at least Android API Level 16 and an Android Emulator installed.
When working on Windows, you won't be able to build iOS solutions unless you connect your machine with a Mac in your network. To follow this workshop, an iOS configuration is not mandatory! [Follow this guide](https://developer.xamarin.com/guides/ios/getting_started/installation/windows/) if you want to connect to a Mac Build Host anyway.
#### Mac
When using a Mac, the best Xamarin Tooling provides Visual Studio for Mac. Xamarin should be installed during the installation of Visual Studio. Please [follow this guide](https://docs.microsoft.com/en-us/visualstudio/mac/installation) to make sure you don't miss anything.
If you want to build iOS solutions, make sure that XCode is also installed on the same device!
#### Test your installation
To make sure your environment works as expected and is able to compile and execute Xamarin apps, you can simply open the [`ContosoMaintenance.sln`](/ContosoMaintenance.sln) solution with Visual Studio and select the `ContosoFieldService.iOS` or `ContosoFieldService.Droid` project as your Startup project. If the application gets compiled and the app can be started, you are good to go.
![Visual Studio Running Xamarin iOS and Android App](Assets/VSMacRunningiOSandAndroid.png)

Просмотреть файл

@ -0,0 +1,75 @@
![Banner](Assets/Banner.png)
# 1. Architecture Options
Deciding how to architect a solution isn't an easy task and depending on who you ask, you'll likely get very different answers. There are many different ways we could design such a system, and we'll cover a few of them below.
We're looking for a solution that allows us lots of flexibility with minimal maintenance. We're interested in focusing on the business problem rather than deploying and maintaining a set of virtual machines.
It's for the reason that we'll opt to use Platform as a Service (PaaS) as much as possible within our design.
## The real architecture
![Azure Functions Architecture](Assets/WebAPI.png)
Above you can see a high-level overview of our production architecture. Some key decisions:
### Orchestration
We wanted to leverage our .NET skills and build an ASP.NET Web API targeting .NET Core; we have lots of flexibility on where and how to host the code.
We picked Azure App Service as it supports great IDE integration for both Visual Studio PC and Visual Studio Mac, as well as offering all the PaaS goodness we need to focus on other parts of the solution.
### Security
As we're not going to implement Authentication in today's workshop, we decided to add API Management to add a security layer through the use of API keys.
### Data Storage
We've opted for a NoSQL approach using CosmosDB, for a few reasons. An important one is that CosmosDB's geo-replication features make it a natural choice for new projects; secondly, our deadline meant we didn't have time to spend migrating the database for small model changes (something we did a lot in the beginning).
---
## Azure Functions
![Azure Functions Architecture](Assets/Functions.png)
We can swap out the orchestration service from App Service to Azure Functions if we're looking to cut costs and move to a 'serverless' architecture.
The truth is, there is a server. Azure Functions runs in the same environment as App Services, but the way we as developers interact with the service is a little different.
The most significant difference is how we scale. With Azure Functions, we do not ever have to worry about scaling our services to meet demand. Azure Functions runs on what we call "dynamic compute", in that Microsoft will scale up, down, out and in instances of your code to meet demand.
We will be developing a version of the backend that is entirely Azure Functions-based in the future.
---
## Micro-Services
![Azure Functions Architecture](Assets/MicroServices.png)
Mr Michael Sivers is currently investigating adding a Micro-Services architecture as a branch.
If you're interested in helping, then please reach out to us!
Learn more about [Service Fabric](https://azure.microsoft.com/en-us/services/service-fabric/)
## Connecting to remote resources securely
ExpressRoute is an Azure service that lets you create private connections between Microsoft datacenters and infrastructure thats on your premises or in a colocation facility. ExpressRoute connections do not go over the public Internet, instead ExpressRoute uses dedicated connectivity from your resources to Azure. This provides reliability and speeds guarantees with lower latencies than typical connections over the Internet. Microsoft Azure ExpressRoute lets you extend your on-premises networks into the Microsoft cloud over a private connection facilitated by a connectivity provider. Connectivity can be from an any-to-any (IP VPN) network, a point-to-point Ethernet network, or a virtual cross-connection through a connectivity provider at a co-location facility.
Microsoft uses industry standard dynamic routing protocol (BGP) to exchange routes between your on-premises network, your instances in Azure, and Microsoft public addresses. We establish multiple BGP sessions with your network for different traffic profiles. The advantage of ExpressRoute connections over S2S VPN or accessing Microsoft cloud services over internet are as follows;
* more reliability
* faster speeds
* lower latencies
* higher security than typical connections over the Internet
* extend ExpressRoute connectivity across geopolitical boundaries (using premium add-on)
Bandwidth options available in ExpressRoute are 50 Mbps, 100 Mbps, 200 Mbps, 500 Mbps, 1 Gbps, 2 Gbps, 5 Gbps and 10 Gbps.
![Express Route Connectivity Model](Assets/ERConnectivityModel.png)
There are three ways to connect a customer's on-premises infrastructure to Azure (or Microsoft cloud services) using ExpressRoute; they are:
1. WAN integration (or call IPVPN or MPLS or any-to-any connectivity)
2. Cloud Exchange through Co-Location Provider
3. Point-to-Point Ethernet Connection
A Site-to-Site VPN gateway connection is used to connect your on-premises network to an Azure virtual network over an IPsec/IKE (IKEv1 or IKEv2) VPN tunnel. This type of connection requires a VPN device located on-premises that has an externally facing public IP address assigned to it.
![Site to Site Connectivity Model](Assets/SiteToSiteConnectivityModel.png)
# Next Steps
[Web API](../03%20Web%20API/README.md)

Просмотреть файл

@ -0,0 +1,265 @@
![Banner](Assets/Banner.png)
# App Service
Azure App Service is Microsoft's fully managed, highly scalable platform for hosting web, mobile and API apps built using .NET, Java, Ruby, Node.js, PHP, and Python.
App Service is fully managed and allows us to set the maximum number of instances on which we want to run our backend app. Microsoft will then manage the scaling and load balancing across multiple instances to ensure your app performs well under heavy load. Microsoft manages the underlying compute infrastructure required to run our code, as well as patching and updating the OS and Frameworks when required.
Before we can deploy an App Service instance, we need to create a resource group to hold today's services.
### 1.1 Resource Group
Resource groups can be thought of as containers for your Azure Services (Resources). You may wish to create separate resource groups per location, or you may want to have one resource group per project.
In this workshop, we'll be deploying just one resource group to manage all of our required services.
Resource groups are great for grouping all the services associated with a solution together. During development, it means you can quickly delete all the resources in one operation!
When in production, it means we can see how much the services are costing us and how the resources are being used.
### 1.2 Create Resource Group
![Create new Resource Group](Assets/CreateResourceGroup.png)
Navigate to the [portal.azure.com](portal.azure.com) and sign in with your MSDN credentials.
1. Click 'Resource Groups' in the top-left corner.
2. Click 'Add' to bring up configuration pane.
3. Supply configuration data. Keep in mind it's difficult to change resource group names later.
4. Click 'Create' and relax.
Navigate to the newly created Resource Group.
![Create new Resource Group](Assets/EmptyResourceGroup.png)
### 1.3 App Service Plan
#### 1.3.1 Overview
In App Service, an app runs in an App Service plan. The App Service plan defines a set of compute resources for a web app to run. These compute resources are analogous to the server farm in conventional web hosting. One or more apps can be configured to run on the same computing resources (or in the same App Service plan).
When you create an App Service plan in a certain region (for example, West Europe), a set of compute resources is created for that plan in that region. Whatever apps you put into this App Service plan run on these compute resources as defined by your App Service plan. Each App Service plan defines:
* **Region** (West Europe, South UK, etc.)
* **Number of VM instances**
* **Size of VM instances** (Small, Medium, Large)
* **Pricing tier** (Free, Shared, Basic, Standard, Premium, PremiumV2, Isolated, Consumption)
#### 1.3.2 Create App Service Plan
From within your new Resource Group, do the following:
* Click "Add" in the top bar.
* Search for "App Service Plan"
![Search for App Service Plan](Assets/AddNewAppServicePlan.png)
![Create new App Service Plan](Assets/CreateNewAppServicePlan.png)
Creating an App Service Plan is easy, but we have to consider where our users are. We want our services to be running as close to our users as possible as this dramatically increases performance.
![Create new App Service Plan](Assets/ConfigureAppServicePlan.png)
We also need to consider how much Compute resources we think we'll need to meet demand.
Clicking 'View all' shows all the different options we have (it's a lot!). I won't list what their differences are as they're listed in the portal, but keep in mind, with the cloud we don't need to default to over-provisioning. We can scale up later if we have to!
For this workshop, a B1 Basic site will be more than enough to run this project. More complex development projects should use something in the Standard range of pricing plans. Production apps should be set up in Standard or Premium pricing plans.
Once you have created your app service plan and saved it, click "Create".
![Create new App Service Plan](Assets/CreateNewAppServicePlan.png)
The deployment of the new service can take a few minutes, but you can watch its progress in the "Bell" notification area in the toolbar.
### 1.4 Adding an App to our App Service Plan
Right now the App Service Plan doesn't contain any Apps. We will want at least one app for our ASP.NET Core 2.0 Web API service. To create this, let's navigate back to the Resource Group and click "Add" again. This time, we'll be searching for a "Web API".
![Create new App Service Plan](Assets/WebAPISearchResults.png)
* Select 'Web App' from the list and click Create.
![Create new App Service Plan](Assets/NewWebAppConfiguration.png)
We'll need to provide a unique app name, which will become part of the URL we use to navigate to the service. We should also select our subscription service, and most importantly, we'll want to run this app in the App Service Plan we just deployed.
Given that we're running our app in Platform as a Service, we don't need to worry too much about the underlying operating system. With that said, I highly recommend picking Windows as we've thoroughly tested this workshop with that configuration.
With all the configuration options set, hit "Create" and hold tight. Once the deployment has finished, we should be able to navigate to our app through the browser and see a generic Azure landing page.
Because my app name was: MyAwesomeStartupAPI
The unique URL would be: https://myawesomestartupapi.azurewebsites.net
You should see something similar to the image below:
![Create new App Service Plan](Assets/AppServiceDeployed.png)
### 1.5 Deploy your apps to App Service
Azure App Service has many options for how to deploy our code. These include continuous integration, which can link to Visual Studio Team Services or GitHub. We could also use FTP to upload the project, but we're not animals, so we won't - let's use Visual Studio Code for that.
The good news is: The full ASP.NET Core WebAPI code for the backend logic is already written for us and is located in the `Backend/Monolithic` folder of the workshop. But before we can upload it to the cloud, we need to **compile** it to make it machine readable. So we quickly have to dive into the .NET Developer's world! For this, right-click the `Monolithic` folder in Visual Studio Code and select ***Open in Terminal / Command Line***.
The Terminal window in Visual Studio Code pops up and we can enter the command to compile the application.
```bash
dotnet build
```
The output should look like this and we should see the **Build succeeded** message.
![VSCode run dotnet build](Assets/VSCodeDotnetBuild.png)
Building (compiling) the code generated two more folders for us: `/bin` and `/obj`. Here we can find executable files that we can upload to the web. As an ASP.NET Core project does not only consist of .NET code, but also contains some HTML, CSS and JavaScript, we need to bundle all the files together. So let's run another command.
```bash
dotnet publish
```
Once this command ran successfully, we have everything we need. Inside our `Monolithic` folder, we should now find a `bin/Debug/netcoreapp2.0/publish` folder that contains our ready-to-run backend logic. Now you can simply right-click this `publish` folder and select ***Deploy to Web App***.
![VSCode Deploy to Web App](Assets/VSCodePublishWebApp.png)
Follow the process of selecting a Subscription and Web App to publish to, confirm the publish process and enjoy your Web API.
> **Hint:** Sometimes, Visual Studio Code fails to load Subscriptions for your account when publishing. To fix this, go back to the ***Azure*** tab in Visual Studio Code and refresh the list of Subscriptions. Now start the Publish process again.
After a few seconds, your Web App should display the published code and look like this:
![Deployed API with Swagger UI](Assets/DeployedWebAPI.png)
To test if the deployment is working and the app is accepting HTTP requests correctly, let's go ahead and navigate to the **/api/ping** endpoint. In my case, I'll use the following URL: `http://myawesomestartupapi.azurewebsites.net/api/ping`.
![Deployed API with no UI](Assets/AppServiceDeploymentTest.png)
This shows that the backend is responding as expected! Before we move onto deploying storage services, you might want to read some tips and tricks for running App Services like a pro.
### 1.6 Application Settings
Although not entirely best practice, for ease of deployment and learning, we're going to use Application Settings to store API Keys and other configuration info. It's worth configuring the Application Settings Keys ahead of time to avoid mistyping later on in the tutorials. Below is an exhaustive list of settings used in the Web API:
**Cosmos DB**
- **`AzureCosmosDb:DatabaseId`**
- **`AzureCosmosDb:Endpoint`**
- **`AzureCosmosDb:Key`**
**Azure Storage**
- **`AzureStorage:Key`**
- **`AzureStorage:PhotosBlobContainerName`**
- **`AzureStorage:QueueName`**
- **`AzureStorage:StorageAccountName`**
**Azure Search**
- **`AzureSearch:AzureSearchApiKey`**
- **`AzureSearch:AzureSearchServiceName`**
**Active Directory B2C**
- **`ActiveDirectory:ApplicationId`**
- **`ActiveDirectory:SignUpSignInPolicy`**
- **`ActiveDirectory:Tenant`**
**Application Insights**
- **`ApplicationInsights:InstrumentationKey`**
## Next Steps
[Data Storage](../04%20Data%20Storage/README.md)
---
![Banner](Assets/TipsTricks.png)
# Tips & Tricks
You've now deployed your first App Service instance! We'll now review some 'Pro tips' to help you get the most out of your Azure service.
## Controlling Density
Most users will have a low number (usually less than 10) applications per App Service Plan. In scenarios where you expect you'll be running many more applications, it's crucial to prevent over-saturating the underlying compute capacity.
Let's imagine that we've deployed one instance of our admin web portal and two instances of our mobile web API to the same App Service Plan. By default, all apps contained in a given App Service Plan will run on all the available compute resources (servers) allocated. If we only have a single server in our App Service Plan, we'll find that this single server will run all our available apps. Alternatively, if we scale out the App Service Plan to run on two servers, we'll run all our applications (3 apps) on both sets of servers.
This approach is absolutely fine if you find that your apps are using approximately the same amount of compute resources. If this isn't the case, then you may find that one app is consuming the lion's share of compute resources, thus degrading the entire system performance. In our case, the mobile API will likely drive significant consumption of server resources, so we need to mitigate its effects on the performance of the admin portal.
To do this, what we can do is move lower-volume applications (such as the portal) into a single App Service Plan running on a single compute resource.
Place high demand apps into an App Service Plan which is configured to auto-scale based on CPU and memory utilisation.
## Per-App Scaling
Another alternative for running large numbers of applications more efficiently is to use the per-app scaling feature of Azure App Service. We've [documentation](https://msdn.microsoft.com/en-us/magazine/mt793270.aspx) that covers per-app scaling in detail. Per-App scaling lets you control the maximum number of servers allocated to a given application, and you can do so per application. In this case, an application will run on the defined maximum number of servers and not on all available servers.
## Application Slots
App Service has a feature called [deployment slots](https://docs.microsoft.com/en-gb/azure/app-service/web-sites-staged-publishing). In a nutshell, a deployment slot enables you to have another application (slot) other than your production app. It's another application that you can use to test new code before swapping into production.
Application slots are among the most used features in App Service. However, it's important to understand that each application slot is also an application in its own right. This means application slots can have custom domains associated with them, different SSL certificates, different application settings and so on. It also means the assignment of an application slot to an App Service Plan can be managed separately from the App Service Plan associated with the main production slot.
By default, each application slot is created in the same App Service Plan as the production slot. For low-volume applications, and/or applications with low CPU/memory utilization, this approach is fine.
However, because all applications in an App Service Plan run on the same servers, this means by default all of an application's slots are running on the same underlying server as production. This can lead to problems such as CPU or memory constraints if you decide to run stress tests against non-production slots, which run on the same server as your production application slot.
If resource competition is scoped just to scenarios such as running load tests, then temporarily moving a slot to a different App Service Plan, with its own set of servers, will do the following:
* Create an additional App Service Plan for the non-production slots. Important note: Each App Service Plan needs to be in the same resource group and the same region as the production slot's App Service Plan.
* Move a non-production slot to a different App Service Plan and, thus, a separate pool of compute resources.
* Carry out resource-intensive (or risky) tasks while running in the separate App Service Plan. For example, load tests can be run against a non-production slot without negatively impacting the production slot because there wont be any resource contention.
* When the non-production slot is ready to be swapped into production, move it back to the same App Service Plan running the production slot. Then the slot swap operation can be carried out.
## Deploying to Production with no downtime
You have a successful application running on an App Service Plan, and you have a great team to make updates to your application on a daily basis. In this case, you don't want to deploy bits directly into production. You want to control the deployment and minimize downtime. For that, you can use your application slots. Set your deployment to the “pre-production” slot, which can be configured with production settings, and deploy your latest code. You can now safely test your app. Once you're satisfied, you can swap the new bits into production. The swap operation doesn't restart your application, and in return, the Controller notifies the front-end load balancer to redirect traffic to the latest slot.
Some applications need to warm up before they can safely handle production load—for example, if your application needs to load data into cache, or for a .NET application to allow the .NET runtime to JIT your assemblies. In this case, you'll also want to use application slots to warm up your application before swapping it into production.
We often see customers having a pre-production slot that's used to both test and warm up the application. You can use Continuous Deployment tools such as Visual Studio Release Manager to set up a pipeline for your code to get deployed into pre-production slots, run tests for verification and warm all required paths in your app before swapping it into production.
## Public Virtual IPs
By default, there's a single public VIP for all inbound HTTP traffic. Any app is addressable to a single VIP. If you have an app on App Service, try running the nslookup command and see the result. Here's an example:
```
» nslookup myawesomestartupapi.azurewebsites.net
Server: 2001:4898::1050:1050
Address: 2001:4898::1050:1050#53
Non-authoritative answer:
myawesomestartupapi.azurewebsites.net canonical name = waws-prod-ln1-013.vip.azurewebsites.windows.net.
waws-prod-ln1-013.vip.azurewebsites.windows.net canonical name = waws-prod-ln1-013.cloudapp.net.
Name: waws-prod-ln1-013.cloudapp.net
Address: 51.140.59.233
```
You'll notice that an App Service scale unit is deployed on Azure Cloud Service (by the cloudapp.net suffix). WAWS stands for Windows Azure (when Azure was still called Windows) Web sites (the original name of App Service).
## Outbound Virtual IPs
Most likely your application is connected to other Azure and non-Azure services. As such, your application makes outbound network calls to endpoints, not on the scale unit of your application. This includes calling out to Azure services such as SQL Database and Azure Storage. There are up to five VIPs (the one public VIP and four outbound dedicated VIPs) used for outbound communication. You can't choose which VIP your app uses, and all outbound calls from all apps in scale unit are using the five allocated VIPs. If your application uses a service that requires you to whitelist IPs that are allowed to make API calls into such a service, you'll need to register all five VIPs of the scale unit. To view which IPs are allocated to outbound VIPs for a given unit of scale (or for your app from your perspective) go to the Azure portal, as shown in the below image.
![Create new App Service Plan](Assets/OutboundVIP.png)
If you require a dedicated set of inbound and outbound IPs, you should explore using a fully isolated and dedicated App Service Environment.
## IP And SSL
App Service supports IP-based SSL certificates. When using IP-SSL, App Service allocates to your application a dedicated IP address for only in-bound HTTP traffic.
Unlike the rest of Azure dedicated IP addresses, the IP address with App Service via IP-SSL is allocated as long as you opt to use it. You don't own the IP address, and when you delete your IP-SSL, you might lose the IP address (as it might be allocated to a different application).
App Service also supports SNI SSL, which doesn't require a dedicated IP address and is supported by modern browsers.
## Network Port Capacity for Outbound Network Calls
A common requirement for applications is the ability to make outbound network calls to other network endpoints. This includes calling out to Azure internal services such as SQL Database and Azure Storage. It also includes cases where applications make calls to HTTP/HTTPS API endpoints—for example, calling a Bing Search API or calling an API “application” that implements back-end business logic for a Web application.
In almost all of these cases, the calling app running on Azure App Service is implicitly opening a network socket and making outbound calls to endpoints that are considered “remote” from an Azure Networking perspective. This is an important point because calls made from an app running on Azure App Service to a remote endpoint rely on Azure Networking to set up and manage a table of Network Address Translation (NAT) mappings.
Creating new entries in this NAT mapping takes time, and there's ultimately a finite limit on the total number of NAT mappings that can be established for a single Azure App Service scale unit. Because of this, App Service enforces limits on the number of outbound connections that can be outstanding at any given point in time.
The maximum connection limits are the following:
* 1,920 connections per B1/S1/P1 instance
* 3,968 connections per B2/S2/P2 instance
* 8,064 connections per B3/S3/P3 instance
* 64K max upper limit per App Service Environment
Applications that “leak” connections invariably run into these connection limits. Applications will start intermittently failing because calls to remote endpoints fail, with the failures sometimes correlating closely to periods of higher application load. You'll frequently see errors like the following: “An attempt was made to access a socket in a way forbidden by its access permissions aaa.bbb.ccc.ddd.”
The likelihood of running into this problem can be substantially mitigated with a few best practices:
* For .NET applications using ADO.NET/EF, use database connection pooling.
* For .NET applications making outbound HTTP/HTTPS calls, pool and reuse instances of System.Net.Http.HttpClient or use Keep-alive connections with System.Net.HttpWebRequest.
- Tips and Tricks Source: [MSDN](https://msdn.microsoft.com/en-us/magazine/mt793270.aspx)
---
### Further Reading
* [Official Documentation](https://docs.microsoft.com/en-us/azure/app-service/)
* [Adding API documentation with Swagger](https://docs.microsoft.com/en-us/aspnet/core/tutorials/web-api-help-pages-using-swagger)

Просмотреть файл

@ -0,0 +1,279 @@
![Banner](Assets/Banner.png)
# Data Storage
As we are collecting and displaying different types of information like *Jobs*, *Parts*, *Users* and *photos*, we need to store them somewhere in the cloud. For this, we chose two different types of storages: **Blob Storage** for raw files like images and a **NoSQL Database** for storing unstructured data like Jobs.
## 1. Azure Cosmos DB for unstructured data
Whenever it comes to unstructured data and NoSQL approaches in the Microsoft Azure ecosystem, Cosmos DB should be our database of choice. It is a globally-distributed, multi-model database service which makes it super flexible to use and extremely easy to scale to other regions.
Besides *Disk Space* and *Consistency*, Cosmos DB's main scale dimension is *Throughput*. For each collection, developers can reserve throughput for their data, which ensures the 99.99th percentile of latency for reads to under 10 ms and for writes to under 15 ms. Pre-reserved Throughput, which is defined by request units (RUs), mainly determines the price of a Cosmos DB instance. Fetching a single 1KB document by id spends roughly 1 RU. You can use the [Cosmos DB capacity planner tool](https://www.documentdb.com/capacityplanner) to calculate how many RUs your database might need.
### 1.1 Create a Cosmos DB instance
To create a new Cosmos DB instance in the [Azure Portal](https://portal.azure.com), click the ***New*** button in the top-left corner and find ***Azure Cosmos DB*** within the ***Databases*** section.
![Add new Cosmos DB from the Azure Portal](Assets/AddNewCosmosDb.png)
Choose the following settings and hit the ***Create*** button to start provisioning the Cosmos DB instance.
- **ID:** myawesomenewstartupdb
- **API:** SQL
- **Resource Group:** Use existing
- **Location:** Same as your Web App
- **Enable geo-redundancy:** No (for now)
### 1.2 Explore Cosmos DB
After a few seconds, Azure should have created the database service and we can start exploring Cosmos DB by navigating to it in the Azure Portal.
![Cosmos DB Overview](Assets/CosmosDbOverview.png)
#### 1.2.1 Scalability and Consistency
As we can see from the ***Overview*** section, Azure Cosmos DB is all about scalability and availability. We get greeted by a map that shows us which regions our data gets synchronized to, and we can easily add and remove regions by selecting or deselecting them on the map or the ***Replicate data globally*** section in the side menu.
With scaling databases to multiple instances, *Consistency* immediately comes to mind. By default, Cosmos DB uses *Session consistency* but we can choose from five different [Consistency levels](https://docs.microsoft.com/en-us/azure/cosmos-db/consistency-levels) in the ***Default Consistency*** menu, if we feel the need to change that.
> **Hint:** Even when selecting multiple regions for Azure Cosmos DB, the connection string will always stay the same. That's a very nice feature, which allows your backend to not care about the location of your database at all. Cosmos DB has its own traffic manager that will route your query to the fastest location automatically.
#### 1.2.2 Security Keys
Like every other database, Azure Cosmos DB offers security through access control using Keys. Head over to the ***Keys*** section of the database to check your keys for different access levels (read-write and read-only) and connection strings. We will need this information later, when we connect the Cosmos DB to the Web API.
#### 1.2.3 Data Explorer
One nice feature of Azure Cosmos DB is the ***Data Explorer*** that can be found in the side menu and offers a live view on the data that sits inside the database. We can also edit and query the documents here.
At the moment our database is empty, but we will come back later to take a look at what's going on here.
### 1.3 Connect Azure Cosmos DB with the Web API
#### 1.3.1 Locate the connection in the Web API code
To let the Web API write data into the database and read or modify documents from it, we need to connect it with our backend logic of course. The code for it has already been written using the [Azure Cosmos DB .NET SDK for SQL](https://docs.microsoft.com/en-us/azure/cosmos-db/sql-api-sdk-dotnet). You can find the backend logic for creating, reading, updating and deleting documents in the [`DocumentDBRepositoryBase.cs`](/Backend/Monolithic/Services/DocumentDBRepositoryBase.cs) class of the Web Api code.
![Cosmos DB Connection in Code](Assets/VSCosmosConnectionInCode.png)
As you might have seen, the connection details to the Cosmos DB are not hard-coded but listed in the [`appsettings.json`](/Backend/Monolithic/appsettings.json) file of the Web Api project. This means that the application is accessing them at runtime either from its environment variables or from this file.
```json
"AzureCosmosDb": {
"Endpoint": "",
"Key": "",
"DatabaseId": ""
}
```
[View in project](/Backend/Monolithic/appsettings.json#L21-L24)
As these settings are empty, let's set them in the Web Api's environment variables!
#### 1.3.2 Add Connection Information to Environment Variables
Navigate to the Web API App Service in the Azure Portal and open the ***Application settings*** from the menu on the left. Scroll down to the ***Application settings*** section and you will see a table where we can add settings to.
![Add App Service App Settings](Assets/AddAppServiceAppSettings.png)
Add the settings in the format `Settings:Key` and take the values from the ***Keys*** section of your Cosmos DB instance. Hit ***Save*** at the top of the page once you are set.
- **`AzureCosmosDb:Endpoint`:** Uri of the Cosmos DB instance
- **`AzureCosmosDb:Key`:** Primary Key of the Cosmos DB instance
- **`AzureCosmosDb:DatabaseId`:** Any name for a database (e.g. contosomaintenance)
The `DatabaseId` is the top-level domain of the collections that will be used (or generated) by the backend. A single Cosmos DB instance can consist of multiple databases. At the moment, we do not have any databases but the backend code will use this Key to create one.
### 1.4 Generate data
#### 1.4.1 Fetch from Cosmos DB for the first time
Once we add the connection information to the App Service Settings, the Web API should be able to connect to the database. We can check that by calling the **Jobs API endpoint** at `/api/job` to fetch a list of all Jobs from the database.
![Empty List Of Jobs](Assets/EmptyListOfJobs.png)
As we can see, (of course) there are no jobs inside the database at the moment. But we don't get an error message but an empty list. That means that there is at least "something" inside of our database now. The [`DocumentDBRepositoryBase.cs`](/Backend/Monolithic/Services/DocumentDBRepositoryBase.cs#L97-L138) class automatically creates databases and collections that do not exist when it gets asked for them.
Let's check the Cosmos DB's ***Data Explorer*** at the Azure Portal to see what happened!
![Cosmos DB With Empty Collection](Assets/CosmosWithEmptyCollection.png)
As we can see, a `contosomaintenance` database has been created with an empty `jobs` collection. If we click the ***Scale & Settings*** tab, we can see that the collection has been created with 400 RUs reserved, which is the minimum. Whenever we see more traction on this collection, we can scale up here.
#### 1.4.2 Add a new document manually
Time to add our first job manually! Let's click the ***New Document*** button in the `jobs` collection and add a JSON document like the following one in the editor to add a dummy job that points to the Microsoft headquarters in Redmond.
```json
{
"id": "3de8f6d0-e1b6-416a-914d-cd13554929a4",
"Name": "Service ATR 42 Engine",
"Details": "General Service",
"Type": "Service",
"Status": "Waiting",
"Attachements": null,
"Address": {
"point": {
"type": "Point",
"coordinates": [
-122.1517886,
47.6586067
]
}
},
"AssignedTo": null,
"DueDate": "0001-01-01T00:00:00",
"createdAt": "2018-01-25T00:34:49.753398+00:00",
"isDeleted": false
}
```
Once we hit ***Save***, we should be able to return to our API and fetch the list of jobs again. Now, the list should not be empty anymore but contain our new dummy job.
![Fetch Dummy Job From Cosmos DB](Assets/FetchDummyJobFromCosmos.png)
> **Tip:** To get well-formatted JSON in Google Chrome, you can use the [JSONView Plugin](https://chrome.google.com/webstore/detail/jsonview/chklaanhfefbnpoihckbnefhakgolnmc)
#### 1.4.3 Generate Dummy Data
To have actual data in the Cosmos DB instance to play around with and to avoid having you to write a bunch of dummy Jobs and Parts manually, we have prepared some dummy data for this workshop. Once the Cosmos DB connection is configured, you can call the `api/dummy` endpoint of your Web API to fill the database.
[//]: # (Empty line for spacing)
&nbsp;
## 2. Azure Blob Storage for raw files like photos
Now that we can store documents for *Jobs*, *Parts* and other unstructured data in the form of JSON documents, let's add a space to store raw files like pictures. In the **Contoso Maintenance App**, users can take photos and add them to *Jobs* to document damages or process. To store these pictures, we should add **Blob Storage** to our backend infrastructure.
### 2.1 Create a Storage Account
For that, head over to the [Azure Portal](https://portal.azure.com), click the ***New*** button, open the ***Storage*** category and select ***Storage Account*** to add some cloud storage to store your files at.
![Add a Storage Account in the Azure Portal](Assets/AddStorageAccount.png)
Choose the following settings and hit the Create button to start provisioning the Storage Account.
- **ID:** myawesomestartupstorage
- **Deployment model:** Resource manager
- **Account kind:** Storage (general purpose v1)
- **Performance:** Standard
- **Replication:** Locally-redundant storage (LRS)
- **Secure transfer required:** Disabled
- **Resource Group:** Use existing
- **Location:** Same as your Web App
### 2.2 Explore Azure Blob Storage
After a few seconds, Azure provisioned a Storage Account for us and we can navigate to it in the Azure Portal.
![Add a Storage Account in the Azure Portal](Assets/StorageAccountOverview.png)
#### 2.2.1 Storage Services
Besides Blob Storage, an Azure Storage Account bundles all kinds of storages like Tables, Blobs, Files and Queues. Whenever we need to store data in one of these areas, we can use this Storage Account for that. For now, **Blobs** is the storage type that is most interesting for our image uploads but we will explore at least one more later at this workshop.
#### 2.2.2 Security Keys
Similar to what we saw with Cosmos DB, Azure Storage is also secured with Access Keys to manage access control. We will also need this information later, when we connect the Storage Account to the Web API the same way we did with Cosmos DB before.
#### 2.2.3 Configuration
We can upgrade and configure our Storage Account to use Solid State Disks (Premium Storage), only allow encrypted file transfers and replicate it through multiple data centers or regions for additional availability and performance.
### 2.3 Connect Blob Storage with the Web API
#### 2.3.1 Create Blob containers for photos
Before we connect the dots between the Web API backend and the Storage Account, we should create **Containers** for storing the uploaded photos at. Navigate to the ***Browse blobs*** section in the menu on the left and create a new container using the ***Add Container*** button.
![Add a Blob Storage Container](Assets/AddBlobContainer.png)
Let's create a container for the uploaded images in their original size with anonymous read access from external.
- **Name:** images-large
- **Public access level:** Blob (anonymous read access for blobs only)
The `images-large` container will be used by the backend to upload all pictures that have been taken with the device camera. Later in this workshop, we will down-scale these images automatically for performance enhancements, as it is not a best practice to always download full-size images.
So let's also create two more containers for scaled images with the same properties, so that we end up with three containers.
- `images-large` (Blob)
- `images-medium` (Blob)
- `images-icon` (Blob)
![Blob Container Overview](Assets/BlobContainerOverview.png)
#### 2.3.2 Add Storage Queue
Now that we have added Containers for uploaded photos, we use another Storage Type of Azure Storage Accounts: Storage Queues. Those are simple message queues that can handle any kind of information and save it until it gets processed. Although we do not need the Storage Queue for the image upload directly, it will become important later in this workshop, so it is a good time to create it now.
![Add Storage Queue](Assets/AddStorageQueue.png)
Enter the ***Queues*** section of the Storage Account via the side menu and create a new Queue called `processphotos`.
#### 2.3.3 Add Connection Information to Environment Variables
Similar to the Cosmos DB configuration above, the Web API backend project also manages Azure Blob Storage Access through environment variables.
```json
"AzureStorage": {
"StorageAccountName": "",
"Key": "",
"PhotosBlobContainerName": "",
"QueueName": ""
}
```
[View in project](/Backend/Monolithic/appsettings.json#L15-L20)
These environment variables can also be set in the ***Application Settings*** section of the App Service, so let's navigate to our Web API and extend its environment variables!
![Add Azure Storage to Settings](Assets/AddStorageAppSettings.png)
Add the settings in the format `Settings:Key` and take the values from your Storage Account's ***Access Keys*** section.
- **`AzureStorage:StorageAccountName`:** Storage Account name (e.g. myawesomestartupstorage)
- **`AzureStorage:Key`:** Key 1 key from the ***Access Keys*** section
- **`AzureStorage:PhotosBlobContainerName`:** images-large
- **`AzureStorage:QueueName`:** processphotos
Once you click the ***Save*** button at the top of the page, the backend will choose this Storage Account and the `images-large` blob container to upload photos.
### 2.4 Test the photo upload
Let's test if everything works as expected and send our first photo to the Web API. For this, API Development Tools like [Postman](https://www.getpostman.com/) helps us to send files against network endpoints.
#### 2.4.1 Uploading a photo
The API endpoint for the photo upload is `/api/photo/{jobId}` and we can basically upload any file we want. You can choose a picture from the web or your computer or use the [Demo-AirplaneAssembly.jpg](Assets/Demo-AirplaneAssembly.jpg) ([Source](https://en.wikipedia.org/wiki/Airplane)) from this repository. Make sure to send the picture as **form-data** file to the API as it expects it in the [`PhotoController.cs`](/Backend/Monolithic/Controllers/PhotoController.cs#L30).
Take the `id` from any job in your Cosmos DB to build the url and attach the photo to a specific *Job*.
![Postman Image Upload Test](Assets/PostmanImageUploadTest.png)
You can send the photo via [Postman](https://www.getpostman.com/) with the following settings.
- **Method:** POST
- **Url:** `http://myawesomestartupapi.azurewebsites.net/api/photo/3de8f6d0-e1b6-416a-914d-cd13554929a4` (or similar)
- **Body:** form-data
- **form-data:**
- **Key:** `file`
- **Value:** `Demo-AirplaneAssembly.jpg` (or similar)
Hit ***Send*** and check for the response status **200 OK**.
#### 2.4.2 Checking the result
We should now see the uploaded photo in our Blob Storage container `images-large`. So let's navigate to the Storage Account in the Azure Portal and check if it's there by selecting ***Containers*** from the side menu and opening the `images-large` container.
![Photo Upload Result](Assets/PhotoUploadResult.png)
And voilà, here it is. The API replaces the original name by a GUID ([View in project](/Backend/Monolithic/Controllers/PhotoController.cs#L40)) but that's okay as we need to give uploaded photos a unique ID. Congratulations, your data storage works!
# Additional Resources
- [Modeling Data in Cosmos DB vs. traditional SQL Databases](https://docs.microsoft.com/en-us/azure/cosmos-db/modeling-data)
- [Azure Cosmos DB RU Calculator and capacity planner](https://www.documentdb.com/capacityplanner)
# Next Steps
[Search](../05%20Search)

Просмотреть файл

@ -0,0 +1,72 @@
![Banner](Assets/Banner.png)
# Azure Search
Many applications use search as the primary interaction pattern for their customers who expect great relevance, suggestions, near-instantaneous responses, multiple languages, faceting and more. Azure Search makes it easy for you to add powerful and sophisticated search capabilities to your website or application. The integrated Microsoft natural language stack, also used in Bing and Office, has been improved over 16 years of development. Quickly tune search results and construct rich, fine-tuned ranking models to tie search results to business goals. Reliable throughput and storage give you fast search indexing and querying to support time-sensitive search scenarios.
We're going to add Azure Search to our project for searching the jobs. Right now we're using it in the most basic possible way, but we'll be expanding this later as the app grows in complexity.
## Deploying Azure Search
![Search for Azure Search](Assets/SearchForSearch.png)
Click the "Create a Resource" in the top-left menu bar. You'll then be able to search for 'Azure Search'.
![Azure Search Results](Assets/SearchResults.png)
Select Azure Search and click 'Create'.
![Azure Search Configure](Assets/ConfigureSearchService.png)
You'll have a few options for pricing, but for this demo, we should have plenty of capacity left over if we use the Free tier. Once you've deployed Azure Search, go to the resource
![Azure Search Overview](Assets/SearchOverview.png)
### Indexing our data
There are two ways to get data into Azure Search. The easiest is to make use of the automatic indexers. With the indexers, we're able to point Azure Search to our database and have it look for new data on a schedule. This can lead to situations where the database and search index are out-of-sync so be wary of using this approach in production. Instead, you should manage the search index manually using the lovely SDKs provided.
For ease of use, we'll make use of the Indexers to get some data quickly into our index.
To do this, click the "Import Data" button in the top toolbar.
![Azure Search Import Data](Assets/ImportData.png)
![Azure Search Connect To Data](Assets/ConnectToDataDefault.png)
We already have our database deployed, so we can select "Cosmos DB" and then click "Select an account".
![Azure Search New Data Source](Assets/NewDataSourceFilledIn.png)
Once you've selected your Cosmos DB account, you should be able to use the drop-downs to select which database and collections you wish to import from. We'll be picking "Jobs".
![Azure Search Create Index](Assets/CreatingJobsIndex.png)
![Azure Search Create Index](Assets/CreateJobIndexBasic.png)
**Important Note**
The Index name must be set to "job-index", because it is referred to by name in the mobile application.
We need to configure what data we wish to send back down to the device with a search query as well as which properties we'll use to search. The Index is difficult to modify (apart from adding new fields) after we've created it, so it's always worth double checking the values.
**Important**
You need to create a _suggester_ called 'suggestions'. This is referred to by the _search_ API which we're writing. To do this, tick the 'suggester' box and enter 'suggestions' as its name. Then you also need to mark at least one field as being part of the suggester. We suggest(!) that the _Name_ and _Details_ fields are marked as such.
Note that the screenshot above is slightly out of date, and the _Suggester_ is now presented as a checkbox on the main screen, rather than another tab. Also note that at the moment the Suggester details aren't visible in the index once you've created (this is a shortcoming of the current Azure portal).
Once you've completed this setup, click "Create".
![Azure Search Create Updates](Assets/IndexerSchedule.png)
You can now set the frequency at which Azure Search will look for new data. I recommend for this demo setting it to be 5 minutes. We can do this by selecting "custom".
![Azure Search Customer Timer](Assets/CustomTimer.png)
We also want to track deletions, so go ahead and check the tickbox and select the 'isDelete' item from the drop-down menu and set the marker value to "true".
You're now ready to click "OK" which will create the indexer and importer for you.
![Azure Search Indexers List](Assets/Indexers.png)
Click on the "indexers" item within the overview blade.
![Azure Search Run Indexer](Assets/RunIndex.png)
We can now run the indexer for the first time. Once it's completed, navigate back to the Azure Search Overview and click on "Search Explorer". We can now confirm if the search service is working as expected.
![Azure Search Explorer](Assets/SearchExplorer.png)
# Next Steps
[API Management](../07%20API%20Management)

Просмотреть файл

@ -0,0 +1,203 @@
![Banner](Assets/Banner.png)
# Smart Image Resizing with Azure Functions and Cognitive Services
We have come to a point where our backend has grown to a pretty solid state, so let's do some of the more advanced stuff and add some intelligence to it! Not every developer has a background in Machine Learning and Artificial Intelligence, so we should start with something simple: **Resizing uploaded images in an intelligent way**.
You remember, users can add photos to *Jobs* and upload them through the Web API so that they get stored in the Blob Storage. These photos are uploaded and stored in **full size**, which results in high network traffic and download times when the Mobile App is fetching them. Sometimes the App just needs a small or preview version of the photo, so it would be nice to store some smaller sizes of the photos in addition to the originally uploaded ones.
The problem with simply resizing the images to a certain square resolution like 150 x 150 pixels for thumbnail icons is that it could cut off important parts of a picture that got taken in portrait or landscape format. This is why it is recommended to use AI to understand what is shown on a picture and crop it accordingly.
## 1. Microsoft Cognitive Services
Great resources of Intelligence Services for developers without deeper Machine Learning knowledge are [Microsoft's Cognitive Services](https://azure.microsoft.com/en-us/services/cognitive-services/). These are a set of pre-trained Machine Learning APIs across various sections like Vision, Speech or Knowledge that developer's can simply include within their applications using a REST API.
### 1.1 Computer Vision for thumbnail generation
One of these APIs is [Computer Vision](https://azure.microsoft.com/en-us/services/cognitive-services/computer-vision/), a service that tries to understand what's on a picture or video. This service can analyze pictures to generate tags and captions, detect adult or racy content, read text in images, recognize celebrities and landmarks, detect faces and emotions and much more. You should definitely take some time to explore and play around with all these services!
![Cognitive Services Thumbnail Preview](Assets/CogServicesThumbnailPreview.png)
The perfect service for our scenario is the [Thumbnail Generation API](https://westcentralus.dev.cognitive.microsoft.com/docs/services/56f91f2d778daf23d8ec6739/operations/56f91f2e778daf14a499e1fb) which allows us to upload a picture together with a preferred size and get a cropped image back that does not miss out important parts of the original photo.
### 2.1 Get access through Azure
Microsoft Cognitive Services are hosted in various Azure Regions and can be consumed through an Azure Subscription. They are priced by transactions with numerous **free transactions** every month and a pay-per-use model for each additional request.
To add Computer Vision to our solution, enter the [Azure Portal](https://portal.azure.com) and click on the ***Create a resource*** button, find ***AI + Cognitive Services*** and select ***Computer Vision API*** to configure access to the API.
![Add Computer Vision to Azure](Assets/AddComputerVision.png)
Choose the following settings and hit the ***Create*** button to start.
- **ID:** myawesomenewstartupcognitivevision
- **Location:** Same as your Web App (or close, as Cognitive Services are not available in all Regions)
- **Pricing tier:** F0
- **Resource Group:** Use existing
Once the deployment is succeeded, you can navigate to the resource and access the API Key from the ***Keys*** section.
## 2. Azure Functions
Functions are a **Serverless** component of Microsoft Azure and abstract even more of the underlying hardware than Platform-as-a-Service (PaaS) offerings like App Service do. An Azure Function basically just consists of a code snippet and some meta information about when and how it should get executed. This code snippet sleeps until it gets triggered by an event or other service, wakes up then, executes its code and falls asleep again.
This behaviour allows Microsoft to offer a [**very attractive pricing model**](https://azure.microsoft.com/en-us/pricing/details/functions/) where you only pay for pure execution time of an Azure Function. That means you only pay for an Azure Function when it is actually used. If you write code that never gets executed, it won't cost you anything! The ultimate idea of cloud computing! Even better, [the first 1 million executions or 400,000 GB-s are free](https://azure.microsoft.com/en-us/pricing/details/functions/)!
> **Hint:** Azure Functions are the ideal service to extend existing large backend architectures with additional functionality or to process data in the cloud. The latter is exactly what we need to do when resizing images.
Whenever a user uploads an image, he should get immediate feedback and should not have to wait for the Cognitive Services. Once the image gets dropped to the Blob Storage, the Function awakes and calls the Cognitive Service API to resize it in a smart way in the background. Next time a user fetches images, he will receive the resized versions.
We have already prepared an Azure Function so we don't need to start from scratch! In the repository, there is an Azure Function called [`ResizeImage.cs`](/Backend/Functions/ResizeImage.cs) that contains the code for our scenario.
1. Get triggered by a Storage Queue message
1. Take an image from Azure Blob Storage
1. Upload it to the Cognitive Services Computer Vision API
1. Write the resized images back to Azure Blob Storage
1. Update the Cosmos DB entry
### 2.1 Create an Azure Function
Multiple Azure Functions are hosted in a *Function App*. To create one, click the ***Create a resource*** button at the top-left corner of the [Azure Portal](https://portal.azure.com), select ***Compute*** from the categories and click on ***Function App***.
![Add Azure Functions](Assets/AddAzureFunctions.png)
Add a *Function App* to your solution using the following properties.
- **App name:** myawesomenewstartupfunctions
- **Resource Group:** Use existing
- **OS:** Windows
- **Hosting Plan:** Consumption Plan
- **Location:** Same as your Web App
- **Storage:** Use existing (same Storage Account as created for Blob Storage)
Click the ***Create*** button and wait until Azure provisioned your Function App.
#### 2.1.1 Explore Function Apps
Once the Function App has been created, we can navigate to it and start exploring the Dashboard. There is not much to see, as we don't have any Functions at the moment and the Function App just acts as a container for those.
![Explore Azure Functions](Assets/ExploreAzureFunctions.png)
There are multiple ways to add Azure Functions. One is to click the small ***+*** button next to the ***Functions*** entry in the side menu and start from scratch. You can see, that Azure Functions are suitable for different scenarios like Webhooks, Timed executions or Data processing. This basically defines when Functions should be triggered. Azure also supports different programming languages.
#### 2.1.2 Tooling
The cross-platform code editor Visual Studio Code supports Azure Functions tooling when the [Azure Functions Extension](https://marketplace.visualstudio.com/items?itemName=ms-azuretools.vscode-azurefunctions) gets added. Open the [`Backend/Functions`](/Backend/Functions/) folder from the repository in Visual Studio Code and explore the code.
![Explore Azure Functions in Visual Studio Code](Assets/VSCodeAzureFunction.png)
Let's quickly explore what has been pre-written for us here!
#### 2.1.3 Triggers
Azure Functions are based on the concept of **Triggers**, which define when a Function should wake up and execute its code. There are several different [Trigger Bindings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-triggers-bindings) that can be defined in the function's source code or configuration files. Our function uses the [Queue Storage Binding](https://docs.microsoft.com/en-us/azure/azure-functions/functions-bindings-storage-queue) as a Trigger so it wakes up whenever a new message appears in a Storage Queue.
> **Hint:** Do you remember the Storage Queue that we created earlier? Our Web API backend sends a small message with a `jobId` and `photoId` to the queue every time a new photo got uploaded. So the Function will fire up every time a photo gets uploaded.
```csharp
// Trigger
[QueueTrigger("processphotos")] PhotoProcess queueItem,
```
[View in project](/Backend/Functions/ResizeImage.cs#L22)
It listens on a Storage Queue called `processphotos` and wakes up once a new message arrives in there. Then it takes the message `PhotoProcess queueItem` and starts processing it.
#### 2.1.4 Inputs and Outputs
When an Azure Function awakes, it can fetch additional **Inputs** from multiple sources that are needed for the processing. Similar to the Triggers, these Inputs also use [Bindings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-triggers-bindings). Beside the Queue message itself that wakes our Function up, it needs two additional inputs: The uploaded photo from the Blob Storage and the *Job* document from Cosmos DB. These are also defined in the function's code.
```csharp
// Inputs
[CosmosDB("contosomaintenance", "jobs", Id = "{jobId}", ConnectionStringSetting = "CosmosDb")] Job job,
[Blob("images-large/{photoId}.jpg", FileAccess.Read)] byte[] imageLarge,
```
[View in project](/Backend/Functions/ResizeImage.cs#L25-L26)
This passes a `Job job` based with its `id` set to `{jobId}` and a `byte[] imageLarge` from `/images-large/{photoId}.jpg` to the Function. The values `{jobId}` and `{photoId}` are from our Trigger the `PhotoProcess queueItem`.
Azure Function Outputs follow the same process. As we want to write two images to our Blob Storage (a medium sized and icon sized one), we define two outputs of the same Binding type.
```csharp
// Outputs
[Blob("images-medium/{photoId}.jpg", FileAccess.Write)] Stream imageMedium,
[Blob("images-icon/{photoId}.jpg", FileAccess.Write)] Stream imageIcon,
```
[View in project](/Backend/Functions/ResizeImage.cs#L29-L30)
Both `Stream` objects get passed to the Function. The rest of the code just connects the dots.
### 2.3 Integrate with Storage, Cosmos DB and Cognitive Services
Of course, all these Trigger, Input and Output [Bindings](https://docs.microsoft.com/en-us/azure/azure-functions/functions-triggers-bindings) have to be configured. As we might be already used to from the App Service, this configuration is done via Environment Variables. Each Azure Function has a `local.settings.json` file that sets Connection Strings to the used services.
```json
{
"IsEncrypted": false,
"Values": {
"AzureWebJobsStorage": "<Storage Connection String>",
"AzureWebJobsDashboard": "<Storage Connection String>",
"CosmosDb": "<CosmosDB Connection String>",
"Ocp-Apim-Subscription-Key": "<Cognitive Services Computer Vision API Key>"
}
}
```
[View in project](/Backend/Functions/local.settings.json)
For local tests, the Environment Variables can be set in this file; when uploading the Function to Azure, we should save them in the Function App's Application Settings. Navigate to the ***Function App*** in the [Azure Portal](https://portal.azure.com), open the ***Application Settings*** and add the Keys.
![Set Function Application Settings](Assets/SetFunctionApplicationSettings.png)
Add the settings like the following - getting the values from the relevant sections of your previously created Azure resources.
- **AzureWebJobsDashboard:** *Key 1 Connection String* from the Storage Account ***Access keys*** section (should be already set)
- **AzureWebJobsStorage:** *Key 1 Connection String* from the Storage Account ***Access keys*** section (should be already set)
- **CosmosDB:** *Primary Connection String* from the Cosmos DB ***Keys*** section
- **Ocp-Apim-Subscription-Key:** *Key 1* from the Cognitive Service ***Keys*** section
Scroll up and click ***Save*** to set the Environment Variables for the Function App.
### 2.6 Deploy to Azure
Similar to the ASP.NET Core Web API project, we also need to compile the Azure Function code into an executable, before we can upload it to the cloud. For this, right-click the `Functions` folder in Visual Studio Code and select ***Open in Terminal / Command Line***.
The Terminal window in Visual Studio Code pops up and we can enter the command to compile the application.
```bash
dotnet build
```
The output should look like this and we should see the **Build succeeded** message.
![Build an Azure Function in Visual Studio Code](Assets/VSCodeAzureFunctionBuild.png)
Again, building (compiling) the code generated two more folders for us: `/bin` and `/obj`. Here we can find executable files that we can upload to the cloud. As an Azure Function only consists of .NET code, no `dotnet publish` is needed for it.
Inside our `Functions` folder, we should now find a `bin/Debug/netstandard2.0` folder that contains our ready-to-run backend logic. Now you can simply right-click this `netstandard2.0` folder and select ***Deploy to Function App***.
![Deploy an Azure Function in Visual Studio Code](Assets/VSCodeAzureFunctionDeploy.png)
Follow the process of selecting a Subscription and Function App to publish to, confirm the publish process.
> **Hint:** Visual Studio Code might ask you if you want to override the Remote Runtime. This is because at the time of the creation of this workshop, the Azure Functions Runtime `~1` is set as default when creating a new function app. As we are already using the `beta` version of Runtime 2 in this workshop, we need to update this in Azure, too.
> **Hint:** Sometimes, Visual Studio Code fails to load Subscriptions for your account when publishing. To fix this, go back to the ***Azure*** tab in Visual Studio Code and refresh the list of Subscriptions. Now start the Publish process again.
### 2.7 Test your Azure Function
Back in the [Azure Portal](https://portal.azure.com), we can monitor how our Azure Function behaves. For this, select the ***Monitor*** tab beneath your Function.
For each execution, we can check *Status* and *Execution Time* as well as additional details like *Logs* and *Parameters* when clicking on a single execution.
![Check Function Success in the Azure Portal](Assets/CheckFunctionSuccess.png)
To check if the Function is running as expected, we can trigger it by uploading another image. So let's open [Postman](https://www.getpostman.com/) again and upload another image using the API ([like we did before](../04%20Data%20Storage#24-test-the-photo-upload)).
Once the photo has been uploaded, we should see a successful execution of the Azure Function in the Portal and two new resized pictures in the Blob Storage.
---
# Next Steps
[Mobile Options](../08%20Mobile%20Overview/README.md)

Просмотреть файл

@ -0,0 +1,173 @@
# API Management
Azure API Management is a turnkey solution for publishing APIs for external and internal consumption. It allows for the quick creation of consistent and modern API gateways for existing or new backend services hosted anywhere, enabling security and protection of the APIs from abuse and overuse. We like to think of API Management as a business's digital transformation hub as it empowers organisations to scale developer onboarding as well as monitoring the health of services.
![Highlighted Architecture Diagram](Assets/HighlightedArchitecture.png)
#### Why API Management
We'll be using API Management in today's workshop to act as both a gateway to our Azure Resources and as a source of documentation about what features we've made available to consumers of our services.
- Package and publish APIs to developers and partners
- Onboard developers via self-service portal
- Ramp-up developers with documentation, samples and API console
- Manage and control API access and protect them from abuse
- Monetize APIs or parts of it
- Bundle multiple APIs together
- Add new capabilities to existing APIs, such as response caching
- Insights of usage and health from analytics reports
#### What does it cost
[Azure API Management Pricing](https://azure.microsoft.com/en-us/pricing/details/api-management/)
![Developer Portal](Assets/DeveloperPortal.png)
You can find our API Management portal running [here](https://contosomaintenance.portal.azure-api.net/)
### Exploring APIs
You can explore APIs with API Management and even get automatically generated snippets in a variety of languages which demonstrate what's required to interact with the Azure services.
![Developer Portal showing the Get Job API](Assets/DeveloperPortalApiView.png)
## 1. Deploying API Management
Let's head over to our Resource Group again and hit the ***Add*** button again.
![Search for API Management](Assets/SearchForApiManagement.png)
![Search for API Management](Assets/ApiManagmentSearchResults.png)
Select the ***API Management*** result. You'll then navigate to the Creation blade.
![Search for API Management](Assets/ApiManagementFillInfo.png)
Choose the following settings and hit the Create button to start provisioning the API Management instance.
- **Name:** myawesomeneapi
- **Resource Group:** Use existing
- **Location:** Same as your Web App
- **Organization Name:** The name of your business (it'll appear in the portals).
- **Administrator Email:** Set this to yourself
- **Pricing Tier:** You can select *Developer* for this workshop.
![Search for API Management](Assets/DeploymentProgress.png)
API Management can take about 20 minutes to deploy so now might be a good time to take a quick break if you need it. If you wish to monitor the deployment progress, you can click on the right bell icon.
It's worth checking that the service is active after deployment as this can take a couple more minutes.
![Search for API Management](Assets/ActivatingService.png)
## 2. Understanding our usage
We're using API Management as our access layer, routing all HTTP requests to our backend through it. You can see this below in this basic diagram (it's not the entire architecture, but more of a high-level overview).
![Search for API Management](Assets/RequestFlow.png)
If we imagine the flow for searching jobs. Our request leaves the phone, hits our API Management, which will route it to the nearest instance of our backend. The backend that takes the request and routes it to the correct controller, which has the implementation for interacting with Azure Search.
## 3. Configuring API Management
Once API Management has finished its deployment process, we can start to configure it for interacting with the App Service instance we deployed earlier.
![Search for API Management](Assets/Deployed.png)
### 3.1 Implementing Operations
We need to define our operations for the API Management. We have already deployed our backend so we should be in a position to hook up to the App Service instance and consume real data. It's worth keeping in mind that it's possible to send Mock responses back from API Management, which can help in the development of large solutions.
To kick off, we'll create the Parts API manually, and then for the rest of the APIs, we'll use pre-built OpenAPI Specifications to automagically configure API Management. I've deleted the default Echo API from the API list as we won't be needing this. It's entirely up to you if you want to do this as well (it won't affect your project).
#### 3.1.1 Parts
Parts is one of the easiest APIs to implement within the project as we'll only be requesting an array of parts from our backend. We don't have any variables within our queries or other elements that could complicate the request.
![Search for API Management](Assets/AddAPIPartsFirstStep.png)
![Search for API Management](Assets/CreateBlankAPI.png)
Click on the ***Add API*** Button and select ***Blank API***.
![Search for API Management](Assets/AddingPartsAPI.png)
We can then provide a few details about our API.
- **Display Name:** This name is displayed in the Developer portal.
- **Name:** Provides a unique name for the API.
- **Description:** Description of the API
- **Web Service URL:** The URL where we'll be sending these requests.
- **URL Scheme** Determines which protocols can be used to access the API.
- **API URL Suffix:** The suffix is appended to the base URL for the API management service. API Management distinguishes APIs by their suffix, and therefore the suffix must be unique for every API for a given publisher.
- **Tags:** Tags enable the organization of large lists – both regarding management and presentation on the developer portal.
- **Products:** Publish the API by associating the API with a product. To optionally add this new API to a product, type the product name. This step can be repeated multiple times to add the API to multiple products.
- **Version This API:** Would you like to version the API?
![Search for API Management](Assets/NewAPIPartsComplete.png)
Once we click ***Create***, we'll be able to add our single REST operation.
![Search for API Management](Assets/EmptyPartsAPI.png)
We can then click on ***Add Operation***.
![Search for API Management](Assets/CreatePartsGETOperation.png)
By default, operations will be set configured for GET Requests, but we can change this using the drop-down menu.
- **HTTP Verb:** You can choose from one of the predefined HTTP verbs.
- **URL:** A URL path for the API.
- **Display Name:**
- **Description:** Describe the operation that is used to provide documentation to the developers using this API in the Developer portal.
- **Tags:** Tags enable the organisation of large lists – both regarding management and presentation on the developer portal.
![Search for API Management](Assets/PartsOperatoinsTypeDropdown.png)
You can see all the HTTP Verbs we can use within API Management Operations.
![Search for API Management](Assets/PartsOperationFilledIn.png)
We can go ahead and click ***Save***, and API Management is ready for testing.
![Search for API Management](Assets/PartsTest.png)
#### 3.1.2 Jobs
To save time, we've produced API specifications for you to use with this project which allow API Management to automatically configure your APIs and operations. You can find these in the [`/Swagger`](/Assets/Swagger/) folder above.
You'll want to edit these on [Line 7](https://github.com/MikeCodesDotNet/Mobile-Cloud-Workshop/blob/cae3c1e5366a78170eb217f897b7d4398f7bfd32/Walkthrough%20Guide/05_API_Management/Assets/Swagger/Jobs.swagger.json#L7) to point to your App Service instance rather than ours.
![Search for API Management](Assets/EditingSwaggerDEf.png)
Change Line 7 to your service and save.
![Search for API Management](Assets/CreateFromOpenAPISpec.png)
We're now ready to create a new API from the Jobs spec found in the Swagger folder. To do this, click on the ***OpenAPI Specification*** options.
![Search for API Management](Assets/CreateFromOpenAPISpecEmpty.png)
We can click on ***Select a File*** and pick the Jobs Swagger file.
![Search for API Management](Assets/CreateAPIAutoFilled.png)
This will fill in most of the information for you, but you should make sure to add the following:
- **API Url Suffix:** `job` (this must be lowercase and do not pluralise it)
- **Products:** Starter & Unlimited.
You're now ready to click ***Create***. The API Spec contains all the operations we'll need for interacting with the Jobs Controller in the ASP.NET Project.
![Search for API Management](Assets/JobsCreated.png)
#### 3.1.3 Search
We can now repeat the same process for Search, but this time we'll want to make sure we set the **API Url Suffix:** to `search`.
![Search for API Management](Assets/AddingSearch.png)
#### 3.1.4 Photos
> **Warning:** We're currently experiencing an issue with our implementation of Photo upload. Please bear with us while we resolve this.
# Next Steps
[Functions & Cognitive Services](../06%20Functions%20and%20Cognitive%20Services)

Просмотреть файл

@ -0,0 +1,69 @@
![Banner](Assets/Banner.png)
# 1. Mobile Overview
The mobile app currently runs on both iOS and Android devices using Xamarin.Forms. Although UWP, macOS and Linux support should technically also work, they're outside the scope of today's learnings.
![iPhone App Design](Assets/AppDesign.png)
### 2.1 Development SDK
The apps have been developed with [Xamarin.Forms](https://github.com/xamarin/Xamarin.Forms) targeting .NET Standard 2.0. You should find all your favourite .NET libraries will work with both the backend (also targeting .NET Standard 2.0) and the mobile apps.
Using Xamarin.Forms makes it possible for us to write our app just once using C# and XAML and have it run natively on a variety of platforms. This is achieved as it's an abstraction API built on top of Xamarin's traditional mobile development SDKs. Looking at the architecture below, you can see that with traditional Xamarin we can achieve up to 75% code reuse through sharing the business logic of our app.
Before we jump into Xamarin.Forms in any depth, let's take a moment to understand the underlying technology and how this works.
![Xamarin Styles](Assets/XamarinArchitectures.png)
#### Traditional Xamarin
Traditional Xamarin is a one-to-one mapping of every single API available to Objective-C and Java developers for C# developers to consume. If you're familiar with Platform Invocation, then you'll already be familiar with the core concepts of how Xamarin works. It's this one-to-one mapping that is the platform-specific element of a Xamarin app. It's not possible to share the UI layer from iOS to Android when developing with Traditional Xamarin as you won't find iOS APIs such as UIKit as part of the Android SDK. This means that our user interface is unique for the platform and we can create the amazing user experience our users expect from mobile apps.
Where we can share code is the business logic or 'boring bits' of the application. As a general rule, if you're writing code that uses only the Base Class Library (BCL) then you should be in a great position to reuse this code as the Shared C# Core of your app. If you've got existing .NET libraries that you'd like to analyze, then you should install the [.NET Portability Analyzer](https://marketplace.visualstudio.com/items?itemName=ConnieYau.NETPortabilityAnalyzer).
Traditional Xamarin apps perform exceptionally well compared to their 'native native' counterparts, with some benchmarks showing a notable performance increase when picking Xamarin over the 'native native' approach.
One concern we hear from potential users of Xamarin is taking on a large dependency like the Mono runtime in their app. It's worth understanding that our build process does much to reduce the size of our final binary. When building any Xamarin app for release, we make use of a Linker to remove any unused code, including the Mono Runtime and your code. This significantly reduces the size of the app from Debug to Release.
You should consider Traditional Xamarin when you care about code-reuse but not as much as customisation. It's also a great fit if you're experienced with Objective-C, Swift or Java in a mobile context but wish to leverage an existing .NET code base.
#### Xamarin.Forms
Xamarin.Forms is an open-source, cross-platform development library for building native apps for iOS, Android, Mac, Windows, Linux and more. By picking Xamarin.Forms, we're able to reuse our previous experience with Silverlight, WPF and UWP development to target a variety of new platforms. It being an abstraction over Traditional Xamarin means that it still produces 100% native apps using the same build process, but we can write our code in a .NET Standard library to be shared across multiple platforms.
Xamarin.Forms is a fantastic technology for building mobile apps if you've previous experience with MVVM, WPF or Silverlight. It focuses on code-reuse over customisation, but that doesn't limit us from dropping down into platform specific APIs when we want to add deeper integrations to the underlying platforms.
Xamarin.Forms comes with 24 controls out of the box, which map directly to their native type. For example, a Xamarin.Forms Button will create a Widget.Button on Android and UIKit.UIButton on iOS. Forms provides a consistent API across all the platforms it supports. This allows us to ensure that functionality we call on iOS will behave the same on Android.
Forms is a great way to leverage existing C# and .NET knowledge to build apps for platforms you may have historically considered not .NET compatible.
We're using C#, though we could have picked F#. We opted not to, given we're terrible F# developers and we want you to learn something today. If you fancy learning a little bit of information on functional programming with Forms, then check out the [blog post](http://www.charlespetzold.com/blog/2015/10/Writing-Xamarin-Forms-Apps-in-FSharp.html) by Charles Petzold (author of the [Xamarin.Forms ebook](https://developer.xamarin.com/guides/xamarin-forms/creating-mobile-apps-xamarin-forms/)), who writes a guide on how to get started.
### 2.2 MVVM with FreshMvvm
We opted to use [FreshMvvm](https://github.com/rid00z/FreshMvvm) as our MVVM library due to its small size and flexibility. It's specifically designed for Xamarin.Forms and offers lots of helpful extensions to navigation which we make full use of.
### 2.3 Architecture
We've tried to keep the platform-specific code to a minimum with the development of this app. This is because we wanted you to see how it's possible to create pleasant user experiences while maximising code reuse.
#### Core Project
The core project contains our app's Pages (Views), ViewModels, Models and network services.
As we're using the MVVM architecture, we have a clear separation of concerns within our app.
#### 3rd Party Packages*
* CarouselView.FormsPlugin
* Airbnb Lottie
* Corcav.Behaviors
* FormsToolkit
* FreshMvvm
* Humanizer
* Refit
* MvvmHelpers
* SkiaSharp.Svg
* Plugin.Media
* Plugin.ImageCircle
* Plugin.Settings
* FFImageLoading.Forms
* FFImageLoading.Transformations
---
# Next Steps
[Mobile Network Services](../09%20Mobile%20Network%20Services/README.md)

Просмотреть файл

@ -0,0 +1,168 @@
![Banner](Assets/Banner.png)
# Mobile App Network Services
Our mobile app connects to our Azure API Management sending HTTP requests to remote services to request resources. The implementation within this demo is very lightweight and designed for use in a POC rather than a production app. If you'd like to see a more resilient approach to building networking services then check out the “resilient networking” branch. Here we've implemented a [data caching and a request retry policy](https://github.com/MikeCodesDotNet/Mobile-Cloud-Workshop/blob/b4833120d9ceb70abb8753581f133f3467665edd/Mobile/ContosoFieldService.Core/Services/JobsAPIService.cs#L45), which exponentially delays retry attempts. We'll cover this in more detail later, but for our standard app, we're using an MVP approach.
We separate out each API from the API management service that we'll be interacting with. In this case, you'll see the following directory structure in the [Xamarin.Forms main shared library.](https://github.com/MikeCodesDotNet/Mobile-Cloud-Workshop/tree/master/Mobile/ContosoFieldService.Core)
### Structure
* [JobsAPIService.cs](https://github.com/MikeCodesDotNet/Mobile-Cloud-Workshop/blob/master/Mobile/ContosoFieldService.Core/Services/JobsAPIService.cs)
* _IJobServiceAPI_
* _JobsAPIService_
* [PartsAPIService.cs](https://github.com/MikeCodesDotNet/Mobile-Cloud-Workshop/blob/master/Mobile/ContosoFieldService.Core/Services/PartsAPIService.cs)
* _IPartsServiceAPI_
* _PartsAPIService._
* [PhotoAPIService.cs](https://github.com/MikeCodesDotNet/Mobile-Cloud-Workshop/blob/master/Mobile/ContosoFieldService.Core/Services/PhotoAPIService.cs)
* _IPhotoServiceAPI_
* _PhotoAPIService_
Each file contains two classes (we know this is bad practice, but keep with us 😏), where you can easily see how we've abstracted away our REST calls using a 3rd party package.
## Refit
Refit is a REST library for .NET developers to easily interact with remote APIs. It makes heavy use of generics and abstractions to minimise the amount of boilerplate code required to make HTTP requests.
It requires us to define our REST API calls as a C# interface, which is then used with an HttpClient to handle all the requests.
#### Security
Because we're using Azure API Management, we have the ability to restrict access to our APIs through the use of API Keys. With a unique API key, we're able to confirm that this app is allowed access to our services. We'll want to ensure we add the API key to all our requests and Refit makes this super easy! We just need to add the _Headers_ attribute to our interface. Here we grab our API key from the constants class.
```cs
// Refit interface for the Jobs API; the Headers attribute attaches the
// API Management subscription key to every request made through it.
[Headers(Helpers.Constants.ApiManagementKey)]
public interface IJobServiceAPI
{
}
```
To define a request, we're again going to use attributes. Take GetJobs, for example:
```cs
[Get("/job/")]
Task<List<Job>> GetJobs();
```
Adding fields is easy
```cs
[Get("/job/{id}/")]
Task<Job> GetJobById(string id);
//And we can do lots more.
[Post("/job/")]
Task<Job> CreateJob([Body] Job job);
[Delete("/job/{id}/")]
Task<Job> DeleteJob(string id);
[Put("/job/{id}/")]
Task<Job> UpdateJob(string id, [Body] Job job);
```
#### Using the service Interface
Our service implementation is pretty straightforward. We create a class to handle the service implementation. We'll stub out the methods to map closely to our interface.
```cs
public async Task<Job> CreateJobAsync(Job job)
{
}
public async Task<List<Job>> GetJobsAsync()
{
}
public async Task<Job> GetJobByIdAsync(string id)
{
}
public async Task<Job> DeleteJobByIdAsync(string id)
{
}
public async Task<Job> UpdateJob(Job job)
{
}
```
#### Implementations
As I mentioned, we're implementing a basic service layer, so our methods need only be a couple of lines of code.
**Basic**
```cs
public async Task<Job> CreateJobAsync(Job job)
{
var contosoMaintenanceApi = RestService.For<IJobServiceAPI>(Helpers.Constants.BaseUrl);
return await contosoMaintenanceApi.CreateJob(job);
}
//Get by ID
public async Task<Job> GetJobByIdAsync(string id)
{
var contosoMaintenanceApi = RestService.For<IJobServiceAPI>(Helpers.Constants.BaseUrl);
return await contosoMaintenanceApi.GetJobById(id);
}
```
**Resilient**
To build a service layer that is resilient to network outages or poor connectivity, we would want to grab a few extra packages. The first is the Xamarin Connectivity Plugin. This allows us to query what our network connectivity looks like before we decide how to process a request for data. We may want to return a cached copy if it's still valid and we've got poor connectivity. Alternatively, we may want to do a remote fetch and save the response for next time. To help combat against poor connectivity, we also use Polly to handle timeouts and retry logic. You can see in the example below, we will try 5 times before giving up.
```cs
// Resilient fetch of the jobs list: serves cached data when offline or
// fresh enough, otherwise calls the backend with exponential-backoff retries.
public async Task<List<Job>> GetJobsAsync()
{
    var key = "Jobs";

    // Handle online/offline scenario
    if (!CrossConnectivity.Current.IsConnected && Barrel.Current.Exists(key))
    {
        // If no connectivity, we'll return the cached jobs list.
        return Barrel.Current.Get<List<Job>>(key);
    }

    // If the data isn't too old, we'll go ahead and return it rather than call the backend again.
    if (!Barrel.Current.IsExpired(key) && Barrel.Current.Exists(key))
    {
        return Barrel.Current.Get<List<Job>>(key);
    }

    // Create an instance of the Refit RestService for the job interface.
    var contosoMaintenanceApi = RestService.For<IJobServiceAPI>(Helpers.Constants.BaseUrl);

    // Use Polly to handle retrying (helps with bad connectivity):
    // retry up to 5 times, waiting 2^attempt seconds between attempts.
    var jobs = await Policy
        .Handle<WebException>()
        .Or<HttpRequestException>()
        .Or<TimeoutException>()
        .WaitAndRetryAsync
        (
            retryCount: 5,
            sleepDurationProvider: retryAttempt => TimeSpan.FromSeconds(Math.Pow(2, retryAttempt))
        ).ExecuteAsync(async () => await contosoMaintenanceApi.GetJobs());

    // Save jobs into the cache
    Barrel.Current.Add(key: key, data: jobs, expireIn: TimeSpan.FromSeconds(5));

    return jobs;
}
```
---
# Next Steps
[App Center](../12%20Anayltics/README.md)

Просмотреть файл

@ -0,0 +1,173 @@
![Banner](Assets/Banner.png)
Creating intelligence-infused apps is now the norm to stay current and competitive. Microsoft offers a wide variety of AI platforms that can be consumed through any device.
Bots are a fantastic channel to deliver intelligent experiences. Contoso Maintenance Bot offers a conversational bot that integrates with Azure Search to retrieve relevant jobs from CosmosDB. The bot uses Microsoft's Bot Framework with LUIS (Language Understanding Intelligent Service).
Creating an intelligent bot for Contoso Maintenance is a simple 4-step process: first the LUIS model, then the bot app, the bot backend and finally the mobile integration.
## 1. LUIS (Language Understanding Intelligent Service)
LUIS enables you to integrate natural language understanding into your chatbot or other application without having to create the complex part of machine learning models. Instead, you get to focus on your own application's logic and let LUIS do the heavy lifting.
Starting with the intelligence part of the bot, LUIS, you can start by creating your model at https://www.luis.ai/ (or https://eu.luis.ai/ if you intend to host your bot in European data centers). There you will find a link to sign up along with abundant information to get you started.
![LUIS Welcome Page](Assets/LUISWelcome.png)
A typical LUIS app goes through the following three steps: build, train and publish.
### 1.1 Design and building
Start by creating new LUIS app (after signing up or in) by inputting a simple dialogue:
![LUIS New App](Assets/LUISNewApp.png)
After creating your app (or opening an existing app) make sure that (Build) tab is selected in your app.
![LUIS Build](Assets/Intents.png)
Let's get a few terms out of the way that you need to be familiar with in LUIS:
***Intents***
Intents are how LUIS determines what a user wants to do. If your client application is a travel agency, then you will need the intents "ListJobs" and "RentHotelRoom" in order to identify when your users want to perform these different tasks. See Intents in LUIS for more detail.
***Utterances***
An utterance is a textual input that LUIS will interpret. LUIS first uses example utterances that you add to an intent to teach itself how to evaluate the variety of utterances that users will input. See Utterances in LUIS for more detail.
***Entities***
You can think of entities like variables in algebra; it will capture and pass relevant information to your client app. In the utterance, "I want to buy a ticket to Seattle", you would want to capture the city name, Seattle, with the entity, like destination_city. Now LUIS will see the utterance as, "I want to buy a ticket to {destination_city}". This information can now be passed on to your client application and used to complete a task. See Entities in LUIS for more detail.
Now let's start by creating a new intent, in our case the “greeting” intent. Next is writing as many utterances as you need to represent a user greeting:
![LUIS Utterance](Assets/GreetingUtterance.png)
The greeting intent is easy in our case; we just want to respond to this by saying “welcome, this is what I can do…”
You can include a cancel intent to indicate that user does not wish to proceed or to disregard their request (in our case we are not using one).
Moving on with a more complex intent that utilises entities, “services.listjobs” intent. Below is a screenshot of the suggested completed one:
![ListJobs Intent](Assets/ListJobsIntent.png)
When creating intents that have entities that you would like to extract, you need to add them to the entities tab (on the right).
Usually, I start by writing a few utterances to gauge my entities if I have no clue about where to start. You can check if the entities are detected correctly or not by monitoring the blue boxes in your utterances.
Entities support multiple types based on its nature. In ContosoMaintenance we used List types only.
![Entities Overview](Assets/EntitiesOverview.png)
![Job Status Entity](Assets/StatusEntity.png)
![Job Type Entity](Assets/TypeEntity.png)
### 1.2 Train and test
After updating the entities or updating any of the utterances, you need to re-train your model, which is indicated by a red bulb in the train button:
![LUIS Model Training](Assets/TrainButton.png)
Click train often after completing a set of changes. Also, you need to do this before trying to test your model.
You can access the test by clicking on the blue test button:
![Model Testing](Assets/Testing.png)
Before moving to the next step, just keep in mind that LUIS builder can help you get started with intent creation through “Add pre-built domain intent” which will generate an intent based on a predefined template.
### 1.3 Publish & improve
After you are satisfied with LUIS model (you should be proud 😊), it is time to publish it to the world through the Publish tab:
![Model Publishing](Assets/PublishMain.png)
You will notice that under your relevant region you will have an initial starter key at the bottom of the publish page. This key includes low bandwidth that is suitable only for basic testing.
To get a production grade key, you need to go to [Azure Portal](https://portal.azure.com) and issue a new cognitive service key.
Under New -> AI + Cognitive Services you will find Azure marketplace services for provisioning a new LUIS key that can be used in production.
![New Azure Cognitive Service](Assets/AzureNewCognitiveService.png)
Enter the service details and click create.
![New Azure Cognitive Service Dialog](Assets/AzureNewCognitiveServiceDialog.png)
Once it is provisioned, you can find your keys by navigating to it:
![New Azure Cognitive Service Keys](Assets/AzureLUISKeys.png)
Copy your primary key and add it to your LUIS app on the builder website (https://www.luis.ai or https://eu.luis.ai).
> **Hint:** It may take several minutes for your new keys to be accessible. Please wait a bit before you start using them.
Although you are ready for prime time, you will probably go back to your model and introduce improvements and adjustments on a regular basis to make sure to continue to present the best value to your bot users.
## 2. Chat Bot
Now we have the brain behind our Bot good to go; it is time to think about the bot itself. So bots, in general terms, are automation software. This means they are essentially stupid 😊. What makes a bot smart or not are the actual services that it automates the communication to and from.
What you need to have your bot up and running with basic functionality is a bot app and a bot backend.
### 2.1 Azure Bot Service
Azure Bot Service allows you to build, connect, deploy, and manage intelligent bots to naturally interact with your users on a website, app, Cortana, Microsoft Teams, Skype, Slack, Facebook Messenger, and more. Get started quick with a complete bot building environment, all while only paying for what you use.
It also speeds up development by providing an integrated environment that's purpose-built for bot development with the Microsoft Bot Framework connectors and BotBuilder SDKs. Developers can get started in seconds with out-of-the-box templates for scenarios including basic, form, language understanding, question and answer, and proactive bots.
You can start by creating a bot service on Azure Portal by selecting New -> AI + Cognitive Services -> Web App Bot. This will create all the needed resources to run your bot as fast as possible.
![Bot Creation](Assets/BotCreation.png)
By selecting Language understanding as the startup code template, it will provide the service and deploy source code with bot framework integrated and starter LUIS integration.
> **Hint:** You can then download the source code of the provisioned bot through the FTP URL provided in the bot Web App service.
Below is the architecture of the bot components.
![Bot Architecture](Assets/BotArchitecture.png)
### 2.2 Bot Web App Backend
Now you have a bot service that is ready for your development input. The bot backend is located at `Mobile-Cloud-Workshop/Backend/BotBackend/` in the git repo. Open the solution in Visual Studio 2017 (Community edition will work as well).
> **Hint:** As we developed this project, the Bot Framework didn't fully support .NET Core. This meant that we couldn't develop it on a Mac as we needed the full .NET Framework library to leverage all the features of the Bot Framework. The Bot team is working on releasing full .NET Core support soon.
After starting working with the solution, the first thing is to update the settings in the web.config section below with your keys:
```xml
<appSettings>
<!-- update these with your Microsoft App Id and your Microsoft App Password-->
<add key="MicrosoftAppId" value="YOURS" />
<add key="MicrosoftAppPassword" value="YOURS" />
<add key="SearchName" value="YOURS" />
<add key="IndexName" value="YOURS-index" />
<add key="SearchKey" value="YOURS" />
<add key="LuisAppId" value="YOURS" />
<add key="LuisAPIKey" value="YOURS" />
<add key="LuisAPIHostName" value="YOURS" />
<add key="AzureWebJobsStorage" value="YOURS" />
<add key="BotCardsBlobStorageURL" value="YOURS" />
</appSettings>
```
After updating the keys, you can publish the project to Azure directly from Visual Studio publish options (right-click the project -> Publish). You can connect directly to Azure using your credentials or Import Profile (you can get the publishing profile from the bot web app overview window -> Get publish profile)
You are done! Congratulations!
Now to test the actual bot implementation and code, you can open your bot service from Azure and click on the blade that says “Test in Web Chat”
![Bot Testing](Assets/AzureBotTesting.png)
> **Hint:** As a recommended practice, you should remove all of your secrets from web.config and put them inside the “App Settings” blade on the Azure Web App service. This way you avoid checking your secrets into source control.
## 3. Integration with Mobile App
So now, after you have built, tested and deployed your bot, you can easily integrate it into a Mobile App through a simple WebView screen. Just find your Web channel bot URL and include it in your app.
![Bot URL](Assets/AzureBotWebUrl.png)
You can reach the Web channel configuration page from the "Channels" blade in your Azure Bot Service instance.
> **Hint:** To have more control over the bot interactions and improve user experience, it is recommended to replace the WebView approach with a more solid native experience. This is done by configuring and using the “Direct Channel” on your bot. Direct channel is about using pure APIs to communicate with the bot. Refer back to the Bot Framework documentation for more information.
---
# Next Steps
[Authentication](../11%20Authentication/README.md)

Просмотреть файл

@ -0,0 +1,363 @@
![Banner](Assets/Banner.png)
# Authentication
Adding Authentication to our app and backend is a little outside of the scope of today's workshop due to time constraints. We believe Authentication is an important enough topic that we've opted to include a guide for you to get an understanding of the key concepts required to implement any Identity Provider into your projects. For that, we chose [Azure Active Directory B2C](https://azure.microsoft.com/services/active-directory-b2c/) to manage users and authentication as our service of choice.
> **Hint:** The Mobile App uses the [OAuth 2.0 Implicit Authentication flow](https://oauth.net/2/grant-types/implicit/), which shows the user a Web Browser window instead of native Textboxes for entering username and password. This adds security as users don't have to trust the app developer to store and handle their passwords securely.
>
> Although Azure ADB2C also supports a [native login with resource owner password credentials flow (ROPC)](https://docs.microsoft.com/en-us/azure/active-directory-b2c/configure-ropc?WT.mc_id=b2c-twitter-masoucou), it is [not recommended from a security perspective](https://www.scottbrady91.com/OAuth/Why-the-Resource-Owner-Password-Credentials-Grant-Type-is-not-Authentication-nor-Suitable-for-Modern-Applications).
#### Why Azure Active Directory B2C
For our business driven application, Azure Active Directory B2C (short: Azure ADB2C) is a perfect match because of the following reasons:
- Super simple or super custom authentication flow
- Supports open standards to integrate with all technology stacks
- Support for Social Authentication Providers (like Facebook)
- Scale to hundreds of millions of customers
- Use a white label solution, promote your brand
- Integrates with existing Active Directories or CRM and marketing databases
#### What does it cost
The [Azure Active Directory B2C pricing](https://azure.microsoft.com/en-us/pricing/details/active-directory-b2c/) is quite simple and straightforward. The **first 50.000 users and authentication per month are free** so we basically start without any upfront costs. Once we exceed this threshold, we will pay for additional users staggered.
## 1. Create a new Azure Active Directory B2C
Creating a new Azure Active Directory Service is a bit tricky and requires some steps that can be a bit confusing so let us go through them together carefully.
### 1.1 Create a new Tenant
Browse to the [Azure Portal](https://portal.azure.com), click the ***Create a new resource*** button, search for *"Azure Active Directory B2C"* and click the ***Create*** button of the regarding blade to start the creation wizard.
![Create a new AADB2C Tenant](Assets/CreateNewAADB2C.png)
As we want to create a new Tenant, we should click on ***Create a new Azure ADB2C Tenant*** and fill in the required info.
![Fill in AADB2C Info](Assets/FillAzureADB2CInfo.png)
Click the ***Create*** button to kick-off the creation process.
### 1.2 Link the Tenant with your Azure Subscription
Once the Tenant has been created, it needs to be linked to an Azure Subscription. This step can be done in the same window by clicking on the second option ***Link an Existing Azure AD B2C Tenant to my subscription*** and selecting the recently created Tenant in the opening blade.
![Link Existing AADB2C Tenant](Assets/LinkExistingAADB2CTenant.png)
Fill in the required information and hit **Create**.
- **Azure ADB2C Tenant:** Your recently created Tenant
- **Azure ADB2C Resource name:** *Filled in automatically*
- **Resource Group:** Use existing
## 2. Configure the Active Directory
When we navigate to the B2C Tenant that we have just created, we will not see many details or configuration options. This is simply because it lives in its own Directory within Microsoft Azure. So we need to switch directories. For this, either click the ***Azure AD B2C Settings*** button or switch directories by clicking on your account in the top-right corner.
![Switch Azure Directory](Assets/SwitchAzureDirectory.png)
### 2.1 Add a new Sign-up or sign-in policy
Enabling users to log into our Active Directory or to create an Account in there by themselves is a good start. For this, we need a *Policy*. In Active Directory, Policies define how users can log in, which Authentication Providers (like Facebook) they can use and what important information is, that users have to provide.
To add a new Policy, click on ***Sign-up or sign-in policies*** in the side menu of the Azure AD B2C window and add a new Policy using the ***Add*** button at the top.
![Add Policy](Assets/AddPolicy.png)
When defining a new policy, Azure will ask you for a bunch of attributes so let's inspect them quickly to make the right choices.
#### Identity providers
The services, we want to allow users to register at and log into our application. We can select classic E-Mail signup here, where users define an E-Mail address and password or Social Login Providers like Facebook. As we don't have any social authentication configured yet, E-Mail signup will be the only selectable option at the moment. We can add other authentication providers later.
#### Sign-up attributes
We already talked about these. Here we can define, which information a user has to provide to us, when he signs up for our application for the first time.
#### Application claims
This is the information that Active Directory gives back to our application once the user logs in. We definitely want to get his **User's Object ID** but also might want to get his name or address back.
#### Multifactor authentication
If we want to enhance security through an additional authentication factor, we can enable it here.
#### Page UI customization
As you can see later, the Login UI looks pretty poor by default. Here we can change styling and add corporate CI to the login flow to let it look like a natural part of our application. For the purpose of this tutorial, we can leave it as it is.
![Configure Policy](Assets/ConfigurePolicy.png)
Create your first policy with the inputs below and confirm your selections with the ***Create*** button.
- **Name:** GenericSignUpSignIn
- **Identity providers:** Email signup
- **Sign-up attributes:** Display Name
- **Application claims:** Display Name, Email Addresses, Identity Provider, User's Object ID
- **Multifactor authentication:** Off
- **Page UI customization:** Default
## 3. Setup the Active Directory Application
Now that users can sign-up and log into our Active Directory, we need to register the application itself.
### 3.1 Create a new Application
Select the ***Applications*** menu and click the ***Add*** button from the new blade that appears.
Here you're going to give the Azure AD B2C application a name and specify whether it should contain a Web API and Native client. You want to do both, so we select ***Yes*** on both options which let a bunch of options appear.
![Add new AD Application](Assets/AddNewAdApp.png)
#### Web APP / Web API configuration
Here we configure our backend and API. The **Redirect Uri** gets called to tell a Website if a Login request was successful or not. As we don't use that, we can fill *any* URL in here. Although the **App ID URI** is optional, we need to fill it out as we need it for permissions and scopes later. It does not need to resolve to anything.
#### Native Client configuration
When configuring the native client, we should define a unique **Custom Redirect URI**. This URI is specifying a custom URL scheme that the web view which performs the sign-up and sign-in in the native app will use to communicate back to the app once the sign-up or sign-in is complete. The standard for Active Directory is `msal{APPLICATION-ID}://auth`.
Fill in all the values and register the application with the ***Create*** button.
- **Name:** Contoso Maintenance
- **Include Web App / Web API:** Yes
- **Allow implicit flow:** Yes
- **Reply URL:** `https://myawesomestartupapi.azurewebsites.net/api/login`
- **App ID URI:** `https://myawesomenewstartup.onmicrosoft.com/`**`backend`**
- **Native client:** Yes
- **Custom Redirect URI:** `msalcontosomaintenance://auth`
Once the application is registered, we can get its unique id from the overview page of the portal. We will need this a couple of times, when configuring Frontend and Backend later.
### 3.2 Define a new Scope
The idea behind scopes is to give permission to the backend resource that's being protected, so we should define a new one for the Mobile App.
![Add new Scope](Assets/AddNewScope.png)
Hit the ***Published scopes*** menu option and enter any name and description. We could call it "read_only" for example.
### 3.3 Activate API Access
Once we have a scope defined, we can enable API Access for it. Our Mobile App needs it to communicate with the Active Directory and gain Access Tokens for its users.
![Add API Access](Assets/AddAPIAccess.png)
Click the ***API Access*** menu item and add a new API for our application and select the "read_only" scope that we just created.
## 4. Connect the Web Api Backend with Azure Active Directory
Now that the Active Directory is set up, we can connect it to the Backend and introduce it as the Identity Provider of choice. As ASP.NET Core has support for authentication built-in, not much code is needed to add Active Directory Authentication application-wide.
> **Hint:** Remember, although we use existing libraries in our Backend and Frontend projects, Azure Active Directory B2C is based on open standards such as OpenID Connect and OAuth 2.0 and can be integrated into any framework out there.
In the Backend's [`Startup.cs`](/Backend/Monolithic/Startup.cs) class, the Active Directory connection is already provided as shown below.
```csharp
public void ConfigureServices(IServiceCollection services)
{
// ...
// Add Azure Active Directory B2C Authentication
services.AddAuthentication(options =>
{
options.DefaultScheme = JwtBearerDefaults.AuthenticationScheme;
})
.AddJwtBearer(options =>
{
options.RequireHttpsMetadata = false;
options.Audience = Configuration["ActiveDirectory:ApplicationId"];
options.Events = new JwtBearerEvents
{
OnAuthenticationFailed = AuthenticationFailed
};
var authorityBase = $"https://login.microsoftonline.com/tfp/{Configuration["ActiveDirectory:Tenant"]}/";
options.Authority = $"{authorityBase}{Configuration["ActiveDirectory:SignUpSignInPolicy"]}/v2.0/";
});
// ...
}
```
[View in project](/Backend/Monolithic/Startup.cs#L47-L65)
As you can see, we use `Configuration` variables one more time to not hard code the config properties. So it takes the Azure Active Directory config out of the Environment Variables as defined in [`appsettings.json`](/Backend/Monolithic/appsettings.json).
```json
"ActiveDirectory": {
"Tenant": "",
"ApplicationId": "",
"SignUpSignInPolicy": ""
}
```
[View in project](/Backend/Monolithic/appsettings.json#L30-L34)
So let's set these variables to the correct values and head back to our App Service, open the ***Application Settings*** and add these variables here as we did before for CosmosDB and Storage.
- **`ActiveDirectory:Tenant`:** "{OUR_AD}.onmicrosoft.com"
- **`ActiveDirectory:ApplicationId`:** *{ID_OF_THE_REGISTERED_APPLICATION}*
- **`ActiveDirectory:SignUpSignInPolicy`:** B2C_1_GenericSignUpSignIn
![Add ADB2C Settings to Azure App Service Settings](Assets/AddADB2CSettings.png)
Don't forget to hit ***Save*** after you have entered all the variables.
Some of the API calls to our backend require that a user is authenticated to proceed. `DELETE` operations are a good example for that. The code in the [`BaseController.cs`](/Backend/Monolithic/Controllers/BaseController.cs) has an `[Authorize]` attribute added to the Delete function. This will automatically refuse calls from unauthenticated clients. In a real-world scenario, you would also want to check if the User's ID matches the owner ID of the item that gets deleted to make sure the client has the right permissions.
```csharp
[Authorize]
[HttpDelete("{id}")]
public async Task<ActionResult> DeleteAsync(string id)
{
// Get ID of user who sends the request
var userId = User.FindFirstValue(ClaimTypes.NameIdentifier);
// TODO: Check, if the user is allowed to delete the item
// Currently left out for demo reasons
// ...
}
```
[View in project](/Backend/Monolithic/Controllers/BaseController.cs#L73-L100)
This basically means that if we fire a Delete request to the backend, without an Access Token in the Header, we will get back a **401 Unauthorized** response as shown in the Postman Screenshot below.
![Delete Call with Postman is Unauthorized](Assets/PostmanUnauthorized.png)
## 5. Configure the Mobile App
Most of the authentication code is already written in the App but let's go through the important parts quickly, to understand how everything is glued together.
Mostly, the whole process of Logging in, Logging out, Refreshing the Access Token in the background, handling the current user and so on lives in the [`AuthenticationService.cs`](/Mobile/ContosoFieldService.Core/Services/AuthenticationService.cs). Check it out, if you need more details on how Authentication is implemented on the client. It uses the [Microsoft.Identity.Client](https://www.nuget.org/packages/Microsoft.Identity.Client/) NuGet package (or MSAL) to take care of communicating to Azure AD B2C (and caching the tokens in response) for us. This removes a lot of work on our end.
The `AuthenticationService` gets configured with a set of variables in the [`Constants.cs`](/Mobile/ContosoFieldService.Core/Helpers/Constants.cs) file. As you can see, we define the recently created Scope "read_only" here.
```csharp
public static class Constants
{
// ...
// Azure Active Directory B2C
public static string Tenant = "myawesomenewstartup.onmicrosoft.com";
public static string ApplicationID = "{ID_OF_THE_REGISTERED_APPLICATION}";
public static string SignUpAndInPolicy = "B2C_1_GenericSignUpSignIn";
public static string[] Scopes = { "https://myawesomenewstartup.onmicrosoft.com/backend/read_only" };
}
```
[View in project](/Mobile/ContosoFieldService.Core/Helpers/Constants.cs#L15-L18)
### 5.1 iOS specific steps
In the iOS project, we have to edit the [`Info.plist`](/Mobile/iOS/Info.plist) file and add a URL type to define a callback URL that gets invoked when the web view is dismissed. We have configured this Callback URL earlier, when adding the Application to Active Directory and added the Native Client.
```xml
<key>CFBundleURLTypes</key>
<array>
<dict>
<key>CFBundleTypeRole</key>
<string>Editor</string>
<key>CFBundleURLName</key>
<!-- Use your Bundle identifier here -->
<string>com.contoso.contosomaintenance</string>
<key>CFBundleURLSchemes</key>
<array>
            <!-- Use your Custom Redirect URI minus the ://auth -->
<string>msalcontosomaintenance</string>
</array>
</dict>
</array>
```
[View in project](/Mobile/iOS/Info.plist#L62-L74)
Then we need to override the `OpenUrl` function in the AppDelegate. It's pretty straightforward and will look like this:
```csharp
public override bool OpenUrl(UIApplication app, NSUrl url, NSDictionary options)
{
AuthenticationContinuationHelper.SetAuthenticationContinuationEventArgs(url);
return true;
}
```
[View in project](/Mobile/iOS/AppDelegate.cs#L56-L60)
The `AuthenticationContinuationHelper` is from the MSAL library, and it's there to help us coordinate the authentication flow.
### 5.2 Android specific steps
In the Android app's `MainActivity`, we need to set that `UIParent` property. That's going to be done in the `OnCreate` function and will look like this:
```csharp
// Configure Authentication
AuthenticationService.UIParent = new UIParent(Xamarin.Forms.Forms.Context as Activity);
```
[View in project](/Mobile/Droid/MainActivity.cs#L44)
This `UIParent` allows the MSAL to show the web view using the current Android activity.
Then we need to modify the `AndroidManifest.xml` file. Add this into the `<application>` element:
```xml
<activity android:name="microsoft.identity.client.BrowserTabActivity">
<intent-filter>
<action android:name="android.intent.action.VIEW" />
<category android:name="android.intent.category.DEFAULT" />
<category android:name="android.intent.category.BROWSABLE" />
    <!-- Use your Custom Redirect URI minus the ://auth -->
    <data android:scheme="msalcontosomaintenance" android:host="auth" />
</intent-filter>
</activity>
```
[View in project](/Mobile/Droid/Properties/AndroidManifest.xml#L15-L22)
That new `<activity>` element is defining a browser window that can be opened and it's going to be used for the web view that lets users sign up or sign in to our app.
## 6. Understanding Authentication Processes
### 6.1 Login flow
For security reasons, OAuth2 dictates that User Logins have to be done via Web Views. When a user presses the Login button, the ([customizable](https://docs.microsoft.com/en-us/azure/active-directory-b2c/active-directory-b2c-reference-ui-customization)) Azure ADB2C Website pops up and asks the user to create a new account or log in with an existing one. Once the process is finished, the Web View will redirect the results to the application.
![Login Screens](Assets/LoginScreens.png)
When a user logs in and tries to access a protected resource at the Backend that requires authentication, usually a pre-defined login flow will be executed.
![Login Screens](Assets/AuthFlow.png)
A successful login flow would look like this:
1. User opens Login window in the App
1. User logs in and gets an Access Token from the Authentication Provider
1. User sends request to Backend with Access Token
1. Backend contacts Authentication Provider to prove Token validity
1. Authentication Provider approves Token and returns User Account Details
1. Backend returns secure resources
### 6.2 Refresh Access Tokens
Access Tokens usually have a short time to live to provide additional security and let potential attackers that stole an Access Token only operate for a short time.
To avoid the user having to log in and acquire a new token every 30 minutes, the Access Token can be refreshed silently in the background. Usually, a Refresh Token is used for this. The Mobile App uses the MSAL library, which already provides functionality to refresh the Access Token. Check out the [`AuthenticationService.cs`](/Mobile/ContosoFieldService.Core/Services/AuthenticationService.cs) for implementation details.
The App tries to refresh the Access Token automatically when it receives a `401 Unauthorized` response and only shows the Login UI to the user if the background refresh failed.
Check out the [Mobile Network Services](/Walkthrough%20Guide/09%20Mobile%20Network%20Services/) guide for additional details about resilient networking.
# Additional Resources
There are several cool things you can do with Azure Active Directory, that will not be part of this workshop. If you want to go further, check out these links.
- [Add Social Authentication Providers](https://docs.microsoft.com/en-us/azure/active-directory-b2c/active-directory-b2c-setup-fb-app)
- [Customize the Login UI](https://docs.microsoft.com/en-us/azure/active-directory-b2c/active-directory-b2c-reference-ui-customization)
- [Enable Multifactor authentication](https://docs.microsoft.com/en-us/azure/active-directory-b2c/active-directory-b2c-reference-mfa)
- [Login with an existing Azure Active Directory Account](https://docs.microsoft.com/en-us/azure/active-directory-b2c/active-directory-b2c-setup-aad-custom)
- [Configure the resource owner password credentials flow (ROPC) for native login](https://docs.microsoft.com/en-us/azure/active-directory-b2c/configure-ropc)

Просмотреть файл

@ -0,0 +1,69 @@
![Banner](Assets/Banner.png)
# App Center
[App Center](https://www.visualstudio.com/app-center/) offers a rich suite of services aimed at mobile developers. We're going to use it today to add crash reporting, analytics and push notifications. We also have a CI/CD workshop that'll be running which covers the build and testing elements of mobile development.
## Crash Reporting
App Center Crash Reporting lets us know when our app crashes on any device.
![Crash Reporting Overview](Assets/AppCenterCrashOverview.png)
Crashes are grouped together by similarities like the reason for the crash and where they occur in the app. It is possible to inspect each individual crash report for the last 3 months; after that a stub of 25 crashes will be kept.
![Crash Report](Assets/AppCenterCrashReport.png)
## Analytics
App Center Analytics will help you understand more about your app users and their behavior when using your app.
![Crash Report](Assets/AppCenterAnalyticsOverview.png)
![Crash Report](Assets/AppCenterAnalyticsEvents.png)
## Push
Use App Center to easily send targeted and personalised push notifications to any mobile platform from any cloud or on-premises backend.
Push notifications are vital for consumer apps and a key component in increasing app engagement and usage. For enterprise apps, they can also be used to help communicate up-to-date business information. It is the best app-to-user communication because it is energy-efficient for mobile devices, flexible for the notification senders, and available while corresponding apps are not active.
### How Push Notifications Works
Push notifications are delivered through platform-specific infrastructures called Platform Notification Systems (PNSes). They offer barebones push functionality to deliver a message to a device with a provided handle, and have no common interface. To send a notification to all customers across iOS and Android, we have to work with APNS (Apple Push Notification Service) and FCM (Firebase Cloud Messaging).
At a high level, here is how push works:
1. The client app decides it wants to receive pushes hence contacts the corresponding PNS to retrieve its unique and temporary push handle. The handle type depends on the system (e.g. WNS has URIs while APNS has tokens).
2. The client app stores this handle in the app back-end or provider.
3. To send a push notification, the app back-end contacts the PNS using the handle to target a specific client app.
4. The PNS forwards the notification to the device specified by the handle.
Thankfully for us, the App Center SDKs handle most of this for us. In our app, all we have to do is ensure we start the App Center SDK with Push being enabled. It'll handle the rest for us.
```
AppCenter.Start(
Helpers.Constants.AppCenterIOSKey Helpers.Constants.AppCenterUWPKey Helpers.Constants.AppCenterAndroidKey,
typeof(Analytics), typeof(Crashes), typeof(Push));
```
[View in project](https://github.com/MikeCodesDotNet/Mobile-Cloud-Workshop/blob/5f49af15eb4ad44c9d98e4ded8c32ab6edf87d8d/Mobile/ContosoFieldService.Core/App.xaml.cs#L63)
### Configuring Notifications
#### iOS
-Todo
#### Android
-Todo
### Sending Notifications
Sending push notifications is super simple. Start by clicking the "Send Notification" button in the top-right corner.
![Crash Report](Assets/AppCenterPushNotifications.png)
![Crash Report](Assets/AppCenterPushNewNotification.png)
Fill in the notification payload
![Crash Report](Assets/AppCenterNewPushNotificationTarget.png)
Who do you want to send the notification to?
![Crash Report](Assets/AppCenterPushNewNotificationReview.png)
Does this look correct?
![Crash Report](Assets/AppCenterPushAudiences.png)
---
# Next Steps
[Chat Bot](../10%20Chatbot/README.md)

Просмотреть файл

@ -0,0 +1 @@
coming soon...

Просмотреть файл

@ -0,0 +1,32 @@
# Project Spec
## Customer Overview
Contoso Maintenance is an Airline maintenance business, specializing in servicing aircraft in Northern Europe.
It currently employs >500 engineers who are responsible for ensuring the air-worthiness of the aircraft they're repairing.
The business has been slow to adapt to changes in their processes and have seen customers sign maintenance contracts with their main competitor who has completed a digital transformation process.
The business is looking to level the playing field through the building and deploying mobile apps that utilize artificial intelligence to increase engineer productivity.
## Current Process
The engineers currently receive a number of faxes every morning which contains information on the maintenance tasks required to be completed that day.
The engineer will log any notes and report back at the end of the working day (through Fax)
Spare parts are ordered on a dedicated phone line, which often has long wait times and requires a fulltime employee to be on hand to answer the call.
## Proposed Solution
The engineers should have a mobile app, installed on their personal devices in which they can use to communicate directly with backend-systems.
This will remove the reliance on paper and move everything to the cloud.
## Mobile Application
* The mobile app should display a native user interface
* The mobile app should communicate with APIs hosted in remote data centers.
* It should be possible to retrieve a list of jobs.
### Supported Platforms
* The app should run on iOS 7 and above
* The app should run on Android v4.4 and above

Просмотреть файл

@ -0,0 +1,15 @@
![Banner](Assets/Banner.png)
# Walkthrough Guide
Welcome to the Azure Mobile Workshop. Today you're going to learn how to leverage your existing skills to build highly reliable backend systems to power a modern cross-platform mobile app built using Xamarin.Forms.
We've tried to make this app as production-ready as possible and provide a good foundation if you wish to pursue development in the future. Think of this as your development starter kit. Where we think there's room for improvement within the code base, we've added explanations about how to improve it to bring it up to production standards. We've tried to pick non-opinionated frameworks to give you the flexibility to extend this app to fit your requirements.
## Prerequisites
You should have already followed the [setup guide](01%20Setup/README.md) before attending the workshop. We've scheduled an hour before things start for you to get support with configuration, but please try your best to attend with a fully functioning development environment.
This workshop is for intermediate to experienced C# developers who may lack either Azure or Xamarin experience. This workshop is not for C# beginners and we expect you to know design patterns such as MVVM and MVC as well as having a basic understanding of the differences between developing Web APIs vs Mobile front-ends.
If you are unfamiliar with the C# language, you can get started with a free course on the Internet. The basics of the language can be learned at www.learncs.org. Once you have the language basics under your belt, you can move on to building applications with Xamarin. You should be ready to follow this guided workshop, creating both the front-end and backend to run a field service app.