Initial load of DO code for IoT Hub Device Update

This commit is contained in:
Shishir Bhat 2021-02-09 16:30:38 -08:00
Родитель ac8b59f285
Коммит 22ccf3873b
187 изменённых файлов: 14120 добавлений и 68 удалений

65
.clang-format Normal file
Просмотреть файл

@ -0,0 +1,65 @@
# $ sudo apt-get install clang-format-6.0
# Integration with tools like VSCode: https://clang.llvm.org/docs/ClangFormat.html
# Online configuration tool:
# https://zed0.co.uk/clang-format-configurator/
# This is currently disabled (via DisableFormat) because some of our conventions deviate from msft standard
# and clang-format-6.0 does not have customizations for these. The latest release (11.0)
# adds some, like BeforeLambdaBody, but not all and it is not available in a debian release yet.
# Some deviations:
# 1) Lambda header is on the same line but the body+braces are on a new line, not indented.
# 2) Header includes follow the order of do_common.h, <current_cpp's>.h, system/oss, private headers.
# This can and should be solved by leaving a space between do_common.h and <current_cpp's>.h.
# 3) A single space after class data member and before initializer open brace but not in other places.
# Revisit when there is time and see if we can either customize to our convention or get rid of the deviations.
# NOTE: Remove SortIncludes when removing DisableFormat. It is required because includes get sorted even with DisableFormat.
---
BasedOnStyle: Microsoft
BreakConstructorInitializers: AfterColon
AccessModifierOffset: '-4'
AlignAfterOpenBracket: DontAlign
AlignConsecutiveAssignments: 'false'
AlignConsecutiveDeclarations: 'false'
AlignEscapedNewlines: Left
AllowAllConstructorInitializersOnNextLine: 'false'
AllowShortBlocksOnASingleLine: 'false'
AllowShortFunctionsOnASingleLine: InlineOnly
BraceWrapping:
AfterCaseLabel: true
AfterClass: true
AfterControlStatement: true
AfterEnum: true
AfterFunction: true
AfterNamespace: true
AfterObjCDeclaration: true
AfterStruct: true
AfterUnion: false
AfterExternBlock: true
BeforeCatch: true
BeforeElse: true
IndentBraces: false
SplitEmptyFunction: true
SplitEmptyRecord: true
SplitEmptyNamespace: true
BreakBeforeBraces: Custom
ColumnLimit: '140'
ConstructorInitializerAllOnOneLineOrOnePerLine: 'true'
Cpp11BracedListStyle: 'false'
DisableFormat: 'true'
FixNamespaceComments: 'true'
IncludeBlocks: Preserve
IndentPPDirectives: None
KeepEmptyLinesAtTheStartOfBlocks: 'false'
Language: Cpp
PointerAlignment: Left
ReflowComments: 'true'
SortIncludes: 'false'
SpaceAfterLogicalNot: 'false'
SpaceBeforeAssignmentOperators: 'true'
SpaceBeforeCpp11BracedList: 'true'
Standard: Cpp11
TabWidth: '4'
UseTab: Never
...

14
.gitattributes поставляемый Normal file
Просмотреть файл

@ -0,0 +1,14 @@
# Set the default behavior, in case people don't have core.autocrlf set.
# Avoid Git line ending issues by forcing everything to use LF
# except Windows batch files which require CRLF.
* text=auto eol=lf
# Explicitly declare text files you want to always be normalized and converted
# to native line endings on checkout.
*.c text
*.cpp text
*.h text
*.cmake text
*.sh text
*.[cC][mM][dD] text eol=crlf
*.[bB][aA][tT] text eol=crlf

6
.github/CODEOWNERS поставляемый Normal file
Просмотреть файл

@ -0,0 +1,6 @@
# The DO team will be the default owners for everything in
# the repo. Unless a later match takes precedence,
# DO team members will be requested for review when someone
# opens a pull request. Additional control can be had via
# Code Review Assignment functionality on github.
* @microsoft/deliveryoptimization

57
.gitignore поставляемый Normal file
Просмотреть файл

@ -0,0 +1,57 @@
# Prerequisites
*.d
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
# VSCode
*.vscode
# Visual Studio
*vs/
*.vcxproj*
*.sln
# CMake, CPack
*CMakeFiles/
*CMakeCache.txt
*MakeFile*
CMakeLists.txt.user
CMakeCache.txt
CMakeScripts
Testing
Makefile
cmake_install.cmake
install_manifest.txt
compile_commands.json
CTestTestfile.cmake
_deps
*_CPack_Packages/
*.deb

69
CMakeLists.txt Normal file
Просмотреть файл

@ -0,0 +1,69 @@
cmake_minimum_required(VERSION 3.7)
# cmake_minimum_required must be the first command executed: it sets policy defaults for everything below.
project (do_project_root)
# Use the build/build.py script to choose and build a subproject.
# More cmake options are defined by individual subprojects.
# Only one subproject can be built with each invocation of cmake. This avoids confusion in specifying options
# exposed by an individual subproject.
option (DO_INCLUDE_AGENT "Build subproject client-lite" OFF)
option (DO_INCLUDE_PLUGINS "Build subproject plugins" OFF)
option (DO_INCLUDE_SDK "Build subproject sdk-cpp" OFF)
option (DO_BUILD_TESTS "Set DO_BUILD_TESTS to OFF to skip building tests." ON)
# Require C++14 exactly (no fallback to an older standard, no GNU extensions — -std=c++14, not gnu++14).
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
# Get verbose output from cmake generation and build steps
set(CMAKE_VERBOSE_MAKEFILE ON)
# PIC (Position Independent Code) ensures .a files can be linked to executables that have PIE enabled
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
# PIE (Position Independent Executable) ensures exe/.so can run when ASLR is enabled in the target OS
# _FORTIFY_SOURCE=2 and -fstack-protector-strong add runtime buffer-overflow checks;
# -Wformat/-Werror=format-security reject insecure printf-style format usage.
# NOTE: these are applied globally via CMAKE_CXX_FLAGS so every subproject target inherits them.
set(COMPILER_HARDENING_FLAGS
"-fPIE -pie -D_FORTIFY_SOURCE=2 -fstack-protector-strong -Wformat -Werror=format-security")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall ${COMPILER_HARDENING_FLAGS} -fmerge-all-constants")
# relro+now thwarts some attack vectors by reordering some ELF data structures and also by making the GOT read-only
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie -z relro -z now")
# Shared cmake helper modules live in common/cmake and are found via CMAKE_MODULE_PATH.
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/common/cmake)
include (do-build-helpers)
include (do-packaging-helpers)
# For contributors, please modify builder name here if changes are made to project source
set(DO_BUILDER_IDENTIFIER "DU")
message(STATUS "Using builder name: ${DO_BUILDER_IDENTIFIER}")
message(STATUS "NOTE: Please modify builder name if modifying project source")
# DO_BUILD_TIMESTAMP may be passed in by the caller (e.g. build.py) for reproducible stamps.
if (DO_BUILD_TIMESTAMP)
message(STATUS "Build timestamp found: ${DO_BUILD_TIMESTAMP}")
else ()
# Note: This is evaluated during generate phase only. Need to generate again to refresh it.
string(TIMESTAMP DO_BUILD_TIMESTAMP "%Y%m%d.%H%M%S" UTC)
message (STATUS "Build timestamp NOT found. Generated it via cmake: ${DO_BUILD_TIMESTAMP}.")
endif ()
# Use ms- prefix to avoid clashes with another company's product named do-agent.
# Use it also for other sub-projects to have consistency.
set(DOSVC_BIN_NAME "deliveryoptimization-agent")
set(DO_PLUGIN_APT_BIN_NAME "deliveryoptimization-plugin-apt")
# Only one subproject can be built with each invocation of cmake. This avoids confusion in specifying options
# exposed by an individual subproject.
if (DO_INCLUDE_AGENT)
message (STATUS "Including subproject client-lite")
add_subdirectory(client-lite)
elseif (DO_INCLUDE_PLUGINS)
message (STATUS "Including subproject plugins")
add_subdirectory(plugins)
elseif (DO_INCLUDE_SDK)
message (STATUS "Including subproject sdk-cpp")
add_subdirectory(sdk-cpp)
else ()
message (WARNING "No subproject chosen. Nothing is configured to be built.")
endif ()

14
CONTRIBUTING.md Normal file
Просмотреть файл

@ -0,0 +1,14 @@
# Contributing
This project welcomes contributions and suggestions. Most contributions require you to
agree to a Contributor License Agreement (CLA) declaring that you have the right to,
and actually do, grant us the rights to use your contribution. For details, visit
https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need
to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the
instructions provided by the bot. You will only need to do this once across all repositories using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

3
CPPLINT.cfg Normal file
Просмотреть файл

@ -0,0 +1,3 @@
set noparent # mark this file as the root
filter=-whitespace,-legal/copyright,-build/c++11,-build/include,-build/include_subdir,-runtime/references
linelength=140

34
LICENSE
Просмотреть файл

@ -1,21 +1,21 @@
MIT License
Copyright (c) Microsoft Corporation.
Copyright (c) Microsoft Corporation.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

782
NOTICE.md Normal file
Просмотреть файл

@ -0,0 +1,782 @@
### NOTICES AND INFORMATION
Do Not Translate or Localize
This software incorporates material from third parties.
Microsoft makes certain open source code available at https://3rdpartysource.microsoft.com,
or you may send a check or money order for US $5.00, including the product name,
the open source component name, platform, and version number, to:
Source Code Compliance Team
Microsoft Corporation
One Microsoft Way
Redmond, WA 98052
USA
Notwithstanding any other terms, you may reverse engineer this software to the extent
required to debug changes to any libraries licensed under the GNU Lesser General Public License.
====================================================================
microsoft/cpprestsdk 18212a2a7967e12d740bfb957e500892b3463c88 - MIT
Copyright (c) Microsoft.
Copyright (c) 2014, Peter Thorson.
Copyright (c) Microsoft Corporation.
Copyright (c) 2011, Micael Hildenborg
Copyright (c) 2004-2008 Rene Nyffenegger
Portions Copyright (c) Microsoft Corporation
Copyright (c) 1999, 2002 Aladdin Enterprises.
Copyright (c) 2006 Noel Llopis and Charles Nicholson
Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de>
C++ REST SDK
The MIT License (MIT)
Copyright (c) Microsoft Corporation
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
====================================================================
microsoft/gsl 1995e86d1ad70519465374fb4876c6ef7c9f8c61 - MIT
Copyright (c) 2015 Microsoft Corporation.
Copyright (c) 2015 Microsoft Corporation. All rights reserved.
This code is licensed under the MIT License (MIT).
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
====================================================================
Boost
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
====================================================================
OpenSSL
Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. All advertising materials mentioning features or use of this
software must display the following acknowledgment:
"This product includes software developed by the OpenSSL Project
for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
endorse or promote products derived from this software without
prior written permission. For written permission, please contact
openssl-core@openssl.org.
5. Products derived from this software may not be called "OpenSSL"
nor may "OpenSSL" appear in their names without prior written
permission of the OpenSSL Project.
6. Redistributions of any form whatsoever must retain the following
acknowledgment:
"This product includes software developed by the OpenSSL Project
for use in the OpenSSL Toolkit (http://www.openssl.org/)"
THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
====================================================================
Original SSLeay
Original SSLeay License
Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
All rights reserved.
This package is an SSL implementation written
by Eric Young (eay@cryptsoft.com).
The implementation was written so as to conform with Netscapes SSL.
This library is free for commercial and non-commercial use as long as
the following conditions are aheared to. The following conditions
apply to all code found in this distribution, be it the RC4, RSA,
lhash, DES, etc., code; not just the SSL code. The SSL documentation
included with this distribution is covered by the same copyright terms
except that the holder is Tim Hudson (tjh@cryptsoft.com).
Copyright remains Eric Young's, and as such any Copyright notices in
the code are not to be removed.
If this package is used in a product, Eric Young should be given attribution
as the author of the parts of the library used.
This can be in the form of a textual message at program startup or
in documentation (online or textual) provided with the package.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. All advertising materials mentioning features or use of this software
must display the following acknowledgement:
"This product includes cryptographic software written by
Eric Young (eay@cryptsoft.com)"
The word 'cryptographic' can be left out if the rouines from the library
being used are not cryptographic related :-).
4. If you include any Windows specific code (or a derivative thereof) from
the apps directory (application code) you must include an acknowledgement:
"This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
The licence and distribution terms for any publicly available version or
derivative of this code cannot be changed. i.e. this code cannot simply be
copied and put under another distribution licence
[including the GNU Public Licence.]
====================================================================
libproxy
GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
[This is the first released version of the Lesser GPL. It also counts
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1.]
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.
This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it. You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations below.
When we speak of free software, we are referring to freedom of use,
not price. Our General Public Licenses are designed to make sure that
you have the freedom to distribute copies of free software (and charge
for this service if you wish); that you receive source code or can get
it if you want it; that you can change the software and use pieces of
it in new free programs; and that you are informed that you can do
these things.
To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights. These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.
For example, if you distribute copies of the library, whether gratis
or for a fee, you must give the recipients all the rights that we gave
you. You must make sure that they, too, receive or can get the source
code. If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
it. And you must show them these terms so they know their rights.
We protect your rights with a two-step method: (1) we copyright the
library, and (2) we offer you this license, which gives you legal
permission to copy, distribute and/or modify the library.
To protect each distributor, we want to make it very clear that
there is no warranty for the free library. Also, if the library is
modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.
Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
restrictive license from a patent holder. Therefore, we insist that
any patent license obtained for a version of the library must be
consistent with the full freedom of use specified in this license.
Most GNU software, including some libraries, is covered by the
ordinary GNU General Public License. This license, the GNU Lesser
General Public License, applies to certain designated libraries, and
is quite different from the ordinary General Public License. We use
this license for certain libraries in order to permit linking those
libraries into non-free programs.
When a program is linked with a library, whether statically or using
a shared library, the combination of the two is legally speaking a
combined work, a derivative of the original library. The ordinary
General Public License therefore permits such linking only if the
entire combination fits its criteria of freedom. The Lesser General
Public License permits more lax criteria for linking other code with
the library.
We call this license the "Lesser" General Public License because it
does Less to protect the user's freedom than the ordinary General
Public License. It also provides other free software developers Less
of an advantage over competing non-free programs. These disadvantages
are the reason we use the ordinary General Public License for many
libraries. However, the Lesser license provides advantages in certain
special circumstances.
For example, on rare occasions, there may be a special need to
encourage the widest possible use of a certain library, so that it becomes
a de-facto standard. To achieve this, non-free programs must be
allowed to use the library. A more frequent case is that a free
library does the same job as widely used non-free libraries. In this
case, there is little to gain by limiting the free library to free
software only, so we use the Lesser General Public License.
In other cases, permission to use a particular library in non-free
programs enables a greater number of people to use a large body of
free software. For example, permission to use the GNU C Library in
non-free programs enables many more people to use the whole GNU
operating system, as well as its variant, the GNU/Linux operating
system.
Although the Lesser General Public License is Less protective of the
users' freedom, it does ensure that the user of a program that is
linked with the Library has the freedom and the wherewithal to run
that program using a modified version of the Library.
The precise terms and conditions for copying, distribution and
modification follow. Pay close attention to the difference between a
"work based on the library" and a "work that uses the library". The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License Agreement applies to any software library or other
program which contains a notice placed by the copyright holder or
other authorized party saying it may be distributed under the terms of
this Lesser General Public License (also called "this License").
Each licensee is addressed as "you".
A "library" means a collection of software functions and/or data
prepared so as to be conveniently linked with application programs
(which use some of those functions and data) to form executables.
The "Library", below, refers to any such software library or work
which has been distributed under these terms. A "work based on the
Library" means either the Library or any derivative work under
copyright law: that is to say, a work containing the Library or a
portion of it, either verbatim or with modifications and/or translated
straightforwardly into another language. (Hereinafter, translation is
included without limitation in the term "modification".)
"Source code" for a work means the preferred form of the work for
making modifications to it. For a library, complete source code means
all the source code for all modules it contains, plus any associated
interface definition files, plus the scripts used to control compilation
and installation of the library.
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running a program using the Library is not restricted, and output from
such a program is covered only if its contents constitute a work based
on the Library (independent of the use of the Library in a tool for
writing it). Whether that is true depends on what the Library does
and what the program that uses the Library does.
1. You may copy and distribute verbatim copies of the Library's
complete source code as you receive it, in any medium, provided that
you conspicuously and appropriately publish on each copy an
appropriate copyright notice and disclaimer of warranty; keep intact
all the notices that refer to this License and to the absence of any
warranty; and distribute a copy of this License along with the
Library.
You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.
2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) The modified work must itself be a software library.
b) You must cause the files modified to carry prominent notices
stating that you changed the files and the date of any change.
c) You must cause the whole of the work to be licensed at no
charge to all third parties under the terms of this License.
d) If a facility in the modified Library refers to a function or a
table of data to be supplied by an application program that uses
the facility, other than as an argument passed when the facility
is invoked, then you must make a good faith effort to ensure that,
in the event an application does not supply such function or
table, the facility still operates, and performs whatever part of
its purpose remains meaningful.
(For example, a function in a library to compute square roots has
a purpose that is entirely well-defined independent of the
application. Therefore, Subsection 2d requires that any
application-supplied function or table used by this function must
be optional: if the application does not supply it, the square
root function must still compute square roots.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Library,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Library, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Library.
In addition, mere aggregation of another work not based on the Library
with the Library (or with a work based on the Library) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may opt to apply the terms of the ordinary GNU General Public
License instead of this License to a given copy of the Library. To do
this, you must alter all the notices that refer to this License, so
that they refer to the ordinary GNU General Public License, version 2,
instead of to this License. (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.) Do not make any other change in
these notices.
Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.
This option is useful when you wish to copy part of the code of
the Library into a program that is not a library.
4. You may copy and distribute the Library (or a portion or
derivative of it, under Section 2) in object code or executable form
under the terms of Sections 1 and 2 above provided that you accompany
it with the complete corresponding machine-readable source code, which
must be distributed under the terms of Sections 1 and 2 above on a
medium customarily used for software interchange.
If distribution of object code is made by offering access to copy
from a designated place, then offering equivalent access to copy the
source code from the same place satisfies the requirement to
distribute the source code, even though third parties are not
compelled to copy the source along with the object code.
5. A program that contains no derivative of any portion of the
Library, but is designed to work with the Library by being compiled or
linked with it, is called a "work that uses the Library". Such a
work, in isolation, is not a derivative work of the Library, and
therefore falls outside the scope of this License.
However, linking a "work that uses the Library" with the Library
creates an executable that is a derivative of the Library (because it
contains portions of the Library), rather than a "work that uses the
library". The executable is therefore covered by this License.
Section 6 states terms for distribution of such executables.
When a "work that uses the Library" uses material from a header file
that is part of the Library, the object code for the work may be a
derivative work of the Library even though the source code is not.
Whether this is true is especially significant if the work can be
linked without the Library, or if the work is itself a library. The
threshold for this to be true is not precisely defined by law.
If such an object file uses only numerical parameters, data
structure layouts and accessors, and small macros and small inline
functions (ten lines or less in length), then the use of the object
file is unrestricted, regardless of whether it is legally a derivative
work. (Executables containing this object code plus portions of the
Library will still fall under Section 6.)
Otherwise, if the work is a derivative of the Library, you may
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.
6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit
modification of the work for the customer's own use and reverse
engineering for debugging such modifications.
You must give prominent notice with each copy of the work that the
Library is used in it and that the Library and its use are covered by
this License. You must supply a copy of this License. If the work
during execution displays copyright notices, you must include the
copyright notice for the Library among them, as well as a reference
directing the user to the copy of this License. Also, you must do one
of these things:
a) Accompany the work with the complete corresponding
machine-readable source code for the Library including whatever
changes were used in the work (which must be distributed under
Sections 1 and 2 above); and, if the work is an executable linked
with the Library, with the complete machine-readable "work that
uses the Library", as object code and/or source code, so that the
user can modify the Library and then relink to produce a modified
executable containing the modified Library. (It is understood
that the user who changes the contents of definitions files in the
Library will not necessarily be able to recompile the application
to use the modified definitions.)
b) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (1) uses at run time a
copy of the library already present on the user's computer system,
rather than copying library functions into the executable, and (2)
will operate properly with a modified version of the library, if
the user installs one, as long as the modified version is
interface-compatible with the version that the work was made with.
c) Accompany the work with a written offer, valid for at
least three years, to give the same user the materials
specified in Subsection 6a, above, for a charge no more
than the cost of performing this distribution.
d) If distribution of the work is made by offering access to copy
from a designated place, offer equivalent access to copy the above
specified materials from the same place.
e) Verify that the user has already received a copy of these
materials or that you have already sent this user a copy.
For an executable, the required form of the "work that uses the
Library" must include any data and utility programs needed for
reproducing the executable from it. However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.
It may happen that this requirement contradicts the license
restrictions of other proprietary libraries that do not normally
accompany the operating system. Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.
7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
library, provided that the separate distribution of the work based on
the Library and of the other library facilities is otherwise
permitted, and provided that you do these two things:
a) Accompany the combined library with a copy of the same work
based on the Library, uncombined with any other library
facilities. This must be distributed under the terms of the
Sections above.
b) Give prominent notice with the combined library of the fact
that part of it is a work based on the Library, and explaining
where to find the accompanying uncombined form of the same work.
8. You may not copy, modify, sublicense, link with, or distribute
the Library except as expressly provided under this License. Any
attempt otherwise to copy, modify, sublicense, link with, or
distribute the Library is void, and will automatically terminate your
rights under this License. However, parties who have received copies,
or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
9. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Library or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Library (or any work based on the
Library), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Library or works based on it.
10. Each time you redistribute the Library (or any work based on the
Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.
11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Library at all. For example, if a patent
license would not permit royalty-free redistribution of the Library by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Library.
If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply,
and the section as a whole is intended to apply in other circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
12. If the distribution and/or use of the Library is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License may add
an explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
13. The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Library
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.
14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this. Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.
NO WARRANTY
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
====================================================================
GoogleTest
Copyright 2008, Google Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
====================================================================
UUID (Universally Unique Identifier)
This package was debianized by:
Piotr Roszatycki <dexter@debian.org>
It was downloaded from:
<http://www.ossp.org/pkg/lib/uuid/>
Upstream Authors:
Patrick Powell <papowell@astart.com>
Piotr Roszatycki <dexter@debian.org>
Ralf S. Engelschall <rse@engelschall.com>
Brandon Long <blong@fiction.net>
Thomas Roessler <roessler@guug.de>
Michael Elkins <me@cs.hmc.edu>
Andrew Tridgell <tridge@samba.org>
Luke Mewburn <lukem@netbsd.org>
Copyright:
Copyright (C) 1995 Patrick Powell
Copyright (C) 2004 Piotr Roszatycki
Copyright (C) 2004-2006 Ralf S. Engelschall
Copyright (C) 1996 Brandon Long
Copyright (C) 1998 Thomas Roessler
Copyright (C) 1998 Michael Elkins
Copyright (C) 1998 Andrew Tridgell
Copyright (C) 1999 Luke Mewburn
License:
This file is part of OSSP uuid, a library for the generation
of UUIDs which can found at http://www.ossp.org/pkg/lib/uuid/
Permission to use, copy, modify, and distribute this software for
any purpose with or without fee is hereby granted, provided that
the above copyright notice and this permission notice appear in all
copies.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHORS AND COPYRIGHT HOLDERS AND THEIR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
The Debian packaging is:
Copyright (C) 2005 Piotr Roszatycki <dexter@debian.org>
and is licensed under the same license as the source code, see above.

Просмотреть файл

@ -1,33 +1,77 @@
# Project
# Delivery Optimization Client
> This repo has been populated by an initial template to help get you started. Please
> make sure to update the content to build a great experience for community-building.
This repository contains source code for the following DO components:
As the maintainer of this project, please make a few updates:
* Agent
* SDK
* Plug-ins
- Improving this README.MD file to provide a great experience
- Updating SUPPORT.MD with content about this project's support experience
- Understanding the security reporting process in SECURITY.MD
- Remove this section from the README
## Agent
## Contributing
Delivery Optimization HTTP downloader with Microsoft Connected Cache support.
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
## SDK
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
Library for enabling inter-process communication (IPC) through native C++
code for Delivery Optimization Agent on Linux.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
## Plug-ins
## Trademarks
Add-on that enables APT downloads to go through Delivery Optimization Agent.
It requires the SDK and Agent components.
This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft
trademarks or logos is subject to and must follow
[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).
Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.
Any use of third-party trademarks or logos is subject to those third parties' policies.
## Getting Started
Follow the development machine setup on each desktop you'd like to use.
### Development Machine Setup
Clone the repository locally from terminal:
```markdown
> cd (to working directory of your choosing)
> git clone https://github.com/microsoft/do-client
```
Run the appropriate bootstrapper depending on development machine platform:
```markdown
> cd build/bootstrap
```
## Support
This repository is currently in a **Public Preview** state. During this phase, all DO components
found in this repo will be supported for 90 days beyond the release date of a new release. At
the end of the 90 day window, we will not guarantee support for the previous version. Please plan
to migrate to the new DO components within that 90-day window to avoid any disruptions.
## Filing a Bug
Please file a [GitHub Issue](https://github.com/microsoft/do-client/issues) to ensure all issues are
tracked appropriately.
## Build status
#### Ubuntu 18.04
| Architecture | Agent | SDK | Plugin |
|-----|--------|-----|--------|
| x86-64 | [![Build Status](https://deliveryoptimization.visualstudio.com/client/_apis/build/status/DO%20Simple%20Client%20Ubuntu%2018.04%20x86-64?branchName=main)](https://deliveryoptimization.visualstudio.com/client/_build/latest?definitionId=23&branchName=main) | [![Build Status](https://deliveryoptimization.visualstudio.com/client/_apis/build/status/DO%20CPP-SDK%20Ubuntu%2018.04%20x86-64?branchName=main)](https://deliveryoptimization.visualstudio.com/client/_build/latest?definitionId=26&branchName=main) | [![Build Status](https://deliveryoptimization.visualstudio.com/client/_apis/build/status/DO%20Plugins%20APT%20Ubuntu%2018.04%20x86-64?branchName=main)](https://deliveryoptimization.visualstudio.com/client/_build/latest?definitionId=29&branchName=main) |
| arm64 | [![Build Status](https://deliveryoptimization.visualstudio.com/client/_apis/build/status/DO%20Simple%20Client%20Ubuntu%2018.04%20arm64?branchName=main)](https://deliveryoptimization.visualstudio.com/client/_build/latest?definitionId=37&branchName=main) | [![Build Status](https://deliveryoptimization.visualstudio.com/client/_apis/build/status/DO%20CPP-SDK%20Ubuntu%2018.04%20arm64?branchName=main)](https://deliveryoptimization.visualstudio.com/client/_build/latest?definitionId=38&branchName=main) | [![Build Status](https://deliveryoptimization.visualstudio.com/client/_apis/build/status/DO%20Plugins%20APT%20Ubuntu%2018.04%20arm64?branchName=main)](https://deliveryoptimization.visualstudio.com/client/_build/latest?definitionId=39&branchName=main) |
#### Debian 9
| Architecture | Agent | SDK | Plugin |
|-----|--------|-----|--------|
| arm32 | [![Build Status](https://deliveryoptimization.visualstudio.com/client/_apis/build/status/DO%20Simple%20Client%20Debian9%20arm32?branchName=main)](https://deliveryoptimization.visualstudio.com/client/_build/latest?definitionId=25&branchName=main) | [![Build Status](https://deliveryoptimization.visualstudio.com/client/_apis/build/status/DO%20CPP-SDK%20Debian9%20arm32?branchName=main)](https://deliveryoptimization.visualstudio.com/client/_build/latest?definitionId=33&branchName=main) | [![Build Status](https://deliveryoptimization.visualstudio.com/client/_apis/build/status/DO%20Plugins%20APT%20Debian9%20arm32?branchName=main)](https://deliveryoptimization.visualstudio.com/client/_build/latest?definitionId=31&branchName=main) |
#### Debian 10
| Architecture | Agent | SDK | Plugin |
|-----|--------|-----|--------|
| arm32 | [![Build Status](https://deliveryoptimization.visualstudio.com/client/_apis/build/status/DO%20Simple%20Client%20Debian10%20arm32?branchName=main)](https://deliveryoptimization.visualstudio.com/client/_build/latest?definitionId=24&branchName=main) | [![Build Status](https://deliveryoptimization.visualstudio.com/client/_apis/build/status/DO%20CPP-SDK%20Debian10%20arm32?branchName=main)](https://deliveryoptimization.visualstudio.com/client/_build/latest?definitionId=32&branchName=main) | [![Build Status](https://deliveryoptimization.visualstudio.com/client/_apis/build/status/DO%20Plugins%20APT%20Debian10%20arm32?branchName=main)](https://deliveryoptimization.visualstudio.com/client/_build/latest?definitionId=34&branchName=main) |
## Contact
Directly contact us: <docloss@microsoft.com>

Просмотреть файл

@ -14,7 +14,7 @@ Instead, please report them to the Microsoft Security Response Center (MSRC) at
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc).
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
@ -38,4 +38,4 @@ We prefer all communications to be in English.
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd).
<!-- END MICROSOFT SECURITY.MD BLOCK -->
<!-- END MICROSOFT SECURITY.MD BLOCK -->

Просмотреть файл

@ -1,25 +0,0 @@
# TODO: The maintainer of this repo has not yet edited this file
**REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project?
- **No CSS support:** Fill out this template with information about how to file issues and get help.
- **Yes CSS support:** Fill out an intake form at [aka.ms/spot](https://aka.ms/spot). CSS will work with/help you to determine next steps. More details also available at [aka.ms/onboardsupport](https://aka.ms/onboardsupport).
- **Not sure?** Fill out a SPOT intake as though the answer were "Yes". CSS will help you decide.
*Then remove this first heading from this SUPPORT.MD file before publishing your repo.*
# Support
## How to file issues and get help
This project uses GitHub Issues to track bugs and feature requests. Please search the existing
issues before filing new issues to avoid duplicates. For new issues, file your bug or
feature request as a new Issue.
For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE
FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER
CHANNEL. WHERE WILL YOU HELP PEOPLE?**.
## Microsoft Support Policy
Support for this **PROJECT or PRODUCT** is limited to the resources listed above.

Просмотреть файл

@ -0,0 +1,39 @@
# Pipeline to build DO Agent using docker to target non-native OS and/or architecture.
# Publishes the binaries + packages as artifacts.

# Disable branch and pr triggers - currently run this manually to avoid hogging build machine resources
# Rely on Ubuntu x64 pipeline for CI/CD
trigger:
- none

pr:
- none

# Nightly scheduled build on main (cron expression is in UTC).
schedules:
- cron: "0 0 * * *"
  displayName: Daily midnight build
  branches:
    include:
    - main

pool: dotestlab

# Each template instantiation expands to a debug and a release job
# for the given OS/arch/docker image (see templates/do-docker-jobs.yml).
jobs:
- template: templates/do-docker-jobs.yml
  parameters:
    targetOS: 'debian10'
    targetArch: 'arm32'
    dockerImageName: 'jimsonmsft/debian10-arm32:latest'
    stepsTemplate: 'doclient-lite-docker-steps.yml'

- template: templates/do-docker-jobs.yml
  parameters:
    targetOS: 'debian9'
    targetArch: 'arm32'
    dockerImageName: 'jimsonmsft/debian9-arm32:latest'
    stepsTemplate: 'doclient-lite-docker-steps.yml'

- template: templates/do-docker-jobs.yml
  parameters:
    targetOS: 'ubuntu1804'
    targetArch: 'arm64'
    dockerImageName: 'jimsonmsft/ubuntu18.04-arm64:latest'
    stepsTemplate: 'doclient-lite-docker-steps.yml'

Просмотреть файл

@ -0,0 +1,56 @@
# Pipeline to build DO Agent targeting x86-64 architecture.
# Publishes the binaries + packages as artifacts.

# Version here refers to test package version, and follows format of 0.0.<Pipeline Build Number>
# This is due to azure universal packaging apis requiring semantic versioning
# Builds are monotonically increasing based on run number so test builds can always pull the newest version
variables:
  test.package.version: 0.0.$(Build.BuildId)

# CI trigger: only paths relevant to the agent build, to avoid needless runs.
trigger:
  branches:
    include:
    - main
  paths:
    include:
    - azure-pipelines/build/doclient-lite-native.yml
    - azure-pipelines/build/templates/doclient-lite-native-steps.yml
    - client-lite/*
    - common/*
    - build/build.py
    - CMakeLists.txt
    exclude:
    - azure-pipelines/*

# PR validation uses the same path filters as CI.
pr:
  branches:
    include:
    - main
  paths:
    include:
    - azure-pipelines/build/doclient-lite-native.yml
    - azure-pipelines/build/templates/doclient-lite-native-steps.yml
    - client-lite/*
    - common/*
    - build/build.py
    - CMakeLists.txt
    exclude:
    - azure-pipelines/*

pool: dotestlab

jobs:
- job: Debug
  steps:
  - template: templates/doclient-lite-native-steps.yml
    parameters:
      targetOsArch: 'ubuntu1804-x64'
      config: debug

# Tests run only in the Debug job; Release just builds and packages.
- job: Release
  steps:
  - template: templates/doclient-lite-native-steps.yml
    parameters:
      targetOsArch: 'ubuntu1804-x64'
      config: release
      skipTests: true

Просмотреть файл

@ -0,0 +1,39 @@
# Pipeline to build DO Plugins using docker to target non-native OS and/or architecture.
# Publishes the binaries + packages as artifacts.

# Disable branch and pr triggers - currently run this manually to avoid hogging build machine resources
# Rely on Ubuntu x64 pipeline for CI/CD
trigger:
- none

pr:
- none

# Nightly scheduled build on main (cron expression is in UTC).
schedules:
- cron: "0 0 * * *"
  displayName: Daily midnight build
  branches:
    include:
    - main

pool: dotestlab

# Each template instantiation expands to a debug and a release job
# for the given OS/arch/docker image (see templates/do-docker-jobs.yml).
jobs:
- template: templates/do-docker-jobs.yml
  parameters:
    targetOS: 'debian10'
    targetArch: 'arm32'
    dockerImageName: 'jimsonmsft/debian10-arm32:latest'
    stepsTemplate: 'dopapt-docker-steps.yml'

- template: templates/do-docker-jobs.yml
  parameters:
    targetOS: 'debian9'
    targetArch: 'arm32'
    dockerImageName: 'jimsonmsft/debian9-arm32:latest'
    stepsTemplate: 'dopapt-docker-steps.yml'

- template: templates/do-docker-jobs.yml
  parameters:
    targetOS: 'ubuntu1804'
    targetArch: 'arm64'
    dockerImageName: 'jimsonmsft/ubuntu18.04-arm64:latest'
    stepsTemplate: 'dopapt-docker-steps.yml'

Просмотреть файл

@ -0,0 +1,57 @@
# Pipeline to build DO Plugins targeting x86-64 architecture.
# Publishes the binaries + packages as artifacts.

# Version here refers to test package version, and follows format of 0.0.<Pipeline Build Number>
# This is due to azure universal packaging apis requiring semantic versioning
# Builds are monotonically increasing based on run number so test builds can always pull the newest version
variables:
  test.package.version: 0.0.$(Build.BuildId)

# CI trigger: only paths relevant to the plugin build, to avoid needless runs.
trigger:
  branches:
    include:
    - main
  paths:
    include:
    - azure-pipelines/build/dopapt-native.yml
    - azure-pipelines/build/templates/dopapt-native-steps.yml
    - common/*
    - plugins/*
    - build/build.py
    - CMakeLists.txt
    exclude:
    - 'azure-pipelines/*'
    - 'plugins/linux-apt/scripts/configure-apt-method.sh'

# PR validation uses the same path filters as CI.
pr:
  branches:
    include:
    - main
  paths:
    include:
    - azure-pipelines/build/dopapt-native.yml
    - azure-pipelines/build/templates/dopapt-native-steps.yml
    - common/*
    - plugins/*
    - build/build.py
    - CMakeLists.txt
    exclude:
    - 'azure-pipelines/*'
    - 'plugins/linux-apt/scripts/configure-apt-method.sh'

pool: dotestlab

jobs:
- job: Debug
  steps:
  - template: templates/dopapt-native-steps.yml
    parameters:
      targetOsArch: 'ubuntu1804-x64'
      config: debug

- job: Release
  steps:
  - template: templates/dopapt-native-steps.yml
    parameters:
      targetOsArch: 'ubuntu1804-x64'
      config: release

Просмотреть файл

@ -0,0 +1,39 @@
# Pipeline to build DO C++ SDK using docker to target non-native OS and/or architecture.
# Publishes the binaries + packages as artifacts.

# Disable branch and pr triggers - currently run this manually to avoid hogging build machine resources
# Rely on Ubuntu x64 pipeline for CI/CD
trigger:
- none

pr:
- none

# Nightly scheduled build on main (cron expression is in UTC).
schedules:
- cron: "0 0 * * *"
  displayName: Daily midnight build
  branches:
    include:
    - main

pool: dotestlab

# Each template instantiation expands to a debug and a release job
# for the given OS/arch/docker image (see templates/do-docker-jobs.yml).
jobs:
- template: templates/do-docker-jobs.yml
  parameters:
    targetOS: 'debian10'
    targetArch: 'arm32'
    dockerImageName: 'jimsonmsft/debian10-arm32:latest'
    stepsTemplate: 'dosdkcpp-docker-steps.yml'

- template: templates/do-docker-jobs.yml
  parameters:
    targetOS: 'debian9'
    targetArch: 'arm32'
    dockerImageName: 'jimsonmsft/debian9-arm32:latest'
    stepsTemplate: 'dosdkcpp-docker-steps.yml'

- template: templates/do-docker-jobs.yml
  parameters:
    targetOS: 'ubuntu1804'
    targetArch: 'arm64'
    dockerImageName: 'jimsonmsft/ubuntu18.04-arm64:latest'
    stepsTemplate: 'dosdkcpp-docker-steps.yml'

Просмотреть файл

@ -0,0 +1,59 @@
# Pipeline to build DO C++ SDK targeting x86-64 architecture.
# Client-lite is built for running tests alone, it is not published.
# Publishes the binaries + packages as artifacts.
# Version here refers to test package version, and follows format of 0.0.<Pipeline Build Number>
# This is due to azure universal packaging apis requiring semantic versioning
# Builds are monotonically increasing based on run number so test builds can always pull the newest version
variables:
test.package.version: 0.0.$(Build.BuildId)
trigger:
branches:
include:
- main
paths:
include:
- azure-pipelines/build/dosdkcpp-native.yml
- azure-pipelines/build/templates/dosdkcpp-native-steps.yml
- common/*
- sdk-cpp/*
- build/build.py
- CMakeLists.txt
exclude:
- azure-pipelines/*
- sdk-cpp/build/cleanup-install.sh
pr:
branches:
include:
- main
paths:
include:
- azure-pipelines/build/dosdkcpp-native.yml
- azure-pipelines/build/templates/dosdkcpp-native-steps.yml
- common/*
- sdk-cpp/*
- build/build.py
- CMakeLists.txt
exclude:
- azure-pipelines/*
- sdk-cpp/build/cleanup-install.sh
pool: dotestlab
jobs:
- job: Debug
steps:
- template: templates/dosdkcpp-native-steps.yml
parameters:
targetOsArch: 'ubuntu1804-x64'
config: debug
- job: Release
steps:
- template: templates/dosdkcpp-native-steps.yml
parameters:
targetOsArch: 'ubuntu1804-x64'
config: release
skipTests: true

Просмотреть файл

@ -0,0 +1,29 @@
# Template: Jobs to build DO projects using docker to target non-native OS and/or architecture.
# Consume this jobs template in a pipeline yaml by passing in parameter values.
parameters:
- name: targetOS # example: debian10
  type: string
- name: targetArch # example: arm32
  type: string
- name: dockerImageName # example: jimsonmsft/debian10-arm32:latest
  type: string
- name: stepsTemplate # example: dopapt-docker-steps.yml
  type: string

# Expands to one debug job and one release job; both delegate the actual
# build steps to the steps template named by 'stepsTemplate'.
jobs:
- job: ${{parameters.targetOS}}_${{parameters.targetArch}}_debug
  steps:
  - template: ${{parameters.stepsTemplate}}
    parameters:
      targetOsArch: ${{parameters.targetOS}}-${{parameters.targetArch}}
      dockerImageName: ${{parameters.dockerImageName}}
      config: debug
- job: ${{parameters.targetOS}}_${{parameters.targetArch}}_release
  steps:
  - template: ${{parameters.stepsTemplate}}
    parameters:
      targetOsArch: ${{parameters.targetOS}}-${{parameters.targetArch}}
      dockerImageName: ${{parameters.dockerImageName}}
      config: release

Просмотреть файл

@ -0,0 +1,32 @@
# Template: Steps to build DO Agent using docker to target non-native OS and/or architecture.
# Consume this steps template in one or more jobs by passing in parameter values.
parameters:
- name: targetOsArch # example: debian10-arm32
  type: string
- name: dockerImageName # example: jimsonmsft/debian10-arm32:latest
  type: string
- name: config # debug/release
  type: string

steps:
# Build inside the cross-arch docker image; sources and the build output
# directory are bind-mounted so artifacts survive the container.
- task: CmdLine@2
  inputs:
    # Unix Makefiles used in place of Ninja due to issues with 32-bit compatibility on cross-arch builds
    script: 'sudo docker run --rm --entrypoint=python3 -v $(Build.SourcesDirectory):/code -v /tmp/build-deliveryoptimization-agent-${{parameters.targetOsArch}}:/tmp/build-deliveryoptimization-agent ${{parameters.dockerImageName}} "/code/build/build.py" "--clean" "--package-for" "DEB" "--generator" "Unix Makefiles" "--config" "${{parameters.config}}" "--project" "agent"'
  displayName: 'Build client-lite ${{parameters.targetOsArch}}-${{parameters.config}}'
# Stage only the .deb package produced by the build.
- task: CopyFiles@2
  inputs:
    SourceFolder: '/tmp/build-deliveryoptimization-agent-${{parameters.targetOsArch}}/linux-${{parameters.config}}'
    Contents: |
      deliveryoptimization-agent*.deb
    TargetFolder: '$(Build.ArtifactStagingDirectory)/${{parameters.targetOsArch}}-${{parameters.config}}'
    CleanTargetFolder: true
  displayName: 'Populate artifacts staging dir'
- task: PublishBuildArtifacts@1
  inputs:
    PathtoPublish: '$(Build.ArtifactStagingDirectory)'
    ArtifactName: 'deliveryoptimization-agent'
    publishLocation: 'Container'

Просмотреть файл

@ -0,0 +1,80 @@
# Template: Steps to build DO Agent targeting x86-64 architecture.
# (NOTE: header previously said "DO Plugins" — copy/paste slip; these steps build the agent.)
# Consume this steps template in one or more jobs by passing in parameter values.
parameters:
- name: targetOsArch # example: ubuntu18.04-x64
  type: string
- name: config # debug/release
  type: string
- name: skipTests
  type: boolean
  default: false

steps:
# TODO(shishirb) Include --skip-tests build.py option when skipTests is true
- task: PythonScript@0
  inputs:
    scriptSource: 'filePath'
    scriptPath: 'build/build.py'
    arguments: '--project agent --config ${{parameters.config}} --package-for DEB --clean'
  displayName: 'Build agent ${{parameters.targetOsArch}}-${{parameters.config}}'
# Install the freshly built package so the unit tests exercise the installed layout.
- task: CmdLine@2
  condition: eq('${{parameters.skipTests}}', false)
  inputs:
    script: 'sudo dpkg -i deliveryoptimization-agent*.deb'
    workingDirectory: '/tmp/build-deliveryoptimization-agent/linux-${{parameters.config}}'
  displayName: 'Install agent Debian package'
- task: CmdLine@2
  condition: eq('${{parameters.skipTests}}', false)
  inputs:
    # NetworkMonitorTests are filtered out — presumably they need network conditions the lab agents lack; confirm.
    script: './client-lite/test/deliveryoptimization-agent-tests --gtest_filter=-NetworkMonitorTests*'
    workingDirectory: '/tmp/build-deliveryoptimization-agent/linux-${{parameters.config}}/'
  displayName: 'Run unit tests'
- task: CmdLine@2
  condition: eq('${{parameters.skipTests}}', false)
  inputs:
    script: 'sudo dpkg -r deliveryoptimization-agent'
    workingDirectory: '/tmp/build-deliveryoptimization-agent/linux-${{parameters.config}}'
  displayName: 'Remove Debian package'
- task: CopyFiles@2
  inputs:
    SourceFolder: '/tmp/build-deliveryoptimization-agent/linux-${{parameters.config}}'
    Contents: |
      deliveryoptimization-agent*.deb
    TargetFolder: '$(Build.ArtifactStagingDirectory)/${{parameters.targetOsArch}}-${{parameters.config}}'
    CleanTargetFolder: true
  displayName: 'Populate artifacts staging dir'
# Test binary is staged too (only when tests ran) so failures can be reproduced locally.
- task: CopyFiles@2
  condition: eq('${{parameters.skipTests}}', false)
  inputs:
    SourceFolder: '/tmp/build-deliveryoptimization-agent/linux-${{parameters.config}}'
    Contents: |
      client-lite/test/deliveryoptimization-agent-tests
    TargetFolder: '$(Build.ArtifactStagingDirectory)/${{parameters.targetOsArch}}-${{parameters.config}}'
    CleanTargetFolder: false
  displayName: 'Populate artifacts staging dir with test binary'
- task: PublishBuildArtifacts@1
  inputs:
    PathtoPublish: '$(Build.ArtifactStagingDirectory)'
    ArtifactName: 'deliveryoptimization-agent'
    publishLocation: 'Container'
# TODO(jimson) Azure artifacts are no longer free to publish to, this task will fail as a result, bug to resolve issue here:
# https://microsoft.visualstudio.com/OS/_workitems/edit/30317524
#
# - task: UniversalPackages@0
#   inputs:
#     command: 'publish'
#     publishDirectory: '$(Build.ArtifactStagingDirectory)'
#     feedsToUsePublish: 'internal'
#     vstsFeedPublish: 'a6e08e1f-d299-4d2f-aaa5-db7ddde849e0'
#     vstsFeedPackagePublish: 'deliveryoptimization-agent'
#     versionOption: 'custom'
#     versionPublish: '$(test.package.version)'
#   displayName: 'Publish artifacts to test feed'

Просмотреть файл

@ -0,0 +1,33 @@
# Template: Steps to build DO Plugins using docker to target non-native OS and/or architecture.
# Consume this steps template in one or more jobs by passing in parameter values.
parameters:
- name: targetOsArch # example: debian10-arm32
  type: string
- name: dockerImageName # example: jimsonmsft/debian10-arm32:latest
  type: string
- name: config # debug/release
  type: string

steps:
- checkout: self
# Plugin build is scripted (docker-build-plugin.sh) rather than invoking build.py
# directly, unlike the agent/sdk docker templates.
- task: CmdLine@2
  inputs:
    script: 'sudo docker run --rm --entrypoint=/bin/bash -v $(Build.SourcesDirectory):/code -v /tmp/build-deliveryoptimization-plugin-apt-${{parameters.targetOsArch}}:/tmp/build-deliveryoptimization-plugin-apt ${{parameters.dockerImageName}} "/code/build/docker/docker-build-plugin.sh" "/code" "${{parameters.config}}"'
  displayName: 'Build deliveryoptimization-plugin-apt ${{parameters.targetOsArch}}-${{parameters.config}}'
- task: CopyFiles@2
  inputs:
    SourceFolder: '/tmp/build-deliveryoptimization-plugin-apt-${{parameters.targetOsArch}}/linux-${{parameters.config}}'
    Contents: |
      deliveryoptimization-plugin-apt*.deb
    TargetFolder: '$(Build.ArtifactStagingDirectory)/${{parameters.targetOsArch}}-${{parameters.config}}'
    CleanTargetFolder: true
  displayName: 'Populate artifacts staging dir'
- task: PublishBuildArtifacts@1
  inputs:
    PathtoPublish: '$(Build.ArtifactStagingDirectory)'
    ArtifactName: 'deliveryoptimization-plugin-apt'
    publishLocation: 'Container'

Просмотреть файл

@ -0,0 +1,73 @@
# Template: Steps to build DO Plugins targeting x86-64 architecture.
# Consume this steps template in one or more jobs by passing in parameter values.
parameters:
- name: targetOsArch # example: ubuntu18.04-x64
  type: string
- name: config # debug/release
  type: string

steps:
- checkout: self
# The plugin links against libdeliveryoptimization, so build and install the SDK first.
- task: PythonScript@0
  inputs:
    scriptSource: 'filePath'
    scriptPath: 'build/build.py'
    arguments: '--project sdk --cmaketarget deliveryoptimization --config ${{parameters.config}} --package-for DEB --clean '
  displayName: 'Build sdk ${{parameters.targetOsArch}}-${{parameters.config}}'
- task: Bash@3
  inputs:
    targetType: 'inline'
    # --ignore-depends: the SDK package declares a dependency on the agent, which is not needed to build the plugin.
    script: 'sudo dpkg --ignore-depends=deliveryoptimization-agent -i libdeliveryoptimization*.deb'
    workingDirectory: '/tmp/build-deliveryoptimization-sdk/linux-${{parameters.config}}'
  displayName: 'Install libdeliveryoptimization'
- task: PythonScript@0
  inputs:
    scriptSource: 'filePath'
    scriptPath: 'build/build.py'
    arguments: '--project plugin-apt --config ${{parameters.config}} --package-for DEB --clean'
  displayName: 'Build deliveryoptimization-plugin-apt ${{parameters.targetOsArch}}-${{parameters.config}}'
# Smoke-test that the produced plugin package installs cleanly, then remove everything.
- task: CmdLine@2
  inputs:
    script: 'sudo dpkg -i deliveryoptimization-plugin-apt*.deb'
    workingDirectory: '/tmp/build-deliveryoptimization-plugin-apt/linux-${{parameters.config}}'
  displayName: 'Install deliveryoptimization-plugin-apt'
- task: CmdLine@2
  inputs:
    script: 'sudo dpkg -r libdeliveryoptimization-dev deliveryoptimization-plugin-apt libdeliveryoptimization'
  displayName: 'Remove Packages'
- task: CopyFiles@2
  inputs:
    SourceFolder: '/tmp/build-deliveryoptimization-plugin-apt/linux-${{parameters.config}}'
    Contents: |
      deliveryoptimization-plugin-apt*.deb
    TargetFolder: '$(Build.ArtifactStagingDirectory)/${{parameters.targetOsArch}}-${{parameters.config}}'
    CleanTargetFolder: true
  displayName: 'Populate artifacts staging dir'
- task: PublishBuildArtifacts@1
  inputs:
    PathtoPublish: '$(Build.ArtifactStagingDirectory)'
    ArtifactName: 'deliveryoptimization-plugin-apt'
    publishLocation: 'Container'
# TODO(jimson) Azure artifacts are no longer free to publish to, this task will fail as a result, bug to resolve issue here:
# https://microsoft.visualstudio.com/OS/_workitems/edit/30317524
#
# - task: UniversalPackages@0
#   inputs:
#     command: 'publish'
#     publishDirectory: '$(Build.ArtifactStagingDirectory)'
#     feedsToUsePublish: 'internal'
#     vstsFeedPublish: 'a6e08e1f-d299-4d2f-aaa5-db7ddde849e0'
#     publishPackageMetadata: false
#     vstsFeedPackagePublish: 'deliveryoptimization-plugin-apt'
#     versionOption: 'custom'
#     versionPublish: '$(test.package.version)'
#   displayName: 'Publish artifacts to test feed'

Просмотреть файл

@ -0,0 +1,32 @@
# Template: Steps to build DO C++ SDK using docker to target non-native OS and/or architecture.
# Consume this steps template in one or more jobs by passing in parameter values.
parameters:
- name: targetOsArch # example: debian10-arm32
  type: string
- name: dockerImageName # example: jimsonmsft/debian10-arm32:latest
  type: string
- name: config # debug/release
  type: string

steps:
# Build inside the cross-arch docker image; sources and the build output
# directory are bind-mounted so artifacts survive the container.
- task: CmdLine@2
  inputs:
    # Unix Makefiles used in place of Ninja due to issues with 32-bit compatibility on cross-arch builds
    script: 'sudo docker run --rm --entrypoint=python3 -v $(Build.SourcesDirectory):/code -v /tmp/build-deliveryoptimization-sdk-${{parameters.targetOsArch}}:/tmp/build-deliveryoptimization-sdk ${{parameters.dockerImageName}} "/code/build/build.py" "--clean" "--package-for" "DEB" "--generator" "Unix Makefiles" "--config" "${{parameters.config}}" "--project" "sdk" "--cmaketarget" "deliveryoptimization"'
  displayName: 'Build sdk-cpp ${{parameters.targetOsArch}}-${{parameters.config}}'
- task: CopyFiles@2
  inputs:
    SourceFolder: '/tmp/build-deliveryoptimization-sdk-${{parameters.targetOsArch}}/linux-${{parameters.config}}'
    Contents: |
      libdeliveryoptimization*.deb
    TargetFolder: '$(Build.ArtifactStagingDirectory)/${{parameters.targetOsArch}}-${{parameters.config}}'
    CleanTargetFolder: true
  displayName: 'Populate artifacts staging dir'
- task: PublishBuildArtifacts@1
  inputs:
    PathtoPublish: '$(Build.ArtifactStagingDirectory)'
    ArtifactName: 'deliveryoptimization-sdk'
    publishLocation: 'Container'

Просмотреть файл

@ -0,0 +1,108 @@
# Template: Steps to build DO C++ SDK targeting x86-64 architecture.
# (NOTE: header previously said "DO Plugins" — copy/paste slip; these steps build the SDK and its tests.)
# Consume this steps template in one or more jobs by passing in parameter values.
parameters:
- name: targetOsArch # example: ubuntu18.04-x64
  type: string
- name: config # debug/release
  type: string
- name: skipTests
  type: boolean
  default: false

steps:
# TODO(shishirb) Include --skip-tests build.py option when skipTests is true
# The agent is built/installed only when tests run — the SDK tests talk to a running agent.
- task: PythonScript@0
  condition: eq('${{parameters.skipTests}}', false)
  inputs:
    scriptSource: 'filePath'
    scriptPath: 'build/build.py'
    arguments: '--project agent --cmaketarget deliveryoptimization-agent --config ${{parameters.config}} --package-for DEB --clean'
  displayName: 'Build agent ${{parameters.targetOsArch}}-${{parameters.config}}'
- task: CmdLine@2
  condition: eq('${{parameters.skipTests}}', false)
  inputs:
    script: 'sudo dpkg -i deliveryoptimization-agent*.deb'
    workingDirectory: '/tmp/build-deliveryoptimization-agent/linux-${{parameters.config}}'
  displayName: 'Install agent Debian package'
- task: PythonScript@0
  inputs:
    scriptSource: 'filePath'
    scriptPath: 'build/build.py'
    arguments: '--project sdk --cmaketarget deliveryoptimization --config ${{parameters.config}} --package-for DEB --clean'
  displayName: 'Build sdk-cpp ${{parameters.targetOsArch}}-${{parameters.config}}'
- task: CmdLine@2
  condition: eq('${{parameters.skipTests}}', false)
  inputs:
    script: 'sudo dpkg -i libdeliveryoptimization*.deb'
    workingDirectory: '/tmp/build-deliveryoptimization-sdk/linux-${{parameters.config}}'
  displayName: 'Install libdeliveryoptimization Debian Package'
- task: PythonScript@0
  condition: eq('${{parameters.skipTests}}', false)
  inputs:
    scriptSource: 'filePath'
    scriptPath: 'build/build.py'
    arguments: '--project sdk --cmaketarget deliveryoptimization-sdk-tests --config ${{parameters.config}}'
  displayName: 'Build sdk-cpp tests ${{parameters.targetOsArch}}-${{parameters.config}}'
# Run all tests. These must run as root because docs is running as the 'do' user
# and thus files created by docs are not owned by us, causing test SetUp/TearDown to fail.
- task: CmdLine@2
  condition: eq('${{parameters.skipTests}}', false)
  inputs:
    script: 'sudo ./sdk-cpp/tests/deliveryoptimization-sdk-tests'
    workingDirectory: '/tmp/build-deliveryoptimization-sdk/linux-${{parameters.config}}'
  displayName: 'Run unit tests'
- task: CmdLine@2
  condition: eq('${{parameters.skipTests}}', false)
  inputs:
    script: 'sudo dpkg -r libdeliveryoptimization-dev libdeliveryoptimization deliveryoptimization-agent'
  displayName: 'Remove installed packages'
- task: CopyFiles@2
  inputs:
    SourceFolder: '/tmp/build-deliveryoptimization-sdk/linux-${{parameters.config}}'
    Contents: |
      libdeliveryoptimization*
    TargetFolder: '$(Build.ArtifactStagingDirectory)/${{parameters.targetOsArch}}-${{parameters.config}}'
    CleanTargetFolder: true
  displayName: 'Populate artifacts staging dir'
- task: PublishBuildArtifacts@1
  inputs:
    PathtoPublish: '$(Build.ArtifactStagingDirectory)'
    ArtifactName: 'deliveryoptimization-sdk'
    publishLocation: 'Container'
# TODO(jimson) Azure artifacts are no longer free to publish to, this task will fail as a result, bug to resolve issue here:
# https://microsoft.visualstudio.com/OS/_workitems/edit/30317524
# Note: The second feed publishes to a feed that did not hit storage limits. Choose which one to keep when re-enabling the publish.
#
# - task: UniversalPackages@0
#   inputs:
#     command: 'publish'
#     publishDirectory: '$(Build.ArtifactStagingDirectory)'
#     feedsToUsePublish: 'internal'
#     vstsFeedPublish: 'a6e08e1f-d299-4d2f-aaa5-db7ddde849e0'
#     publishPackageMetadata: false
#     vstsFeedPackagePublish: 'deliveryoptimization-sdk'
#     versionOption: 'custom'
#     versionPublish: '$(test.package.version)'
#   displayName: 'Publish artifacts to test feed'
# - task: UniversalPackages@0
#   inputs:
#     command: 'publish'
#     publishDirectory: '$(Build.ArtifactStagingDirectory)'
#     feedsToUsePublish: 'internal'
#     vstsFeedPublish: '7cdf52bf-a6f7-436c-b12f-3f063ef389c4/a671f3bd-f4e8-44b8-9553-9a9c04fa266a'
#     vstsFeedPackagePublish: 'deliveryoptimization-sdk'
#     versionOption: 'patch'
#     packagePublishDescription: 'sdk'
#     versionPublish: '$(test.package.version)'
#   displayName: 'Publish artifacts to test feed'

Просмотреть файл

@ -0,0 +1,104 @@
# Pipeline to perform automated github release of our components
# Disable branch and pr triggers - run this manually when ready to publish a new release
trigger:
- none
pr:
- none

pool: dotestlab

# release_name / release_version / previous_release_version are supplied at queue time.
variables:
  release.title: $(release_name)
  release.version: $(release_version)
  # This variable is used later to generate the changelog
  release.previous_version: $(previous_release_version)

jobs:
- job: doclient_release
  steps:
  - task: CmdLine@2
    inputs:
      script: 'rm -rf /tmp/do_publishing'
    displayName: 'Clear publishing directory'
  # Pull the latest artifacts from each component pipeline.
  # NOTE(review): pipeline IDs (33/26/25/23/31/29) are org-specific magic numbers — verify they
  # still map to the sdk/agent/plugin arm and x64 pipelines before releasing.
  - task: DownloadBuildArtifacts@0
    inputs:
      buildType: 'specific'
      project: '7cdf52bf-a6f7-436c-b12f-3f063ef389c4'
      pipeline: '33'
      buildVersionToDownload: 'latest'
      downloadType: 'specific'
      downloadPath: '/tmp/do_publishing'
    displayName: 'Get ms-do-sdk arm artifacts'
  - task: DownloadBuildArtifacts@0
    inputs:
      buildType: 'specific'
      project: '7cdf52bf-a6f7-436c-b12f-3f063ef389c4'
      pipeline: '26'
      buildVersionToDownload: 'latest'
      downloadType: 'specific'
      downloadPath: '/tmp/do_publishing'
    displayName: 'Get ms-do-sdk x64 artifacts'
  - task: DownloadBuildArtifacts@0
    inputs:
      buildType: 'specific'
      project: '7cdf52bf-a6f7-436c-b12f-3f063ef389c4'
      pipeline: '25'
      buildVersionToDownload: 'latest'
      downloadType: 'specific'
      downloadPath: '/tmp/do_publishing'
    displayName: 'Get ms-do-agent arm artifacts'
  - task: DownloadBuildArtifacts@0
    inputs:
      buildType: 'specific'
      project: '7cdf52bf-a6f7-436c-b12f-3f063ef389c4'
      pipeline: '23'
      buildVersionToDownload: 'latest'
      downloadType: 'specific'
      downloadPath: '/tmp/do_publishing'
    displayName: 'Get ms-do-agent x64 artifacts'
  - task: DownloadBuildArtifacts@0
    inputs:
      buildType: 'specific'
      project: '7cdf52bf-a6f7-436c-b12f-3f063ef389c4'
      pipeline: '31'
      buildVersionToDownload: 'latest'
      downloadType: 'specific'
      downloadPath: '/tmp/do_publishing'
    displayName: 'Get ms-do-pluginapt arm artifacts'
  - task: DownloadBuildArtifacts@0
    inputs:
      buildType: 'specific'
      project: '7cdf52bf-a6f7-436c-b12f-3f063ef389c4'
      pipeline: '29'
      buildVersionToDownload: 'latest'
      downloadType: 'specific'
      downloadPath: '/tmp/do_publishing'
    displayName: 'Get ms-do-pluginapt x64 artifacts'
  # Bundle everything into a single tarball attached to the GitHub release.
  - task: ArchiveFiles@2
    inputs:
      rootFolderOrFile: '/tmp/do_publishing'
      includeRootFolder: true
      archiveType: 'tar'
      archiveFile: '$(Build.ArtifactStagingDirectory)/artifacts.tar.gz'
      replaceExistingArchive: true
  - task: GitHubRelease@1
    inputs:
      gitHubConnection: 'client2'
      repositoryName: 'microsoft/do-client'
      action: 'create'
      target: '$(Build.SourceVersion)'
      tagSource: 'userSpecifiedTag'
      tag: 'ms-do-client-$(release.version)'
      title: '$(release.title) $(release.version)'
      # Changelog spans commits since the previous release tag.
      changeLogCompareToRelease: 'lastNonDraftReleaseByTag'
      changeLogCompareToReleaseTag: 'ms-do-client-$(release.previous_version)'
      changeLogType: 'commitBased'

Просмотреть файл

@ -0,0 +1,37 @@
#! /bin/bash
# Bootstraps a Debian 10 machine with the toolchain and dependencies needed to build do-client.
echo "Setting up development environment for do-client"
# Various development machine tools
apt-get update -y --fix-missing
apt-get install -y make build-essential g++ gdb gdbserver gcc git wget
apt-get install -y python3 cmake ninja-build
# Open-source library dependencies
apt-get install -y libboost-all-dev libgtest-dev libproxy-dev libmsgsl-dev libssl-dev uuid-dev
# Install cpprest dependencies
# libssl-dev also required but installed above because plugin uses libssl-dev directly
apt-get install -y zlib1g-dev
# Cpprestsdk 2.10.10 is the latest publicly available version on Debian 10
# Build and install v2.10.16 as it's the earliest version which supports url-redirection
mkdir /tmp/cpprestsdk
cd /tmp/cpprestsdk
git clone https://github.com/microsoft/cpprestsdk.git .
git checkout v2.10.16
git submodule update --init
mkdir /tmp/cpprestsdk/build
cd /tmp/cpprestsdk/build
# Static release build; tests and samples skipped to keep bootstrap time down.
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DBUILD_SHARED_LIBS=OFF -DBUILD_TESTS=OFF -DBUILD_SAMPLES=OFF -Wno-dev -DWERROR=OFF ..
ninja
ninja install
# libgtest-dev is a source package and requires manual installation
mkdir /tmp/build_gtest/
cd /tmp/build_gtest
cmake /usr/src/gtest
make
make install
echo "Finished bootstrapping"

Просмотреть файл

@ -0,0 +1,56 @@
#! /bin/bash
# Bootstraps a Debian 9 (stretch) machine with the toolchain and dependencies needed to build do-client.
echo "Setting up development environment for do-client"
# Various development machine tools
apt-get update -y --fix-missing
apt-get install -y make build-essential g++ gdb gdbserver gcc git wget
# Cpprestsdk below requires min cmake version of 3.9, while 3.7 is the latest available on Debian9
# So build & install cmake from source
cd /tmp
wget https://cmake.org/files/v3.10/cmake-3.10.2.tar.gz
tar xzf cmake-3.10.2.tar.gz
cd /tmp/cmake-3.10.2
# BUGFIX: the cmake source tarball ships no Makefile; it must be configured
# with ./bootstrap before make can run.
./bootstrap
make
make install
apt-get install -y python3 ninja-build
# Open-source library dependencies
apt-get install -y libboost-all-dev libcpprest-dev libgtest-dev libproxy-dev libssl-dev uuid-dev
# Install cpprest dependencies
# libssl-dev also required but installed above because plugin uses libssl-dev directly
apt-get install -y zlib1g-dev
# Cpprestsdk 2.9.1 is the latest publicly available version on Debian 9
# Build and install v2.10.16 as it's the earliest version which supports url-redirection
mkdir /tmp/cpprestsdk
cd /tmp/cpprestsdk
git clone https://github.com/microsoft/cpprestsdk.git .
git checkout v2.10.16
git submodule update --init
mkdir /tmp/cpprestsdk/build
cd /tmp/cpprestsdk/build
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DBUILD_SHARED_LIBS=OFF -DBUILD_TESTS=OFF -DBUILD_SAMPLES=OFF -Wno-dev -DWERROR=OFF ..
ninja
ninja install
# libgtest-dev is a source package and requires manual installation
mkdir /tmp/build_gtest/
cd /tmp/build_gtest
cmake /usr/src/gtest
make
make install
# Install gsl from source (not present in debian stretch packages)
cd /tmp/
git clone https://github.com/Microsoft/GSL.git
cd GSL/
git checkout tags/v2.0.0
# BUGFIX: 'RUN' is a Dockerfile directive, not a shell command; the next three
# lines were previously prefixed with RUN and failed when run under bash.
cmake -DGSL_TEST=OFF .
make
make install
echo "Finished bootstrapping"

Просмотреть файл

@ -0,0 +1,56 @@
#! /bin/bash
# Bootstraps an Ubuntu 18.04 machine with the toolchain and dependencies needed to build do-client.
# Pass --no-docker to skip the docker + qemu cross-arch setup.
echo "Setting up development environment for do-client"
# Various development machine tools
apt-get update
apt-get install -y build-essential g++ gdb gdbserver git wget
apt-get install -y python3 cmake ninja-build rpm
# Open-source library dependencies
apt-get install -y libboost-all-dev libgtest-dev libproxy-dev libmsgsl-dev libssl-dev uuid-dev
# Install cpprest dependencies
# libssl-dev also required but installed above because plugin uses libssl-dev directly
apt-get install -y zlib1g-dev
# BUGFIX: was 'apt install python-pip' without -y, which prompts for confirmation
# and hangs non-interactive (CI/docker) runs. Use apt-get -y like the rest of the script.
apt-get install -y python-pip
pip install cpplint
# Installs to a non-standard location so add to PATH manually
export PATH=$PATH:~/.local/bin
# Cpprestsdk 2.10.2 is the latest publicly available version on Ubuntu 18.04
# Build and install v2.10.16 as it's the earliest version which supports url-redirection
mkdir /tmp/cpprestsdk
cd /tmp/cpprestsdk
git clone https://github.com/microsoft/cpprestsdk.git .
git checkout tags/v2.10.16
git submodule update --init
mkdir /tmp/cpprestsdk/build
cd /tmp/cpprestsdk/build
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DBUILD_SHARED_LIBS=OFF -DBUILD_TESTS=OFF -DBUILD_SAMPLES=OFF -Wno-dev -DWERROR=OFF ..
ninja
ninja install
# libgtest-dev is a source package and requires manual installation
mkdir /tmp/build_gtest/
cd /tmp/build_gtest
cmake /usr/src/gtest
make
make install
if [[ "$1" == "--no-docker" ]]; then
    echo "Skipping docker install"
else
    # Install docker to enable building cross-arch for arm
    # Instructions located at: https://docs.docker.com/engine/install/ubuntu/
    curl -fsSL https://get.docker.com -o get-docker.sh
    sh get-docker.sh
    # Install qemu for cross-arch support
    apt-get -y install qemu binfmt-support qemu-user-static
    # Register qemu with docker to more easily run cross-arch containers
    docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
fi
echo "Finished bootstrapping"

519
build/build.py Normal file
Просмотреть файл

@ -0,0 +1,519 @@
"""Build script for local and azure pipeline builds."""
import datetime
import os, sys
import time
import timeit
import subprocess
import shutil
import argparse
import warnings
import tempfile
import fnmatch
from pathlib import Path
from tempfile import gettempdir
#region Globals
# NOTE(review): VERBOSE is declared but never consulted in the visible code — confirm before relying on it.
VERBOSE = True
# Maps each buildable subproject name to the cmake define that enables it.
DOCLIENT_SUBPROJECT_BUILD_MAP = {
    "sdk" : "-DDO_INCLUDE_SDK=TRUE",
    "agent" : "-DDO_INCLUDE_AGENT=TRUE",
    "plugin-apt" : "-DDO_INCLUDE_PLUGINS=TRUE"
}
#endregion
def main():
    """The main entry point: parse arguments, create the platform build runner, run it."""
    # The previous version declared 'global VERBOSE' and
    # 'global DOCLIENT_SUBPROJECT_BUILD_MAP' here; neither was ever assigned,
    # so the declarations were no-ops and have been removed.
    script_args = parse_args()
    build = create_build_runner(script_args)
    build.run()
def parse_args():
    """Parses command-line arguments for the build script.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Builds the DeliveryOptimization client components')
    parser.add_argument(
        '--project', dest='project', type=str, required=True,
        help='The cmake subproject to build. e.g. {}'.format(list(DOCLIENT_SUBPROJECT_BUILD_MAP.keys()))
    )
    parser.add_argument(
        '--operation', dest='operation', type=str,
        help='The operation to perform. e.g. generate/build/cleanonly. Default is generate+build'
    )
    parser.add_argument(
        '--generator', dest='generator', type=str,
        help='The CMake generator to use. e.g. Ninja, Unix Makefiles'
    )
    parser.add_argument(
        '--config', dest='config', type=str,
        help='The target configuration. e.g. debug or release'
    )
    parser.add_argument(
        '--cmaketarget', dest='cmaketarget', type=str,
        help='The cmake target within each subproject to build. e.g. within sdk: deliveryoptimization-sdk-tests'
    )
    parser.add_argument(
        '--package-for', dest='package_type', type=str,
        help='Supply package type. e.g. deb, or rpm'
    )
    parser.add_argument(
        '--clean', dest='clean', action='store_true',
        help='Remove built binaries before re-building them'
    )
    parser.add_argument(
        '--static-analysis', dest='static_analysis', action='store_true',
        help='Run static analysis tools (cpplint)'
    )
    parser.add_argument(
        '--skip-tests', dest='skip_tests', action='store_true',
        help='Skip adding and building test code'
    )
    # DOCS only
    # (was a stray '''DOCS only''' string expression — a no-op statement, not a comment)
    parser.add_argument(
        '--no-proxy-support', dest='no_proxy_support', action='store_true',
        help='Enable building docs without support for proxy handling'
    )
    return parser.parse_args()
class NotSupportedTargetPlatformError(ValueError):
    """Raised when the target platform is unsupported or cannot be deduced."""
    pass
class NotSupportedHostEnvironmentError(ValueError):
    # NOTE(review): defined but not raised anywhere in the visible code — confirm it is used elsewhere.
    """Raised when the host environment cannot run the requested build."""
    pass
def create_build_runner(script_args):
    """Creates the appropriate subclass of BuildRunnerBase.

    Chooses the correct BuildRunner class for the target platform.

    Args:
        script_args (namespace):
            The arguments passed to the script parsed by argparse.

    Returns:
        The appropriate subclass of Build.

    Raises:
        NotSupportedTargetPlatformError: the host is not a recognized platform.
    """
    # Guard clause: bail out early on unrecognized platforms.
    if not is_running_on_linux():
        raise NotSupportedTargetPlatformError('Target platform is either not supported or could not be deduced from build environment')
    return LinuxBuildRunner(script_args)
#region BuildRunner classes
class BuildRunnerBase(object):
"""Base class for specific platform builds.
BuildRunner classes will inherit from this class
and will implement/override/add additional functionality
for that specific build.
Args:
script_args (namespace):
The arguments passed to the script parsed by argparse.
"""
def __init__(self, script_args):
super().__init__()
self.timeToClean = 0
self.timeToGenerate = 0
self.timeToBuild = 0
self.operation_type = script_args.operation
self.project_root_path = get_project_root_path()
self.cmake_target = None
self.project = None
if (script_args.cmaketarget is None):
self.cmake_target = "all"
else:
self.cmake_target = script_args.cmaketarget
if (script_args.project and script_args.project.lower() in DOCLIENT_SUBPROJECT_BUILD_MAP.keys()):
self.project = script_args.project.lower()
else:
raise ValueError('Project name must be within {}'.format(list(DOCLIENT_SUBPROJECT_BUILD_MAP.keys())))
self.script_args = script_args
self.is_clean_build = self.script_args.clean
if self.script_args.config:
self.config = self.script_args.config.lower()
elif get_env_var('BUILD_CONFIGURATION'):
self.config = get_env_var('BUILD_CONFIGURATION').lower()
else:
self.config = 'debug'
if not (self.config == 'debug' or self.config == 'release'):
raise ValueError('Building configuration for {self.platform} is not supported.'.format(self.config, self.platform))
if self.script_args.generator:
self.generator = self.script_args.generator
else:
self.generator = 'Ninja'
self.package_type = None
if self.script_args.package_type:
self.package_type = self.script_args.package_type.lower()
self.static_analysis = script_args.static_analysis
self.skip_tests = script_args.skip_tests
self.source_path = self.project_root_path
self.build_time = datetime.datetime.utcnow().strftime("%Y%m%d.%H%M%S")
@property
def flavor(self):
"""The unique flavor string for this build.
Returns:
The unique flavor string for this build.
e.g. linux-debug
"""
return '{}-{}'.format(self.platform, self.config)
@property
def platform(self):
"""The target platform.
Should be overriden by subclass.
Returns:
The target platform string.
e.g. windows, linux
"""
pass
@property
def build_path(self):
"""Path for the build."""
return get_default_build_path(self.project, self.flavor)
def run(self):
if self.cmake_target != None:
"""Executes the Build."""
self.print_start_build_msg()
if self.is_clean_build:
self.clean()
if self.operation_type:
if self.operation_type.lower() == "generate":
self.generate()
elif self.operation_type.lower() == "build":
self.build()
elif self.operation_type.lower() == "cleanonly":
if not self.is_clean_build:
self.clean()
else:
raise ValueError('Invalid operation specified: {}'.format(self.operation_type))
else:
self.generate()
self.build()
self.print_end_build_msg()
self.print_times()
if self.package_type:
self.package()
def print_start_build_msg(self):
"""Prints a message at the start of Build.run.
Can be overriden by subclass.
Typically subclasses will call
super().print_start_build_msg before adding their own
print statements.
"""
print('Starting Build for project: {}'.format(self.project))
print('Target OS: {}'.format(self.platform.capitalize()))
print('Flavor: {}'.format(self.flavor))
print('Config: {}'.format(self.config))
print('Subproject: {}'.format(self.project))
print('CMake Target: {}'.format(self.cmake_target))
print('CMake Generator: {}'.format(self.generator))
print('Clean: {}'.format(self.is_clean_build))
print('Source Path: {}'.format(self.source_path))
print('Build Path: {}'.format(self.build_path))
def print_end_build_msg(self):
"""Prints a message at the end of Build.run."""
print('Build Complete')
def print_times(self):
print('Time to clean: {}'.format(self.timeToClean))
print('Time to generate: {}'.format(self.timeToGenerate))
print('Time to build: {}'.format(self.timeToBuild))
def clean(self):
"""Deletes the output directory(s) for this Build."""
build_path = self.build_path
print('Purging: {}'.format(build_path))
start_time = timeit.default_timer()
if os.path.exists(build_path):
shutil.rmtree(build_path)
self.timeToClean = timeit.default_timer() - start_time
def generate(self):
"""Executes the generate phase of the build."""
# Only Windows versions of cmake have
# -S <source dir> or -B <build dir> options.
# To support cmake on all platforms,
# we need to create and change to our build output dir.
original_dir = os.getcwd()
os.makedirs(self.build_path, exist_ok=True)
os.chdir(self.build_path)
generate_command = self.create_generate_command()
start_time = timeit.default_timer()
run_command(generate_command)
self.timeToGenerate = timeit.default_timer() - start_time
os.chdir(original_dir)
def create_generate_command(self):
"""Creates the command to use in the generate phase.
Subclasses can override this method,
but most likely subclasses will want to
override generate_options instead.
Returns:
The generate command as a list of strings.
"""
return ['cmake', self.source_path] + self.generate_options
@property
def generate_options(self):
"""Additional options to use in generate.
Can be overriden by subclass.
Typically subclasses will call
super().generate_options + ['--foo', 'My option value']
to add their own options to the generate_command list.
Returns:
The list of additional generate options.
"""
generate_options = []
if self.generator:
generate_options.extend(['-G', self.generator])
if self.config == "debug":
generate_options.extend(["-DCMAKE_BUILD_TYPE=Debug"])
else:
generate_options.extend(["-DCMAKE_BUILD_TYPE=Release"])
# All pipelines perform a clean build so timestamp will get refreshed
# even though we can pass this only to the generate phase.
generate_options.extend(['-DDO_BUILD_TIMESTAMP={}'.format(self.build_time)])
if self.static_analysis:
generate_options.extend(["-DCMAKE_CXX_CPPLINT=cpplint"])
return generate_options
def build(self):
"""Executes the build phase of the build."""
build_command = self.create_build_command()
print('Executing: {}'.format(' '.join(build_command)))
start_time = timeit.default_timer()
run_command(build_command)
self.timeToBuild = timeit.default_timer() - start_time
def create_build_command(self):
    """Creates the command to use in the build phase.

    Subclasses can override this method, but most likely subclasses
    will want to override build_options instead.

    Returns:
        The build command as a list of strings.
    """
    command = ['cmake', '--build', self.build_path]
    command.extend(self.build_options)
    return command
@property
def build_options(self):
    """Additional options to use in build.

    Can be overridden by subclass. Typically subclasses will call
    super().build_options + ['--foo', 'My option value'].

    Returns:
        The list of additional build options.
    """
    return ['--target', self.cmake_target]
def package(self):
    """Runs cpack in the build directory to produce installable packages."""
    # Run cpack directly with cwd= instead of interpolating build_path into
    # a bash -c string, which broke on paths containing spaces or shell
    # metacharacters (and needlessly required /bin/bash).
    subprocess.call(['cpack', '.'], cwd=self.build_path)
class LinuxBuildRunner(BuildRunnerBase):
    """Linux BuildRunner class."""

    def __init__(self, script_args):
        super().__init__(script_args)

    @property
    def platform(self):
        return 'linux'

    @property
    def generate_options(self):
        opts = super().generate_options
        if self.project:
            opts.append(DOCLIENT_SUBPROJECT_BUILD_MAP[self.project])
        package_type = self.package_type
        if package_type:
            if package_type in ('deb', 'debian'):
                opts.append('-DDO_PACKAGE_TYPE=DEB')
            elif package_type == 'rpm':
                opts.append('-DDO_PACKAGE_TYPE=RPM')
            else:
                raise ValueError('{} is not a supported package_type'.format(package_type))
        if self.skip_tests:
            opts.append('-DDO_BUILD_TESTS=OFF')
        return opts
#endregion BuildRunner Classes
#region Util Functions
def get_os_name():
    """Gets the friendly OS name.

    This value can differ for local builds vs pipeline builds.

    Returns:
        The friendly version of the OS Name.
    """
    agent_os = get_env_var('AGENT_OS')
    return agent_os.lower() if agent_os else sys.platform.lower()
def is_running_on_linux():
    """Indicates if this build is running on a Linux agent/machine.

    Returns:
        True if the build is running on a Linux agent/machine.
        False otherwise.
    """
    os_name = get_os_name()
    return os_name.startswith('linux')
def get_project_root_path():
    """Gets the root path to our git repo.

    Note that this function may return a different value
    than what is expected after calling os.chdir.

    Returns:
        The root path to our git repo.
    """
    script_path = os.path.dirname(os.path.realpath(__file__))
    print('script_path={}'.format(script_path))
    return os.path.abspath(os.path.join(script_path, os.pardir))
def get_cmake_files_path(root_path=None):
    """Gets the path to custom cmake 'include' files for our build.

    TODO(shishirb) unused method

    Args:
        root_path (str):
            The project root path.
            If None, uses get_project_root_path() instead.

    Returns:
        The path to our custom cmake 'include' files.
    """
    base = get_project_root_path() if root_path is None else root_path
    return os.path.abspath(os.path.join(base, 'build', 'cmake'))
def get_default_build_path(project, flavor=None):
    """Gets the default path to the build folder.

    Uses the 'flavor' string to construct the path if available.

    Args:
        project (str):
            The project name, used in the build folder name.
        flavor (str):
            The unique flavor string for the build.
            If None, the project-level folder is returned.

    Returns:
        The default build path.
    """
    root = os.path.join(tempfile.gettempdir(), "build-deliveryoptimization-" + project)
    # os.path.join(root, None) raises TypeError, so only append the flavor
    # when one was actually provided (the parameter is documented optional).
    return os.path.join(root, flavor) if flavor else root
def get_env_var(name):
    """Gets the environment variable value or None.

    Utility function to get an environment variable value
    given the name of the environment variable.
    Returns None if the environment variable is not set/present.

    Args:
        name (str):
            The name of the environment variable.

    Returns:
        The value of the environment variable with name.
        None if the environment variable is not set/present.
    """
    # Lookup is by the uppercased name, matching the original behavior.
    return os.environ.get(name.upper())
def run_command(command):
    """Runs the given command.

    Args:
        command (list):
            The command to run in list form.

    Raises:
        subprocess.CalledProcessError
    """
    command_string = ' '.join(command)
    print('Running command {}.'.format(command_string))
    try:
        _check_call(command)
    except subprocess.CalledProcessError:
        print('Running {} failed. Rethrowing exception'.format(command_string))
        raise
def _check_call(command):
    """Wrapper around subprocess.check_call.

    Handles piping output in various cases:
    - Verbose logging turned on/off.

    Args:
        command (list):
            The command to run in list form.

    Raises:
        subprocess.CalledProcessError
    """
    # We pipe stderr to stdout because
    # some commands (like apt) write output to stderr,
    # but that output should not cause a failure
    # in the pipeline build job.
    if VERBOSE:
        pipes = {'stderr': subprocess.STDOUT}
    else:
        pipes = {'stderr': subprocess.DEVNULL, 'stdout': subprocess.DEVNULL}
    subprocess.check_call(command, **pipes)
#endregion Util Functions
# Script entry point (main is defined earlier in this file).
if __name__ == "__main__":
    main()

Просмотреть файл

@ -0,0 +1,45 @@
# Dockerfile for building DO apt plugin for linux-arm (32bit).
# First, install the docker extension for VSCode. Then you can right-click on this file
# and choose Build Image. Give it a name and it will build the image.
#
# Open interactive terminal into the image in a container:
# docker run -ti --rm --entrypoint=/bin/bash -v <project root dir>:/code -v <build root dir>:/build <image_name>
# Example:
# docker run -ti --rm --entrypoint=/bin/bash -v D:\do-client-lite:/code -v D:\temp\build_client_lite\arm-linux-debug:/build custom-debian10-arm32

FROM arm32v7/debian:buster

SHELL [ "/bin/bash", "-c"]

# QEMU is a Linux emulator which enables cross-arch support in docker
# In order to build this image on a Linux host, need to install QEMU:
#
# sudo apt-get install qemu-user
# update-binfmts --display
# sudo apt install qemu binfmt-support qemu-user-static
# cp /usr/bin/qemu-arm-static <src root>/build/docker/arm32/debian10
#
# Then copy the build script to the build directory
# cp <src root>/build/bootstrap/bootstrap-debian-10.sh <src root>build/docker/arm32/debian10
#
# After running the above, you can build the image by running in the current dockerfile directory
# sudo docker build -t <your image name> . --no-cache --network=host
# NOTE: both files below must be copied next to this Dockerfile first (see above).
COPY qemu-arm-static /usr/bin/qemu-arm-static
COPY bootstrap-debian-10.sh /tmp/bootstrap-debian-10.sh

# Bootstrap installs the toolchain and build dependencies into the image.
WORKDIR /tmp/
RUN chmod +x bootstrap-debian-10.sh
RUN ./bootstrap-debian-10.sh

# /code is the expected mount point for the project source (see run example above).
VOLUME /code
WORKDIR /code

ENTRYPOINT [ "/bin/bash", "-c" ]

# We specify an empty command so that we can pass options to the ENTRYPOINT command.
# This is a bit of a Dockerfile quirk where if the ENTRYPOINT value is defined,
# then CMD becomes the default options passed to ENTRYPOINT.
# In this case we don't have any desired default arguments.
# However, we have to specify CMD to enable passing of command line parameters to ENTRYPOINT in the first place.
CMD [ ]

Просмотреть файл

@ -0,0 +1,45 @@
# Dockerfile for building DO apt plugin for linux-arm (32bit).
# First, install the docker extension for VSCode. Then you can right-click on this file
# and choose Build Image. Give it a name and it will build the image.
#
# Open interactive terminal into the image in a container:
# docker run -ti --rm --entrypoint=/bin/bash -v <project root dir>:/code -v <build root dir>:/build <image_name>
# Example:
# docker run -ti --rm --entrypoint=/bin/bash -v D:\do-client-lite:/code -v D:\temp\build_client_lite\arm-linux-debug:/build custom-debian9-arm32

FROM arm32v7/debian:stretch

SHELL [ "/bin/bash", "-c"]

# QEMU is a Linux emulator which enables cross-arch support in docker
# In order to build this image on a Linux host, need to install QEMU:
#
# sudo apt-get install qemu-user
# update-binfmts --display
# sudo apt install qemu binfmt-support qemu-user-static
# cp /usr/bin/qemu-arm-static <src root>/build/docker/arm32/debian9
#
# Then copy the build script to the build directory
# cp <src root>/build/bootstrap/bootstrap-debian-9.sh <src root>build/docker/arm32/debian9
#
# After running the above, you can build the image by running in the current dockerfile directory
# sudo docker build -t <your image name> . --no-cache --network=host
# NOTE: both files below must be copied next to this Dockerfile first (see above).
COPY qemu-arm-static /usr/bin/qemu-arm-static
COPY bootstrap-debian-9.sh /tmp/bootstrap-debian-9.sh

# Bootstrap installs the toolchain and build dependencies into the image.
WORKDIR /tmp/
RUN chmod +x bootstrap-debian-9.sh
RUN ./bootstrap-debian-9.sh

# /code is the expected mount point for the project source (see run example above).
VOLUME /code
WORKDIR /code

ENTRYPOINT [ "/bin/bash", "-c"]

# We specify an empty command so that we can pass options to the ENTRYPOINT command.
# This is a bit of a Dockerfile quirk where if the ENTRYPOINT value is defined,
# then CMD becomes the default options passed to ENTRYPOINT.
# In this case we don't have any desired default arguments.
# However, we have to specify CMD to enable passing of command line parameters to ENTRYPOINT in the first place.
CMD [ ]

Просмотреть файл

@ -0,0 +1,42 @@
# Dockerfile for building DO apt plugin for linux-arm (64bit).
# First, install the docker extension for VSCode. Then you can right-click on this file
# and choose Build Image. Give it a name and it will build the image.
#
# Open interactive terminal into the image in a container:
# docker run -ti --rm --entrypoint=/bin/bash -v <project root dir>:/code -v <build root dir>:/build <image_name>
# Example:
# docker run -ti --rm --entrypoint=/bin/bash -v D:\do-client-lite:/code -v D:\temp\build_client_lite\arm-linux-debug:/build custom-ubuntu18.04-arm64

FROM arm64v8/ubuntu:18.04

SHELL [ "/bin/bash", "-c"]

# QEMU is a Linux emulator which enables cross-arch support in docker
# In order to build this image on a Linux host, need to install QEMU:
#
# sudo apt-get install qemu-user
# update-binfmts --display
# sudo apt install qemu binfmt-support qemu-user-static
# cp /usr/bin/qemu-aarch64-static <src root>/build/docker/arm64/ubuntu18.04
#
# Then copy the build script to the build directory
# cp <src root>/build/bootstrap/bootstrap-ubuntu-18.04.sh <src root>build/docker/arm64/ubuntu18.04
#
# After running the above, you can build the image by running in the current dockerfile directory
# sudo docker build -t <your image name> . --no-cache --network=host
COPY qemu-aarch64-static /usr/bin/qemu-aarch64-static
COPY bootstrap-ubuntu-18.04.sh /tmp/bootstrap-ubuntu-18.04.sh

WORKDIR /tmp/
RUN chmod +x bootstrap-ubuntu-18.04.sh
RUN ./bootstrap-ubuntu-18.04.sh

# Declare the source mount point and default working directory, matching the
# arm32 images; without the WORKDIR the entrypoint would run in /tmp.
VOLUME /code
WORKDIR /code

ENTRYPOINT [ "/bin/bash", "-c"]

# We specify an empty command so that we can pass options to the ENTRYPOINT command.
# This is a bit of a Dockerfile quirk where if the ENTRYPOINT value is defined,
# then CMD becomes the default options passed to ENTRYPOINT.
# In this case we don't have any desired default arguments.
# However, we have to specify CMD to enable passing of command line parameters to ENTRYPOINT in the first place.
CMD [ ]

Просмотреть файл

@ -0,0 +1,20 @@
#! /bin/bash

# Builds the DO SDK and the apt plugin inside the Linux build container.
# $1 = path to source code
# $2 = debug or release
set -e

echo "Building apt plugin within Docker on Linux container"

echo "Building & Installing sdk from source"
# Quote positional parameters so paths containing spaces do not word-split.
cd "$1"
python3 build/build.py --project sdk --cmaketarget deliveryoptimization --config "$2" --generator "Unix Makefiles" --clean
cd "/tmp/build-deliveryoptimization-sdk/linux-$2/"
cmake --build . --target install

cd "$1"
echo "Building linux-apt plugin from source"
python3 build/build.py --project plugin-apt --config "$2" --package-for debian --generator "Unix Makefiles" --clean
echo "Build of doclient-plugin completed"

164
client-lite/CMakeLists.txt Normal file
Просмотреть файл

@ -0,0 +1,164 @@
# The daemon binary name is supplied by the parent CMakeLists.
if (NOT DOSVC_BIN_NAME)
    message (FATAL_ERROR "Agent daemon name not defined")
endif ()

project (${DOSVC_BIN_NAME} VERSION 0.4.0)

option (DO_PROXY_SUPPORT "Set DO_PROXY_SUPPORT to OFF to turn off proxy support for downloads and thus remove dependency on libproxy." ON)

add_definitions(-DBOOST_ALL_DYN_LINK=1)

# Get full debug info and also define DEBUG macro in debug builds.
# CMAKE_BUILD_TYPE is quoted: when it is unset the unquoted form collapses
# to a one-argument string(TOLOWER) call and configuration fails.
string(TOLOWER "${CMAKE_BUILD_TYPE}" DO_BUILD_TYPE)
if (DO_BUILD_TYPE MATCHES debug)
    set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DDEBUG")
endif()

# -Wno-noexcept-type, the offending function is SetResultLoggingCallback, this warning is fixed in C++17 because exception specification
# is part of a function type. Since the offending function is not public when compiled into docs_common just add the compiler flag here
# to disable the warning.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-noexcept-type")

fixup_compile_options_for_arm()
# Links libdl into the given target when cross-compiling for ARM.
# Required for static linking openssl on peabody: provides dynamic library
# functions like dlopen.
function (target_link_dl_lib target)
    # Quote the expansions: unquoted arguments in if() can be re-dereferenced.
    if ("${CMAKE_CXX_COMPILER}" MATCHES "arm-linux" OR "${CMAKE_CXX_COMPILER}" MATCHES "aarch64-linux")
        find_library (DL_LIB NAMES dl)
        message(STATUS "Found DL Lib for ${target}: ${DL_LIB}")
        # PRIVATE keyword: docs_common already uses the keyword signature of
        # target_link_libraries, and CMake forbids mixing it with the plain one.
        # PRIVATE deps of a static lib are still propagated to the final link.
        target_link_libraries(${target} PRIVATE ${DL_LIB})
    endif ()
endfunction ()
# Include external libraries here:
find_package(Boost COMPONENTS log filesystem REQUIRED)

# Cpprest Issues:
# 1. v2.10.10 min version (see required PR link below). cpprestsdk does not seem to support specifying this through cmake.
# https://github.com/microsoft/cpprestsdk/pull/1019/files
#
# 2. Installing libcpprest-dev via apt installs cpprest's cmake config files to a non-default cmake search path before v2.10.9
# See: https://github.com/microsoft/cpprestsdk/issues/686
# This issue has been patched but has not made its way to the stable branch for Ubuntu
# Since we are statically linking to v2.10.16 we no longer need to worry about the above as cpprest is patched to provide the proper package configuration metadata
find_package(cpprestsdk CONFIG REQUIRED)

if (DO_PROXY_SUPPORT)
    message (STATUS "Proxy support requested. Will look for libproxy.")
    find_package(libproxy REQUIRED)
else ()
    message (STATUS "Proxy support not requested.")
endif ()

find_path(GSL_INCLUDE_DIR gsl)
# find_path stores <var>-NOTFOUND on failure, which evaluates to false in if();
# testing the variable directly avoids the fragile unquoted STREQUAL compare.
if (NOT GSL_INCLUDE_DIR)
    message(FATAL_ERROR "Could not find MS Guidelines Support Library.")
endif()
# Include dirs shared by docs_common and the agent executable.
set (docs_common_includes
    ${include_directories_for_arm}
    ${GSL_INCLUDE_DIR}
    "${PROJECT_SOURCE_DIR}/src/config"
    "${PROJECT_SOURCE_DIR}/src/download"
    "${PROJECT_SOURCE_DIR}/src/include"
    "${PROJECT_SOURCE_DIR}/src/ipc"
    "${PROJECT_SOURCE_DIR}/src/threading"
    "${PROJECT_SOURCE_DIR}/src/trace"
    "${PROJECT_SOURCE_DIR}/src/util"
    )

# Please maintain this list in alphabetical order.
# NOTE(review): file(GLOB) only picks up newly added .cpp files after a
# re-configure; an explicit source list would be more reliable.
file (GLOB files_docs_common
    src/config/*.cpp
    src/download/*.cpp
    src/ipc/*.cpp
    src/threading/*.cpp
    src/trace/*.cpp
    src/util/*.cpp
    )

# Dir path used for docs_common and debian control scripts
set(docs_svc_config_dir_path "/etc/${DOSVC_BIN_NAME}")
set(docs_svc_persistence_dir_path "/var/cache/${DOSVC_BIN_NAME}")
set(docs_svc_run_dir_path "/var/run/${DOSVC_BIN_NAME}")

# Build product files into a lib for use by other targets
add_library(docs_common STATIC ${files_docs_common})
# Bake the runtime directory paths into the library as macros.
target_compile_definitions(docs_common
    PRIVATE DO_CONFIG_DIRECTORY_PATH="${docs_svc_config_dir_path}"
            DO_PERSISTENCE_DIRECTORY_PATH="${docs_svc_persistence_dir_path}"
            DO_RUN_DIRECTORY_PATH="${docs_svc_run_dir_path}"
    )
target_include_directories(docs_common PUBLIC ${docs_common_includes})
if (DO_PROXY_SUPPORT)
    target_compile_definitions(docs_common PRIVATE DO_PROXY_SUPPORT)
    target_link_libraries(docs_common PUBLIC libproxy::proxy)
endif ()
target_link_dl_lib(docs_common)
# Include executables entry point
file (GLOB files_docs_exe
    src/exe/*.cpp
    )

add_do_version_lib(${PROJECT_NAME} ${PROJECT_VERSION})

# Add the executable
add_executable(${DOSVC_BIN_NAME} ${files_docs_exe})
target_include_directories(${DOSVC_BIN_NAME} PRIVATE ${docs_common_includes})
# PRIVATE: an executable has no link consumers; the keyword-less form of
# target_link_libraries is legacy and cannot be mixed with keyword calls.
target_link_libraries(${DOSVC_BIN_NAME} PRIVATE
    doversion
    docs_common
    cpprestsdk::cpprest
    ${Boost_LIBRARIES})
# Install and cpack packaging rules (DEB or RPM), Linux only.
if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
    include(GNUInstallDirs)

    # Install binary
    set(DO_INSTALL_BINDIR ${CMAKE_INSTALL_FULL_BINDIR})
    install(TARGETS ${DOSVC_BIN_NAME} DESTINATION ${DO_INSTALL_BINDIR})

    if(DO_PACKAGE_TYPE)
        message(STATUS "Packaging for ${DO_PACKAGE_TYPE}")
        set_common_cpack_vars(${DOSVC_BIN_NAME} "Delivery Optimization downloader with Microsoft Connected Cache support")

        # Packaging scripts and variables required for them.
        # These @variables@ are substituted into the postinst/prerm/postrm templates.
        set(docs_svc_name "${DOSVC_BIN_NAME}.service")
        set(docs_svc_bin_path ${DO_INSTALL_BINDIR}/${DOSVC_BIN_NAME})
        set(docs_systemd_cfg_path "/etc/systemd/system/${docs_svc_name}")
        set(do_user "do")
        set(do_group "do")

        configure_file(${CMAKE_CURRENT_SOURCE_DIR}/build/postinst.in.sh
            ${CMAKE_CURRENT_BINARY_DIR}/packaging/postinst @ONLY)
        configure_file(${CMAKE_CURRENT_SOURCE_DIR}/build/prerm.in.sh
            ${CMAKE_CURRENT_BINARY_DIR}/packaging/prerm @ONLY)
        configure_file(${CMAKE_CURRENT_SOURCE_DIR}/build/postrm.in.sh
            ${CMAKE_CURRENT_BINARY_DIR}/packaging/postrm @ONLY)

        if (DO_PACKAGE_TYPE STREQUAL "DEB")
            # Install/remove scripts
            set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${CMAKE_CURRENT_BINARY_DIR}/packaging/postinst;${CMAKE_CURRENT_BINARY_DIR}/packaging/prerm;${CMAKE_CURRENT_BINARY_DIR}/packaging/postrm")
        elseif (DO_PACKAGE_TYPE STREQUAL "RPM")
            # Install/Uninstall scripts
            set(CPACK_RPM_POST_INSTALL_SCRIPT_FILE ${CMAKE_CURRENT_BINARY_DIR}/packaging/postinst)
            set(CPACK_RPM_PRE_UNINSTALL_SCRIPT_FILE ${CMAKE_CURRENT_BINARY_DIR}/packaging/prerm)
            set(CPACK_RPM_POST_UNINSTALL_SCRIPT_FILE ${CMAKE_CURRENT_BINARY_DIR}/packaging/postrm)
            # Automatic dependency detection is enabled by default in the rpm generator
        endif()

        include(CPack)
    endif()
endif() # Linux

if(DO_BUILD_TESTS)
    add_subdirectory (test)
endif()

Просмотреть файл

@ -0,0 +1,73 @@
#!/bin/bash

# Post-install script for the DO agent service: creates the service
# user/group, prepares the config/persistence directories, writes the
# systemd unit file and starts the service.
# @...@ placeholders are substituted by CMake configure_file.
do_group_name=@do_group@
do_user_name=@do_user@
config_path=@docs_svc_config_dir_path@
persistence_path=@docs_svc_persistence_dir_path@
svc_name=@docs_svc_name@
svc_config_path=@docs_systemd_cfg_path@
svc_bin_path=@docs_svc_bin_path@

# Exit early to fail the install if any command here fails
set -e

echo "Running post-install script for $svc_name"

# Quote all expansions so paths with spaces do not word-split.
if [ ! -f "$svc_bin_path" ]; then echo "docs binary cannot be found"; exit 1; fi

if ! getent group "$do_group_name" > /dev/null; then
    addgroup --system "$do_group_name"
fi

if ! getent passwd "$do_user_name" > /dev/null; then
    adduser --system "$do_user_name" --ingroup "$do_group_name" --shell /bin/false
fi

# Add each admin user to the do group - for systems installed before Ubuntu 12.04 LTS (Precise Pangolin)
# Use sed to parse the user name from a string in the form group:passwd:gid:member
for u in $(getent group admin | sed -e "s/^.*://" -e "s/,/ /g"); do
    adduser "$u" "$do_group_name" > /dev/null || true
done

# Add each sudo user to the do group
# Use sed to parse the user name from a string in the form group:passwd:gid:member
for u in $(getent group sudo | sed -e "s/^.*://" -e "s/,/ /g"); do
    adduser "$u" "$do_group_name" > /dev/null || true
done

# Creates the directory if needed and makes it owned by the DO user,
# group-writable by the DO group.
configure_dir() {
    local dir_path="$1"
    echo "Configuring dir: $dir_path"
    if [ ! -d "$dir_path" ]; then
        mkdir "$dir_path"
    fi
    chgrp -R "$do_group_name" "$dir_path"
    chown "$do_user_name" "$dir_path"
    chmod g+w "$dir_path"
}

configure_dir "$config_path"
configure_dir "$persistence_path"

# See https://www.freedesktop.org/software/systemd/man/systemd.directives.html
echo "Installing $svc_name"
cat > "${svc_config_path}" << EOF
[Unit]
Description=$svc_name: Performs content delivery optimization tasks
[Service]
ExecStart=$svc_bin_path
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF

echo "Service conf stored at: $svc_config_path"
echo "Service bin located at: $svc_bin_path"

echo "Reloading systemd daemon list and enabling $svc_name"
systemctl daemon-reload
systemctl enable "$svc_name"
systemctl stop "$svc_name" > /dev/null # stop if already running
systemctl start "$svc_name"
echo "Done!"

Просмотреть файл

@ -0,0 +1,36 @@
#!/bin/bash

# Post-removal script for the DO agent service: removes the systemd unit,
# working directories, and the service user/group.
# @...@ placeholders are substituted by CMake configure_file.
# Note: no 'set -e' on purpose — removal is best-effort.
do_group_name=@do_group@
do_user_name=@do_user@
config_path=@docs_svc_config_dir_path@
persistence_path=@docs_svc_persistence_dir_path@
run_path=@docs_svc_run_dir_path@
svc_name=@docs_svc_name@
svc_config_path=@docs_systemd_cfg_path@

echo "Running post-removal script for $svc_name"

systemctl reset-failed "$svc_name" # Remove ourselves from the failed services list. No-op if never failed earlier.

# Quote all expansions so paths with spaces do not word-split.
echo "Removing systemd unit file: $svc_config_path"
rm "$svc_config_path"
echo "Reloading daemons"
systemctl daemon-reload

echo "Removing working directory: $persistence_path"
rm -rf "$persistence_path"
echo "Removing config directory: $config_path"
rm -rf "$config_path"
echo "Removing run directory: $run_path"
rm -rf "$run_path"

echo "Removing user: ${do_user_name}"
userdel "${do_user_name}"
echo "Removing group: ${do_group_name}"
groupdel "${do_group_name}"
echo "Done!"

Просмотреть файл

@ -0,0 +1,11 @@
#!/bin/bash

# Pre-removal script for the DO agent service: stop and disable the systemd
# unit before package files are removed.
# @docs_svc_name@ is substituted by CMake configure_file.
svc_name=@docs_svc_name@

echo "Running pre-removal script for $svc_name"
echo "Stopping and disabling service $svc_name"
systemctl stop $svc_name
systemctl disable $svc_name
echo "Done!"

Просмотреть файл

@ -0,0 +1,29 @@
#pragma once

#include <chrono>

// How long an MCC host stays banned after a reported error (see MCCManager).
constexpr auto g_mccHostBanInterval = std::chrono::minutes(5);

// Download progress is sampled at this interval; a download counts as stalled
// after the max number of no-progress intervals below. The WithMCC variant is
// smaller — presumably a tighter tolerance while an MCC host is in use
// (confirm at the usage site).
constexpr auto g_progressTrackerCheckInterval = std::chrono::seconds(10);
constexpr UINT g_progressTrackerMaxNoProgressIntervals = 30;
constexpr UINT g_progressTrackerMaxNoProgressIntervalsWithMCC = 6;
constexpr auto g_progressTrackerMaxRetryDelay = std::chrono::seconds(30);

// Provides a wait time of ~49 days which should be sufficient for uses
// of steady_clock (timing, event waits, condition_variable waits).
constexpr auto g_steadyClockInfiniteWaitTime = std::chrono::milliseconds(MAXUINT);

// Trace file/folder size limits and default verbosity.
#define DEF_TRACE_FILE_MAXSIZE_BYTES (8U * 1024 * 1024)
#define DEF_TRACE_FOLDER_MAXSIZE_BYTES (DEF_TRACE_FILE_MAXSIZE_BYTES * 2)
#define DEF_TRACE_LEVEL boost::log::trivial::trace

// Config key names (read by ConfigManager) and related defaults.
const char* const ConfigName_AduIoTConnectionString = "ADUC_IoTConnectionString";
const char* const ConfigName_CacheHostFallbackDelayBgSecs = "DODelayCacheServerFallbackBackground";
const char* const ConfigName_CacheHostFallbackDelayFgSecs = "DODelayCacheServerFallbackForeground";
constexpr auto g_cacheHostFallbackDelayDefault = std::chrono::seconds(0); // default: immediate fallback
constexpr auto g_cacheHostFallbackDelayNoFallback = std::chrono::seconds(-1); // fallback to CDN not allowed
const char* const ConfigName_CacheHostServer = "DOCacheHost";
const char* const ConfigName_RestControllerValidateRemoteAddr = "RestControllerValidateRemoteAddr";
constexpr auto g_RestControllerValidateRemoteAddrDefault = true; // default: enabled

Просмотреть файл

@ -0,0 +1,54 @@
#include "do_common.h"
#include "config_manager.h"
#include "config_defaults.h"
#include "do_persistence.h"
#include "string_ops.h"
// Production instance: reads the admin and SDK config files from their
// default on-disk locations.
ConfigManager::ConfigManager() :
    ConfigManager(docli::GetAdminConfigFilePath(), docli::GetSDKConfigFilePath())
{
}

// Used by unit tests to override config paths
ConfigManager::ConfigManager(const std::string& adminConfigPath, const std::string& sdkConfigPath) :
    _adminConfigs(adminConfigPath),
    _sdkConfigs(sdkConfigPath)
{
}
std::chrono::seconds ConfigManager::CacheHostFallbackDelay()
{
std::chrono::seconds returnValue = g_cacheHostFallbackDelayDefault;
// We don't yet differentiate between background and foreground downloads, so check both configs
boost::optional<UINT> delay = _adminConfigs.Get<UINT>(ConfigName_CacheHostFallbackDelayBgSecs);
if (!delay)
{
delay = _adminConfigs.Get<UINT>(ConfigName_CacheHostFallbackDelayFgSecs);
}
if (delay)
{
returnValue = std::chrono::seconds(delay.get());
}
return returnValue;
}
// Returns the admin-configured MCC host name, or an empty string if unset.
std::string ConfigManager::CacheHostServer()
{
    auto host = _adminConfigs.Get<std::string>(ConfigName_CacheHostServer);
    return host ? host.get() : std::string{};
}
// Returns the ADU IoT connection string from the SDK config,
// or an empty string if unset.
std::string ConfigManager::IoTConnectionString()
{
    auto connStr = _sdkConfigs.Get<std::string>(ConfigName_AduIoTConnectionString);
    return connStr ? connStr.get() : std::string{};
}
bool ConfigManager::RestControllerValidateRemoteAddr()
{
boost::optional<bool> validateRemoteAddr = _adminConfigs.Get<bool>(ConfigName_RestControllerValidateRemoteAddr);
return boost::get_optional_value_or(validateRemoteAddr, g_RestControllerValidateRemoteAddrDefault);
}

Просмотреть файл

@ -0,0 +1,19 @@
#pragma once

#include "do_json_parser.h"

// Read-only accessor over the agent's JSON config files (admin + SDK).
class ConfigManager
{
public:
    ConfigManager();
    // Test hook: override the on-disk config file locations.
    ConfigManager(const std::string& adminConfigPath, const std::string& sdkConfigPath);

    // Delay before falling back from the MCC cache host to the original URL.
    std::chrono::seconds CacheHostFallbackDelay();
    std::string CacheHostServer();      // DOCacheHost value; empty if unset
    std::string IoTConnectionString();  // ADUC_IoTConnectionString value; empty if unset
    bool RestControllerValidateRemoteAddr();

private:
    JsonParser _adminConfigs;
    JsonParser _sdkConfigs;
};

Просмотреть файл

@ -0,0 +1,105 @@
#include "do_common.h"
#include "mcc_manager.h"
#include "config_defaults.h"
#include "config_manager.h"
// Extracts the GatewayHostName value (the MCC/edge host) from an IoT Hub
// connection string; returns an empty string when the field is absent.
static std::string GetHostNameFromIoTConnectionString(const char* connectionString)
{
    DoLogDebug("Parsing connection string: %s", connectionString);

    // Format: HostName=<iothub_host_name>;DeviceId=<device_id>;SharedAccessKey=<device_key>;GatewayHostName=<edge device hostname>
    static const char* toFind = "GatewayHostName=";
    const char* start = strcasestr(connectionString, toFind);
    if (start == NULL)
    {
        DoLogDebug("GatewayHostName not found in %s", connectionString);
        return {};
    }
    start += strlen(toFind);

    // Value runs until the next ';' or the end of the string.
    const char* end = strchr(start, ';');
    return (end == NULL) ? std::string{start} : std::string{start, end};
}
// Holds a reference to the shared ConfigManager; the caller must keep it
// alive for the lifetime of this MCCManager.
MCCManager::MCCManager(ConfigManager& sdkConfigs):
    _configManager(sdkConfigs)
{
}
// Returns the MCC host to use next, or an empty string when none is
// configured or the configured host is currently banned.
std::string MCCManager::NextHost()
{
    // Explicit DOCacheHost config wins; otherwise try to parse the gateway
    // host out of the ADU IoT connection string.
    std::string mccHostName = _configManager.CacheHostServer();
    if (mccHostName.empty())
    {
        const std::string connString = _configManager.IoTConnectionString();
        if (!connString.empty())
        {
            mccHostName = GetHostNameFromIoTConnectionString(connString.data());
        }
    }
    if (!mccHostName.empty())
    {
        if (_banList.IsBanned(mccHostName))
        {
            // Forget the first-handed-out time so the clock restarts when
            // the ban expires, and report "no host" to the caller.
            _hosts.erase(mccHostName);
            mccHostName.clear();
        }
        else
        {
            // Record the time of when we first handed out this host
            if (_hosts.find(mccHostName) == _hosts.end())
            {
                _hosts[mccHostName] = std::chrono::steady_clock::now();
            }
        }
    }
    DoLogVerbose("Returning MCC host: [%s]", mccHostName.data());
    return mccHostName;
}
// True when config forbids falling back from MCC to the original URL,
// signaled by the sentinel delay value (-1s).
bool MCCManager::NoFallback() const
{
    const auto delay = _configManager.CacheHostFallbackDelay();
    return delay == g_cacheHostFallbackDelayNoFallback;
}
// Returns true if fallback to original URL is due now, false otherwise
bool MCCManager::ReportHostError(HRESULT hr, const std::string& host)
{
    const bool due = _IsFallbackDue(host);
    DoLogWarningHr(hr, "ACK error from MCC host: [%s], fallback due? %d", host.data(), due);
    if (!due)
    {
        return false;
    }
    // Ban the host for a while so NextHost() stops handing it out.
    _banList.Report(host, g_mccHostBanInterval);
    return true;
}
// True when the configured fallback delay has elapsed since this host was
// first handed out. The host must have been returned by NextHost() before.
bool MCCManager::_IsFallbackDue(const std::string& host) const
{
    auto entry = _hosts.find(host);
    DO_ASSERT(entry != _hosts.end());

    const auto fallbackDelay = _configManager.CacheHostFallbackDelay();
    if (fallbackDelay == g_cacheHostFallbackDelayNoFallback)
    {
        // No fallback, so don't ban this host.
        // Will have to rework this when there can be multiple MCC hosts.
        return false;
    }

    // Fallback is due if the delay interval has passed since the first time this host was handed out
    const auto deadline = entry->second + fallbackDelay;
    return deadline <= std::chrono::steady_clock::now();
}

Просмотреть файл

@ -0,0 +1,24 @@
#pragma once

#include <chrono>
#include <unordered_map>

#include "ban_list.h"

class ConfigManager;

// Tracks the Microsoft Connected Cache (MCC) host to use for downloads,
// when it was first handed out, and temporary bans after errors.
class MCCManager
{
public:
    MCCManager(ConfigManager& configManager);

    // Returns the MCC host to try next, or an empty string when none/banned.
    std::string NextHost();
    // True when config forbids falling back from MCC to the original URL.
    bool NoFallback() const;
    // Records a host error; returns true if fallback to the original URL is due.
    bool ReportHostError(HRESULT hr, const std::string& host);

private:
    bool _IsFallbackDue(const std::string& host) const;

    ConfigManager& _configManager;
    // Host name -> time it was first handed out via NextHost().
    std::unordered_map<std::string, std::chrono::steady_clock::time_point> _hosts;
    CBanList _banList;
};

Просмотреть файл

@ -0,0 +1,49 @@
#include "do_common.h"
#include "network_monitor.h"
#include <errno.h>
#include <ifaddrs.h>
// Interface name prefixes that count as "public" connectivity.
static const char* g_publicIfNames[] = { "eth", "wlan" };

// Returns true when an eth*/wlan* interface with an IPv4/IPv6 address exists.
// Enumeration failure is treated as "connected" so downloads are not blocked.
bool NetworkMonitor::IsConnected()
{
    struct ifaddrs* addrs;
    if (getifaddrs(&addrs) == -1)
    {
        DoLogError("getifaddrs() failed, errno: %d", errno);
        return true;
    }

    bool connected = false;
    for (struct ifaddrs* cur = addrs; (cur != nullptr) && !connected; cur = cur->ifa_next)
    {
        if (cur->ifa_addr == nullptr)
        {
            continue;
        }
        const int family = cur->ifa_addr->sa_family;
        if ((family != AF_INET) && (family != AF_INET6))
        {
            continue;
        }
        // An interface matches only when its name *starts with* one of the prefixes.
        for (auto prefix : g_publicIfNames)
        {
            auto pos = strcasestr(cur->ifa_name, prefix);
            if ((pos != nullptr) && (pos == cur->ifa_name))
            {
                DoLogInfo("Network connectivity detected. Interface: %s, address family: %d%s.",
                    cur->ifa_name, family,
                    (family == AF_INET) ? " (AF_INET)" :
                    (family == AF_INET6) ? " (AF_INET6)" : "");
                connected = true;
                break;
            }
        }
    }

    if (!connected)
    {
        DoLogWarning("No network connectivity detected");
    }
    freeifaddrs(addrs);
    return connected;
}

Просмотреть файл

@ -0,0 +1,8 @@
#pragma once

// Point-in-time network connectivity check (no change notifications).
class NetworkMonitor
{
public:
    // True when an eth*/wlan* interface with an IPv4/IPv6 address exists;
    // also true when interface enumeration fails (assume connected).
    static bool IsConnected();
};

Просмотреть файл

@ -0,0 +1,607 @@
#include "do_common.h"
#include "download.h"
#include <boost/filesystem.hpp>
#include <cpprest/uri.h>
#include "do_error.h"
#include "event_data.h"
#include "mcc_manager.h"
#include "network_monitor.h"
#include "http_agent.h"
#include "string_ops.h"
#include "task_thread.h"
#include "telemetry_logger.h"
// Sentinel meaning the no-progress timeout has not been explicitly configured.
const std::chrono::seconds Download::_unsetTimeout = std::chrono::seconds(0);

// Forward declaration: swaps the URL's host (and optionally port) for the MCC host.
static std::string SwapUrlHostNameForMCC(const std::string& url, const std::string& newHostname, UINT16 port = INTERNET_DEFAULT_PORT);
// Constructs a new download. 'url' may be empty and supplied later via
// SetProperty(Uri); a non-empty url must pass HttpAgent validation.
Download::Download(MCCManager& mccManager, TaskThread& taskThread, std::string url, std::string destFilePath) :
    _mccManager(mccManager),
    _taskThread(taskThread),
    _url(std::move(url)),
    _destFilePath(std::move(destFilePath))
{
    if (!_url.empty())
    {
        THROW_HR_IF(INET_E_INVALID_URL, !HttpAgent::ValidateUrl(_url));
    }
    // Each download gets a fresh GUID used as its external identifier.
    _id = CreateNewGuid();
    DoLogInfo("%s, new download, url: %s, dest: %s", GuidToString(_id).data(), _url.data(), _destFilePath.data());
}
// Cancels any tasks still queued for this download before members go away.
Download::~Download()
{
    _CancelTasks();
}
// Public state-change requests (invoked via the REST interface). Each
// performs the transition first — which may throw — and only then emits
// the corresponding telemetry event.

void Download::Start()
{
    _PerformStateChange(DownloadState::Transferring);
    EventDataDownloadStarted event(*this);
    TelemetryLogger::getInstance().TraceDownloadStart(event);
}

void Download::Pause()
{
    _PerformStateChange(DownloadState::Paused);
    EventDataDownloadPaused event(*this);
    TelemetryLogger::getInstance().TraceDownloadPaused(event);
}

void Download::Finalize()
{
    _PerformStateChange(DownloadState::Finalized);
    EventDataDownloadCompleted event(*this);
    TelemetryLogger::getInstance().TraceDownloadCompleted(event);
}

void Download::Abort()
{
    _PerformStateChange(DownloadState::Aborted);
    EventDataDownloadCanceled event(*this);
    TelemetryLogger::getInstance().TraceDownloadCanceled(event);
}
// Sets a mutable download property. Throws for read-only properties,
// invalid values, or states in which the change is not allowed.
void Download::SetProperty(DownloadProperty key, const std::string& value)
{
    DoLogInfo("%s, %d = %s", GuidToString(_id).data(), static_cast<int>(key), value.data());
    switch (key)
    {
    case DownloadProperty::Id:
        // The id is generated at construction and can never be overwritten.
        THROW_HR(DO_E_READ_ONLY_PROPERTY);
        break;
    case DownloadProperty::Uri:
    {
        THROW_HR_IF(E_INVALIDARG, value.empty());
        THROW_HR_IF(INET_E_INVALID_URL, !HttpAgent::ValidateUrl(value));
        // The URL may only be (re)set before the transfer starts or while paused.
        THROW_HR_IF(DO_E_INVALID_STATE, (_status.State != DownloadState::Created) && (_status.State != DownloadState::Paused));
        const bool fUrlChanged = (value != _url);
        _url = value;
        if ((_status.State == DownloadState::Paused) && fUrlChanged)
        {
            // A different URL invalidates transfer progress and the proxy choice.
            DoLogInfo("%s, URL changed, reset progress tracker and proxy list", GuidToString(_id).data());
            _progressTracker.Reset();
            _proxyList.Refresh(_url);
        }
        break;
    }
    case DownloadProperty::LocalPath:
        // The destination path can only be set before the download starts.
        THROW_HR_IF(E_INVALIDARG, value.empty());
        THROW_HR_IF(DO_E_INVALID_STATE, _status.State != DownloadState::Created);
        _destFilePath = value;
        break;
    case DownloadProperty::NoProgressTimeoutSeconds:
    {
        const auto timeout = std::chrono::seconds(docli::string_conversions::ToUInt(value));
        // A timeout below the progress-check interval could never be observed.
        THROW_HR_IF(E_INVALIDARG, timeout < g_progressTrackerCheckInterval);
        _noProgressTimeout = timeout;
        break;
    }
    default:
        DO_ASSERT(false);
        break;
    }
}
// Returns the requested property as a string; asserts on unknown keys.
std::string Download::GetProperty(DownloadProperty key) const
{
    DoLogInfo("%s, key: %d", GuidToString(_id).data(), static_cast<int>(key));
    switch (key)
    {
    case DownloadProperty::Id:
        return GuidToString(_id);
    case DownloadProperty::Uri:
        return _url;
    case DownloadProperty::LocalPath:
        return _destFilePath;
    case DownloadProperty::NoProgressTimeoutSeconds:
        return std::to_string(_noProgressTimeout.count());
    default:
        DO_ASSERT(false);
        return {};
    }
}
// Returns the current status snapshot; also traces it for telemetry.
DownloadStatus Download::GetStatus() const
{
    TelemetryLogger::getInstance().TraceDownloadStatus({*this});
    return _status;
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wswitch"
// This method handles state change requests from external caller (through the REST interface)
// State machine: the outer if/else selects on the current state, the inner
// switch on the requested state. Invalid transitions throw DO_E_INVALID_STATE;
// _status is updated only after the corresponding _Start/_Pause/_Resume/
// _Finalize/_Abort call succeeds.
void Download::_PerformStateChange(DownloadState newState)
{
    // Created/Transferred are internal states; callers can never request them.
    DO_ASSERT((newState != DownloadState::Created) && (newState != DownloadState::Transferred));
    DoLogVerbose("%s, state change request %d --> %d", GuidToString(_id).data(), _status.State, newState);
    if (_status.State == DownloadState::Created)
    {
        switch (newState)
        {
        case DownloadState::Transferring:
            _Start();
            _status._Transferring();
            break;
        case DownloadState::Paused: // no-op
            break;
        case DownloadState::Finalized:
            // Cannot finalize a download that never transferred.
            THROW_HR(DO_E_INVALID_STATE);
            break;
        case DownloadState::Aborted:
            _Abort();
            _status._Aborted();
            break;
        }
    }
    else if (_status.State == DownloadState::Transferring)
    {
        switch (newState)
        {
        case DownloadState::Transferring: // no-op
            break;
        case DownloadState::Paused:
            _Pause();
            _status._Paused();
            break;
        case DownloadState::Finalized:
            // Must complete the transfer before finalizing.
            THROW_HR(DO_E_INVALID_STATE);
            break;
        case DownloadState::Aborted:
            _Abort();
            _status._Aborted();
            break;
        }
    }
    else if (_status.State == DownloadState::Transferred)
    {
        switch (newState)
        {
        case DownloadState::Transferring:
        case DownloadState::Paused:
            break; // no-op
        case DownloadState::Finalized:
            _Finalize();
            _status._Finalized();
            break;
        case DownloadState::Aborted:
            _Abort();
            _status._Aborted();
            break;
        }
    }
    else if (_status.State == DownloadState::Paused)
    {
        switch (newState)
        {
        case DownloadState::Transferring:
            _Resume();
            _status._Transferring();
            break;
        case DownloadState::Paused: // no-op
            break;
        case DownloadState::Finalized:
            THROW_HR(DO_E_INVALID_STATE);
            break;
        case DownloadState::Aborted:
            _Abort();
            _status._Aborted();
            break;
        }
    }
    else if (_status.State == DownloadState::Finalized)
    {
        // Finalized and Aborted are terminal; only the same-state no-op is allowed.
        THROW_HR_IF(DO_E_INVALID_STATE, newState != DownloadState::Finalized);
    }
    else if (_status.State == DownloadState::Aborted)
    {
        THROW_HR_IF(DO_E_INVALID_STATE, newState != DownloadState::Aborted);
    }
    else
    {
        DO_ASSERT(false);
    }
}
#pragma GCC diagnostic pop
// Begins the transfer for the first time.
// Requires both URL and destination path to have been set. Creates/truncates
// the destination file, creates the http agent and issues the first request.
void Download::_Start()
{
    THROW_HR_IF(DO_E_DOWNLOAD_NO_URI, _url.empty());
    THROW_HR_IF(DO_E_FILE_DOWNLOADSINK_UNSPECIFIED, _destFilePath.empty());
    // TODO(shishirb) expect file to not exist
    _fileStream = std::make_unique<std::fstream>();
    // Surface stream failures as exceptions so they can be translated to HRESULTs below
    _fileStream->exceptions(std::fstream::badbit | std::fstream::failbit);
    try
    {
        _fileStream->open(_destFilePath, (std::fstream::out | std::fstream::binary | std::fstream::trunc));
    }
    catch (const std::system_error& e)
    {
        THROW_HR_MSG(E_INVALIDARG, "Error: %d, %s, file: %s", e.code().value(), e.what(), _destFilePath.data());
    }
    _httpAgent = std::make_unique<HttpAgent>(*this);
    _proxyList.Refresh(_url);
    // Fresh download: no bytes should have been accounted yet
    DO_ASSERT((_status.BytesTotal == 0) && (_status.BytesTransferred == 0));
    _SendHttpRequest();
}
// Resumes a previously started (then paused) transfer.
// Re-opens the destination file in append mode and either re-issues the
// http request or, if all bytes already arrived before the pause, schedules
// the transition to Transferred.
void Download::_Resume()
{
    // _Start must have run before: agent and file stream exist
    DO_ASSERT(_httpAgent);
    DO_ASSERT(_fileStream);
    // BytesTotal can be zero if the start request never completed due to an error/pause
    DO_ASSERT((_status.BytesTotal != 0) || (_status.BytesTransferred == 0));
    try
    {
        _fileStream->open(_destFilePath, (std::fstream::out | std::fstream::binary | std::fstream::app));
    }
    catch (const std::system_error& e)
    {
        THROW_HR_MSG(E_INVALIDARG, "Error: %d, %s, file: %s", e.code().value(), e.what(), _destFilePath.data());
    }
    if ((_status.BytesTotal != 0) && (_status.BytesTransferred == _status.BytesTotal))
    {
        // We could get here if download was Paused just as we wrote the last block in OnData and scheduled the status update.
        // That would have caused OnComplete to not get called so now we discover that the download was completed.
        // Schedule the state update asynchronously here to account for the state change that is done upon returning from here.
        DoLogInfo("%s, already transferred %llu out of %llu bytes", GuidToString(_id).data(), _status.BytesTransferred, _status.BytesTotal);
        _taskThread.SchedImmediate([this]()
        {
            _status._Transferred();
        }, this);
    }
    else
    {
        _SendHttpRequest();
    }
}
// Stops the transfer while keeping enough state to resume later.
// Order matters: close the http agent first — it waits for in-flight
// callbacks to drain — so nothing can touch the file stream afterwards.
void Download::_Pause()
{
    _httpAgent->Close(); // waits until all callbacks are complete
    _timer.Stop();
    _fHttpRequestActive = false;
    _fileStream->close(); // safe to close now that no callbacks are expected
}
// Completes the download: tears down the http agent and file stream and
// cancels any pending retry/progress-tracking tasks. The downloaded file
// itself is left in place.
void Download::_Finalize()
{
    _httpAgent->Close(); // waits until all callbacks are complete
    _fHttpRequestActive = false;
    _fileStream.reset(); // safe since no callbacks are expected
    _CancelTasks();
}
// Cancels the download and best-effort deletes the partial destination file.
// Runs under CATCH_LOG: failures here are logged and swallowed because
// abort must not throw.
void Download::_Abort() try
{
    // _httpAgent may be null if the download never started
    if (_httpAgent)
    {
        _httpAgent->Close();
    }
    _timer.Stop();
    _fileStream.reset();
    _CancelTasks();
    if (!_destFilePath.empty())
    {
        boost::filesystem::remove(_destFilePath);
    }
} CATCH_LOG()
// Moves the download into the transient-error flavor of Paused
// (Error == S_OK, ExtendedError == hr; see DownloadStatus::IsTransientError)
// and schedules a retry after a fixed delay. The retry resumes only when
// network connectivity is back; otherwise it reschedules itself.
void Download::_HandleTransientError(HRESULT hr)
{
    DO_ASSERT(FAILED(hr));
    // Transition into transient error state and schedule retry
    const auto retryDelay = std::chrono::seconds(30);
    DoLogInfo("%s, transient error: %x, will retry in %lld seconds", GuidToString(_id).data(), hr, retryDelay.count());
    _status._Paused(S_OK, hr);
    _taskThread.Sched([this]()
    {
        if (NetworkMonitor::IsConnected())
        {
            _ResumeAfterTransientError();
        }
        else
        {
            // Still no network connectivity, retry again after delay
            _HandleTransientError(DO_E_BLOCKED_BY_NO_NETWORK);
        }
    }, retryDelay, this);
}
// Retry callback for _HandleTransientError: re-issues the http request and
// returns to Transferring, but only if we are still in the transient-error
// state (the user may have paused/aborted in the meantime).
void Download::_ResumeAfterTransientError()
{
    DoLogInfo("%s, state: %d, error: %x, ext_error: %x", GuidToString(_id).data(), _status.State, _status.Error, _status.ExtendedError);
    if (_status.IsTransientError())
    {
        DO_ASSERT(!_fHttpRequestActive);
        _SendHttpRequest();
        _status._Transferring();
    }
    // else nothing to do since we are not in transient error state
}
// Issues the http request for the current position in the file: a full-file
// GET when nothing was transferred yet, otherwise a Range request for the
// remainder. Picks the next proxy from the proxy list and, when available,
// routes the request through an MCC (connected cache) host.
void Download::_SendHttpRequest()
{
    const auto& proxy = _proxyList.Next();
    const PCSTR szProxyUrl = !proxy.empty() ? proxy.data() : nullptr;
    std::string url = _url;
    std::string mccHost = _mccManager.NextHost();
    if (!mccHost.empty())
    {
        DoLogInfo("Found MCC host: %s", mccHost.data());
        url = SwapUrlHostNameForMCC(_url, mccHost);
    }
    if ((_status.State != DownloadState::Created) && (mccHost != _mccHost))
    {
        // Switching between MCC hosts (or to/from CDN) restarts the no-progress accounting
        DoLogInfo("%s, MCC host changed, reset progress tracker", GuidToString(_id).data());
        _progressTracker.Reset();
    }
    if (_status.BytesTransferred == 0)
    {
        DoLogInfo("%s, requesting full file from %s", GuidToString(_id).data(), url.data());
        THROW_IF_FAILED(_httpAgent->SendRequest(url.data(), szProxyUrl));
    }
    else
    {
        // Partial transfer: BytesTotal must be known to build the range
        DO_ASSERT((_status.BytesTotal != 0) && (_status.BytesTransferred < _status.BytesTotal));
        auto range = HttpAgent::MakeRange(_status.BytesTransferred, (_status.BytesTotal - _status.BytesTransferred));
        DoLogInfo("%s, requesting range: %s from %s", GuidToString(_id).data(), range.data(), url.data());
        THROW_IF_FAILED(_httpAgent->SendRequest(url.data(), szProxyUrl, nullptr, range.data()));
    }
    _timer.Start();
    _fHttpRequestActive = true;
    _mccHost = std::move(mccHost);
    // Clear error codes, they will get updated once the request completes
    _status.Error = S_OK;
    _status.ExtendedError = S_OK;
    _SchedProgressTracking();
}
// Schedules the periodic no-progress check (one pending instance at most,
// keyed on &_progressTracker). While Transferring, the check either pauses
// the download with DO_E_DOWNLOAD_NO_PROGRESS on timeout or reschedules
// itself for the next interval.
void Download::_SchedProgressTracking()
{
    if (_taskThread.IsScheduled(&_progressTracker))
    {
        return;
    }
    _taskThread.Sched([this]()
    {
        if (_status.State == DownloadState::Transferring)
        {
            const bool fTimedOut = _progressTracker.CheckProgress(_status.BytesTransferred, _MaxNoProgressIntervals());
            if (fTimedOut)
            {
                // Preserve the last http error as the extended error for diagnosis
                _Pause();
                _status._Paused(DO_E_DOWNLOAD_NO_PROGRESS, _status.Error);
            }
            else
            {
                _SchedProgressTracking();
            }
        }
    }, g_progressTrackerCheckInterval, &_progressTracker);
}
// Removes all pending task-thread work owned by this download:
// retry/completion tasks (keyed on 'this') and the progress-tracking task
// (keyed on &_progressTracker).
void Download::_CancelTasks()
{
    _taskThread.Unschedule(this);
    _taskThread.Unschedule(&_progressTracker);
}
// Number of consecutive no-progress tracker intervals tolerated before the
// download is paused with DO_E_DOWNLOAD_NO_PROGRESS.
// A caller-specified timeout is converted into a rounded interval count.
// Otherwise a built-in default applies, with a special (MCC-specific) value
// when talking to a connected-cache host that has no CDN fallback and the
// server is returning a 4xx status.
UINT Download::_MaxNoProgressIntervals() const
{
    if (_noProgressTimeout != _unsetTimeout)
    {
        // Caller supplied a timeout: express it as a count of tracker check intervals
        const auto intervals = std::round(std::chrono::duration<double>(_noProgressTimeout) / g_progressTrackerCheckInterval);
        DO_ASSERT(intervals >= 1);
        return static_cast<UINT>(intervals);
    }
    // Special case for MCC without CDN fallback and 4xx errors
    const bool fMccClientError = !_mccHost.empty() && _mccManager.NoFallback() && HttpAgent::IsClientError(_httpStatusCode);
    return fMccClientError ? g_progressTrackerMaxNoProgressIntervalsWithMCC : g_progressTrackerMaxNoProgressIntervals;
}
// IHttpAgentEvents
// Invoked on the http agent's callback thread when response headers arrive.
// Captures status code, headers and content length, then applies them to the
// download's members asynchronously on the task thread.
HRESULT Download::OnHeadersAvailable(UINT64 httpContext, UINT64) try
{
    // Capture relevant data and update internal members asynchronously.
    // Initialize the locals: the Query* calls are allowed to fail (logged only),
    // and for status codes other than 200/206 bytesTotal is never assigned —
    // without the initializers we would log and store indeterminate values.
    UINT httpStatusCode = 0;
    std::string responseHeaders;
    LOG_IF_FAILED(_httpAgent->QueryStatusCode(httpContext, &httpStatusCode));
    LOG_IF_FAILED(_httpAgent->QueryHeaders(httpContext, nullptr, responseHeaders));
    // bytesTotal is required for resume after a pause/error
    UINT64 bytesTotal = 0;
    if (httpStatusCode == HTTP_STATUS_OK)
    {
        RETURN_IF_FAILED(_httpAgent->QueryContentLength(httpContext, &bytesTotal));
    }
    else if (httpStatusCode == HTTP_STATUS_PARTIAL_CONTENT)
    {
        RETURN_IF_FAILED(_httpAgent->QueryContentLengthFromRange(httpContext, &bytesTotal));
    }
    DoLogInfo("%s, http_status: %d, content_length: %llu, headers:\n%s",
        GuidToString(_id).data(), httpStatusCode, bytesTotal, responseHeaders.data());
    _taskThread.Sched([this, httpStatusCode, bytesTotal, responseHeaders = std::move(responseHeaders)]()
    {
        _httpStatusCode = httpStatusCode;
        _responseHeaders = std::move(responseHeaders);
        _status.BytesTotal = bytesTotal;
    }, this);
    return S_OK;
} CATCH_RETURN()
// IHttpAgentEvents
// Response body data arrives here on the http agent's callback thread.
// Writes the block to the destination file synchronously, then posts the
// byte-count update to the task thread.
HRESULT Download::OnData(_In_reads_bytes_(cbData) BYTE* pData, UINT cbData, UINT64, UINT64) try
{
    // Use the stream position delta to verify the whole block was written
    const auto before = _fileStream->tellp();
    _fileStream->write(reinterpret_cast<const char*>(pData), cbData);
    const auto after = _fileStream->tellp();
    _fileStream->flush();
    DO_ASSERT(before < after);
    RETURN_HR_IF(HRESULT_FROM_WIN32(ERROR_BAD_LENGTH), (after - before) != cbData);
    _taskThread.Sched([this, cbData]()
    {
        _status.BytesTransferred += cbData;
    }, this);
    return S_OK;
} CATCH_RETURN()
// IHttpAgentEvents
// Invoked on the http agent's callback thread when the request finishes.
// On success, schedules the transition to Transferred. On failure, captures
// the status code/headers and schedules failure handling on the task thread:
// transient (no network) errors pause-with-retry, fatal 4xx without MCC
// pauses permanently, everything else stays in Transferring and retries
// after a back-off delay.
HRESULT Download::OnComplete(HRESULT hResult, UINT64 httpContext, UINT64)
{
    try
    {
        if (SUCCEEDED(hResult))
        {
            _taskThread.Sched([this]()
            {
                _fHttpRequestActive = false;
                _timer.Stop();
                _status._Transferred();
            }, this);
        }
        else
        {
            // OnHeadersAvailable might not have been called in the failure case depending on
            // when the failure occurred - upon connecting, or while reading response data.
            // Initialize httpStatusCode: QueryStatusCode is allowed to fail (logged only),
            // in which case the value would otherwise be read uninitialized below.
            UINT httpStatusCode = 0;
            std::string responseHeaders;
            LOG_IF_FAILED(_httpAgent->QueryStatusCode(httpContext, &httpStatusCode));
            LOG_IF_FAILED(_httpAgent->QueryHeaders(httpContext, nullptr, responseHeaders));
            _taskThread.Sched([this, hResult, httpStatusCode, responseHeaders = std::move(responseHeaders)]()
            {
                _fHttpRequestActive = false;
                _httpStatusCode = httpStatusCode;
                _responseHeaders = std::move(responseHeaders);
                if (!NetworkMonitor::IsConnected())
                {
                    _HandleTransientError(DO_E_BLOCKED_BY_NO_NETWORK);
                    return;
                }
                // Fail fast on certain http errors if we are not using MCC.
                // Logic differs slightly when MCC is used. See _MaxNoProgressIntervals().
                if (_mccHost.empty() && HttpAgent::IsClientError(_httpStatusCode))
                {
                    DoLogInfoHr(hResult, "%s, fatal failure, http_status: %d, headers:\n%s",
                        GuidToString(_id).data(), _httpStatusCode, _responseHeaders.data());
                    _Pause();
                    _status._Paused(hResult);
                    return;
                }
                // Make note of the failure and stay in Transferring state for retry
                _status.Error = hResult;
                _progressTracker.OnDownloadFailure();
                std::chrono::seconds retryDelay = _progressTracker.NextRetryDelay();
                // Report error to MCC manager if MCC was used
                if (!_mccHost.empty())
                {
                    // We were using MCC. If it is time to fallback to original URL, it should happen without delay.
                    const bool isFallbackDue = _mccManager.ReportHostError(hResult, _mccHost);
                    if (isFallbackDue)
                    {
                        retryDelay = std::chrono::seconds(0);
                    }
                }
                DoLogInfoHr(hResult, "%s, failure, will retry in %lld seconds, http_status: %d, headers:\n%s",
                    GuidToString(_id).data(), retryDelay.count(), _httpStatusCode, _responseHeaders.data());
                _taskThread.Sched([this]()
                {
                    // Nothing to do if we moved out of Transferring state in the meantime or
                    // if the http request was already made by a pause-resume cycle.
                    if ((_status.State == DownloadState::Transferring) && !_fHttpRequestActive)
                    {
                        _SendHttpRequest();
                    }
                }, retryDelay, this);
            }, this);
        }
    } CATCH_LOG()
    return S_OK;
}
std::string SwapUrlHostNameForMCC(const std::string& url, const std::string& newHostname, UINT16 port)
{
// Switch the hostname and add the original hostname as a query param
web::uri inputUri(url);
web::uri_builder outputUri(inputUri);
outputUri.set_host(newHostname);
if (port != INTERNET_DEFAULT_PORT)
{
outputUri.set_port(port);
}
outputUri.append_query("cacheHostOrigin", inputUri.host());
return outputUri.to_string();
}

Просмотреть файл

@ -0,0 +1,125 @@
#pragma once
#include <chrono>
#include <memory>
#include "do_guid.h"
#include "download_progress_tracker.h"
#include "download_status.h"
#include "http_agent_interface.h"
#include "proxy_finder.h"
#include "stop_watch.h"
class MCCManager;
class TaskThread;
// Keep this enum in sync with the full blown DO client in order to not
// have separate mappings in the SDK.
// Only Id, Uri, LocalPath and NoProgressTimeoutSeconds are supported in this client.
// Property identifiers accepted by Download::SetProperty/GetProperty.
// The numeric values mirror the full DO client (see comment above) so the
// SDK does not need a separate mapping table; most entries exist only for
// that value compatibility and are not implemented in this client.
enum class DownloadProperty
{
    Id = 0,                 // read-only GUID string
    Uri,                    // source URL
    ContentId,
    DisplayName,
    LocalPath,              // destination file path
    HttpCustomHeaders,
    CostPolicy,
    SecurityFlags,
    CallbackFreqPercent,
    CallbackFreqSeconds,
    NoProgressTimeoutSeconds, // stall timeout, as a decimal-seconds string
    ForegroundPriority,
    BlockingMode,
    CallbackInterface,
    StreamInterface,
    SecurityContext,
    NetworkToken,
    CorrelationVector,
    DecryptionInfo,
    IntegrityCheckInfo,
    IntegrityCheckMandatory,
    TotalSizeBytes,
    Invalid // keep this at the end
};
// A single download job: owns the http agent, destination file stream and
// retry/progress bookkeeping. Public state-change methods (Start/Pause/
// Finalize/Abort) are expected to run on the task thread (see DownloadManager,
// which marshals all calls through TaskThread::SchedBlock); http agent
// callbacks arrive on a separate thread and post their updates back to the
// task thread.
class Download : public IHttpAgentEvents
{
public:
    // Download(TaskThread& taskThread, REFGUID id); TODO implement along with persistence
    Download(MCCManager& mccManager, TaskThread& taskThread, std::string url = {}, std::string destFilePath = {});
    ~Download();
    // External state-change requests; see _PerformStateChange for the allowed transitions
    void Start();
    void Pause();
    void Finalize();
    void Abort();
    void SetProperty(DownloadProperty key, const std::string& value);
    std::string GetProperty(DownloadProperty key) const;
    DownloadStatus GetStatus() const;
    // Accessors used by telemetry/REST layers
    const GUID& GetId() const { return _id; }
    const std::string& GetUrl() const { return _url; }
    const std::string& GetDestinationPath() const { return _destFilePath; }
    const std::string& GetMCCHost() const { return _mccHost; }
    std::chrono::milliseconds GetElapsedTime() const { return _timer.GetElapsedInterval(); }
    UINT HttpStatusCode() const { return _httpStatusCode; }
    const std::string& ResponseHeaders() const { return _responseHeaders; }
    const DownloadStatus& Status() const { return _status; }
private:
    // Sentinel meaning "caller did not set NoProgressTimeoutSeconds"
    static const std::chrono::seconds _unsetTimeout;
    MCCManager& _mccManager;
    TaskThread& _taskThread;
    // _fileStream and _httpAgent members are accessed on both the taskthread
    // and http_agent callback thread. See _Pause and _Finalize for special handling.
    // Everything else is accessed only on the taskthread.
    GUID _id;
    std::string _url;
    std::string _destFilePath;
    std::chrono::seconds _noProgressTimeout { _unsetTimeout };
    DownloadStatus _status;
    DownloadProgressTracker _progressTracker;
    StopWatch _timer;
    std::unique_ptr<std::fstream> _fileStream;
    std::unique_ptr<IHttpAgent> _httpAgent;
    std::string _responseHeaders;
    UINT _httpStatusCode { 0 };
    ProxyList _proxyList;
    // The MCC host name we are using for the current http request, if any
    std::string _mccHost;
    // This flag will indicate whether we have an outstanding http request or not.
    // Need this because we will not move out of Transferring state while waiting before a retry.
    bool _fHttpRequestActive { false };
private:
    // Internal transition helpers invoked by _PerformStateChange
    void _PerformStateChange(DownloadState newState);
    void _Start();
    void _Resume();
    void _Pause();
    void _Finalize();
    void _Abort();
    void _HandleTransientError(HRESULT hr);
    void _ResumeAfterTransientError();
    void _SendHttpRequest();
    void _SchedProgressTracking();
    void _CancelTasks();
    UINT _MaxNoProgressIntervals() const;
    // IHttpAgentEvents (invoked on the http agent's callback thread)
    HRESULT OnHeadersAvailable(UINT64 httpContext, UINT64) override;
    HRESULT OnData(_In_reads_bytes_(cbData) BYTE* pData, UINT cbData, UINT64, UINT64) override;
    HRESULT OnComplete(HRESULT hResult, UINT64 httpContext, UINT64) override;
};

Просмотреть файл

@ -0,0 +1,117 @@
#include "do_common.h"
#include "download_manager.h"
#include <utility>
#include "do_error.h"
#include "download.h"
// Constructs the manager; downloads are accepted (_fRunning defaults to true)
// until IsIdle() observes an empty download list and flips it off.
DownloadManager::DownloadManager(ConfigManager& config) :
    _config(config),
    _mccManager(config)
{
}
// Creates a new Download and registers it in the id -> download map.
// Returns the new download's GUID string id.
// Throws DO_E_NO_SERVICE if the manager has already begun shutting down
// (see IsIdle), closing the race between shutdown and a new request.
std::string DownloadManager::CreateDownload(std::string url, std::string destFilePath)
{
    // Parameters are taken by value; move them into the Download instead of copying again
    auto newDownload = std::make_shared<Download>(_mccManager, _taskThread, std::move(url), std::move(destFilePath));
    const std::string downloadId = newDownload->GetProperty(DownloadProperty::Id);
    std::unique_lock<std::shared_timed_mutex> lock(_downloadsMtx);
    DO_ASSERT(_downloads.find(downloadId) == _downloads.end());
    THROW_HR_IF(DO_E_NO_SERVICE, !_fRunning);
    _downloads.emplace(downloadId, std::move(newDownload));
    return downloadId;
}
// Starts (or resumes) the download with the given id.
// The work runs on the task thread via SchedBlock so any error surfaces
// to the caller synchronously.
void DownloadManager::StartDownload(const std::string& downloadId) const
{
    auto download = _GetDownload(downloadId);
    _taskThread.SchedBlock([&download]()
    {
        download->Start();
    });
}
// Pauses the download with the given id (runs on the task thread, blocking).
void DownloadManager::PauseDownload(const std::string& downloadId) const
{
    auto download = _GetDownload(downloadId);
    _taskThread.SchedBlock([&download]()
    {
        download->Pause();
    });
}
// Finalizes the download and removes it from the manager's map.
// Note: if Finalize throws, the entry is not erased (see TODO below).
void DownloadManager::FinalizeDownload(const std::string& downloadId)
{
    auto download = _GetDownload(downloadId);
    _taskThread.SchedBlock([&download]()
    {
        download->Finalize();
    });
    std::unique_lock<std::shared_timed_mutex> lock(_downloadsMtx);
    _downloads.erase(downloadId);
    // TODO(shishirb) remove from _downloads list regardless of whether Finalize succeeded/failed?
}
// Aborts the download (cancelling the transfer and deleting the partial
// file) and removes it from the manager's map.
void DownloadManager::AbortDownload(const std::string& downloadId)
{
    auto download = _GetDownload(downloadId);
    _taskThread.SchedBlock([&download]()
    {
        download->Abort();
    });
    std::unique_lock<std::shared_timed_mutex> lock(_downloadsMtx);
    _downloads.erase(downloadId);
}
// Sets a property on the download (runs on the task thread, blocking).
// See Download::SetProperty for the supported keys and thrown errors.
void DownloadManager::SetDownloadProperty(const std::string& downloadId, DownloadProperty key, const std::string& value)
{
    auto download = _GetDownload(downloadId);
    _taskThread.SchedBlock([&download, key, &value]()
    {
        download->SetProperty(key, value);
    });
}
// Reads a property from the download (runs on the task thread, blocking).
// See Download::GetProperty for the supported keys.
std::string DownloadManager::GetDownloadProperty(const std::string& downloadId, DownloadProperty key) const
{
    auto download = _GetDownload(downloadId);
    std::string value;
    _taskThread.SchedBlock([&download, key, &value]()
    {
        value = download->GetProperty(key);
    });
    return value;
}
// Returns a snapshot of the download's status (runs on the task thread,
// blocking).
DownloadStatus DownloadManager::GetDownloadStatus(const std::string& downloadId) const
{
    auto download = _GetDownload(downloadId);
    // Scheduled to the end of the queue in order to get all the updates
    // that might be pending on task thread.
    DownloadStatus status;
    _taskThread.SchedBlock([&download, &status]()
    {
        status = download->GetStatus();
    }, false);
    return status;
}
// Returns true when no downloads exist. As a side effect, flips _fRunning
// off when idle so that CreateDownload starts failing with DO_E_NO_SERVICE.
bool DownloadManager::IsIdle() const
{
    // Reset _fRunning if we are idle to disallow new downloads.
    // This handles the race between shutdown and new download request coming in.
    std::unique_lock<std::shared_timed_mutex> lock(_downloadsMtx);
    _fRunning = !_downloads.empty();
    return !_fRunning;
}
// Looks up a download by id under a shared (reader) lock.
// Throws E_NOT_SET when no download with that id exists.
std::shared_ptr<Download> DownloadManager::_GetDownload(const std::string& downloadId) const
{
    std::shared_lock<std::shared_timed_mutex> lock(_downloadsMtx);
    auto it = _downloads.find(downloadId);
    THROW_HR_IF(E_NOT_SET, it == _downloads.end());
    return it->second;
}

Просмотреть файл

@ -0,0 +1,42 @@
#pragma once
#include <shared_mutex>
#include <unordered_map>
#include "mcc_manager.h"
#include "task_thread.h"
enum class DownloadProperty;
class Download;
struct DownloadStatus;
// Owns the set of active downloads (keyed by GUID string id) and the single
// task thread on which all Download work executes. Public methods may be
// called from any thread: lookups are guarded by _downloadsMtx and download
// operations are marshalled onto the task thread.
class DownloadManager
{
    friend std::shared_ptr<Download> DownloadForId(const DownloadManager& manager, const std::string& id);
public:
    DownloadManager(ConfigManager& config);
    // Creates a download and returns its GUID string id
    std::string CreateDownload(std::string url = {}, std::string destFilePath = {});
    void StartDownload(const std::string& downloadId) const;
    void PauseDownload(const std::string& downloadId) const;
    void FinalizeDownload(const std::string& downloadId);
    void AbortDownload(const std::string& downloadId);
    void SetDownloadProperty(const std::string& downloadId, DownloadProperty key, const std::string& value);
    std::string GetDownloadProperty(const std::string& downloadId, DownloadProperty key) const;
    DownloadStatus GetDownloadStatus(const std::string& downloadId) const;
    // True when no downloads exist; also latches _fRunning off (see .cpp)
    bool IsIdle() const;
private:
    mutable TaskThread _taskThread;
    std::unordered_map<std::string, std::shared_ptr<Download>> _downloads;
    // Guards against accepting new downloads once shutdown has begun
    mutable bool _fRunning { true };
    mutable std::shared_timed_mutex _downloadsMtx;
    ConfigManager& _config;
    MCCManager _mccManager;
private:
    std::shared_ptr<Download> _GetDownload(const std::string& downloadId) const;
};

Просмотреть файл

@ -0,0 +1,48 @@
#pragma once
#include <chrono>
#include "config_defaults.h"
class DownloadProgressTracker
{
public:
bool CheckProgress(UINT64 newBytesTransferred, UINT maxNoProgressIntervals = g_progressTrackerMaxNoProgressIntervals)
{
DO_ASSERT(newBytesTransferred >= _lastSeenBytesTransferred);
if (newBytesTransferred > _lastSeenBytesTransferred)
{
_numNoProgressIntervals = 0;
_lastSeenBytesTransferred = newBytesTransferred;
}
else
{
_numNoProgressIntervals++;
}
DoLogInfo("Bytes transferred so far: %llu, no-progress intervals: [cur %u, max %u]",
_lastSeenBytesTransferred, _numNoProgressIntervals, maxNoProgressIntervals);
return (_numNoProgressIntervals >= maxNoProgressIntervals);
}
void OnDownloadFailure()
{
_nextRetryDelay = std::min(g_progressTrackerMaxRetryDelay, _nextRetryDelay * 2);
}
// Forget the count of no-progress until now, retry delay will start from minimum next time
void Reset()
{
_nextRetryDelay = std::chrono::seconds(1);
_numNoProgressIntervals = 0;
}
auto NextRetryDelay() const
{
return _nextRetryDelay;
}
private:
std::chrono::seconds _nextRetryDelay { 1 };
UINT _numNoProgressIntervals { 0 };
UINT64 _lastSeenBytesTransferred { 0 };
};

Просмотреть файл

@ -0,0 +1,53 @@
#pragma once
// Lifecycle states of a download. Created/Transferred are reached only
// internally; Transferring, Paused, Finalized and Aborted can be requested
// externally. Finalized and Aborted are terminal.
enum class DownloadState
{
    Created,
    Transferring,
    Transferred,
    Finalized,
    Aborted,
    Paused,
};
// Progress and error snapshot of a download. State mutation is restricted
// to Download (friend) through the private _* transition helpers; everyone
// else reads it as a value.
struct DownloadStatus
{
    friend class Download;
    UINT64 BytesTotal { 0 };
    UINT64 BytesTransferred { 0 };
    DownloadState State { DownloadState::Created };
    HRESULT Error { S_OK };
    HRESULT ExtendedError { S_OK };
    // Transient errors are encoded as Paused with the failure stored only in
    // ExtendedError (Error stays S_OK); see Download::_HandleTransientError.
    bool IsTransientError() const noexcept
    {
        return (State == DownloadState::Paused) && (Error == S_OK) && FAILED(ExtendedError);
    }
private:
    // Entering Transferring clears both error codes
    void _Transferring()
    {
        State = DownloadState::Transferring;
        Error = S_OK;
        ExtendedError = S_OK;
    }
    void _Paused(HRESULT hrError = S_OK, HRESULT hrExtendedError = S_OK)
    {
        State = DownloadState::Paused;
        Error = hrError;
        ExtendedError = hrExtendedError;
    }
    void _Transferred()
    {
        State = DownloadState::Transferred;
    }
    void _Finalized()
    {
        State = DownloadState::Finalized;
    }
    void _Aborted()
    {
        State = DownloadState::Aborted;
    }
};

Просмотреть файл

@ -0,0 +1,128 @@
// ----------------------------------------------------------------------------
// The agent is built to run as a daemon (aka service) that starts running as root
// and then drops permissions to 'do' user+group after runtime setup steps are completed.
// ----------------------------------------------------------------------------
#include "do_common.h"
#include <signal.h>
#include <chrono>
#include "do_event.h"
#include "do_persistence.h"
#include "do_version.h"
namespace msdoutil = microsoft::deliveryoptimization::util::details;
#include "config_manager.h"
#include "download_manager.h"
#include "proc_launch_helper.h"
#include "rest_http_controller.h"
#include "rest_port_advertiser.h"
#include "trace_sink.h"
using namespace std::chrono_literals; // NOLINT(build/namespaces) how else should we use chrono literals?
// Installs SIGINT/SIGTERM handlers and blocks the main thread until either
// a termination signal arrives or the supplied idle predicate returns true.
// The shutdown event is static because POSIX signal handlers cannot carry
// instance context.
class ProcessController
{
public:
    ProcessController()
    {
        // Do init work like registering signal control handler
        signal(SIGINT, _SignalHandler);
        signal(SIGTERM, _SignalHandler);
    }
    // Blocks until shutdown is signaled or fnIsIdle() reports idle.
    // Wakes up every idleTimeout to flush logs and poll the idle predicate.
    void WaitForShutdown(const std::function<bool()>& fnIsIdle)
    {
        constexpr auto idleTimeout = 60s;
        while (true)
        {
            if (_shutdownEvent.Wait(idleTimeout))
            {
                break;
            }
            // Use this opportunity to flush logs periodically
            TraceConsumer::getInstance().Flush();
            if (fnIsIdle())
            {
                DoLogInfo("Received idle notification. Initiating shutdown.");
                break;
            }
        }
    }
private:
    // Signal handler: only sets the event; all real work happens on the main thread
    static void _SignalHandler(int signalNumber)
    {
        if ((signalNumber == SIGINT) || (signalNumber == SIGTERM))
        {
            DoLogInfo("Received signal %d. Initiating shutdown.", signalNumber);
            _shutdownEvent.SetEvent();
        }
    }
    static ManualResetEvent _shutdownEvent;
};
ManualResetEvent ProcessController::_shutdownEvent;
// Daemon body: sets up paths/config/REST controller, writes the advertised
// port, drops root permissions (the agent starts as root; see the file
// header comment), initializes tracing, then blocks until shutdown.
// Returns S_OK on clean exit, or the failure HRESULT (via CATCH_RETURN).
HRESULT Run() try
{
    InitializeDOPaths();
    ConfigManager clientConfigs;
    auto downloadManager = std::make_shared<DownloadManager>(clientConfigs);
    RestHttpController controller(clientConfigs, downloadManager);
    controller.Start();
    DoLogInfo("HTTP controller listening at: %s", controller.ServerEndpoint().data());
    RestPortAdvertiser portAdvertiser(controller.Port());
    DoLogInfo("Port number written to %s", portAdvertiser.OutFilePath().data());
    // Setup that may need elevated permissions is done; drop to the 'do' user
    DropPermissions();
    RETURN_IF_FAILED(TraceConsumer::getInstance().Initialize());
    DoTraceLoggingRegister();
    DoLogInfo("Started, %s", msdoutil::ComponentVersion().c_str());
    ProcessController procController;
    procController.WaitForShutdown([&downloadManager]()
    {
        // For now, idle-shutdown mechanism is not applicable when running as a service.
        // The service will be started on boot and will be restarted automatically on failure.
        // SDK can assume docs is running and thus simplifies code for private preview.
        return false;
    });
    DoLogInfo("Exiting...");
    return S_OK;
} CATCH_RETURN()
// Process entry point. Handles the --version style invocation, runs the
// daemon body, and makes sure trace logging is unregistered/finalized on
// both the normal and the exception path before returning the HRESULT as
// the process exit code.
int main(int argc, char** argv) try
{
    if (msdoutil::OutputVersionIfNeeded(argc, argv))
    {
        return 0;
    }
    const HRESULT hr = LOG_IF_FAILED(Run());
    DoTraceLoggingUnregister();
    TraceConsumer::getInstance().Finalize();
    printf("Reached end of main, hr: %x\n", hr);
    return hr;
}
catch (...)
{
    // Mirror the normal-path teardown so tracing is shut down cleanly
    const HRESULT hrEx = LOG_CAUGHT_EXCEPTION();
    printf("Caught exception in main, hr: %x\n", hrEx);
    DoTraceLoggingUnregister();
    TraceConsumer::getInstance().Finalize();
    return hrEx;
}

Просмотреть файл

@ -0,0 +1,51 @@
#pragma once
// Detect 64-bit targets (x86-64 and ARM64, both MSVC and GCC/Clang macros)
#if defined(__x86_64__) || defined(_M_X64) || defined(_M_ARM64) || defined(__aarch64__)
#define DO_ENV_64BIT
#endif
// Fixed-width integer aliases matching the Windows SDK names, so shared
// code can use the same spellings on non-Windows builds
using INT8 = signed char;
using UINT8 = unsigned char;
using INT16 = signed short;
using UINT16 = unsigned short;
using INT32 = signed int;
using UINT32 = unsigned int;
using INT64 = long long signed int;
using UINT64 = long long unsigned int;
using BYTE = UINT8;
using INT = INT32;
using UINT = UINT32;
// We define HRESULT to be a signed 32bit integer to match Windows.
// Note: Can't use long because hexadecimal literals are forced to be unsigned on GCC, per the standard.
// That is, (long)0x80070490L != -2147023728 and ((long)0x80070490L < 0) evaluates to false.
using HRESULT = INT32;
// Pointer-sized integers
#ifdef DO_ENV_64BIT
typedef INT64 INT_PTR;
typedef UINT64 UINT_PTR;
#else
typedef int INT_PTR;
typedef unsigned int UINT_PTR;
#endif
typedef int BOOL;
typedef char CHAR, *PSTR;
typedef const char *PCSTR;
#ifndef FALSE
#define FALSE 0
#endif
#ifndef TRUE
#define TRUE 1
#endif
// Max-value constants matching the Windows SDK definitions
#ifndef MAXUINT32
#define MAXUINT32 ((UINT32)~((UINT32)0))
#define MAXUINT64 ((UINT64)~((UINT64)0))
#define MAXUINT ((UINT)~((UINT)0))
#endif

Просмотреть файл

@ -0,0 +1,19 @@
#pragma once
#include "trace_src.h"
// DO_ASSERTMSG: in DEBUG builds, logs the failed expression through the
// trace system and then calls assert(). In release builds it expands to
// nothing — note the expression is NOT evaluated, so it must be side-effect
// free.
#ifdef DEBUG
#define DO_ASSERTMSG(_msg, _exp) \
    ((!(_exp)) ? \
        ::LogMessage(EVENT_LEVEL_ERROR, __FUNCTION__, __LINE__, "Assert (%s): %s", #_exp, _msg), \
        assert(_exp), TRUE \
        : TRUE)
#else // !DEBUG
#define DO_ASSERTMSG(_msg, _exp)
#endif // DEBUG
// Convenience form with a generic message
#define DO_ASSERT(_exp) DO_ASSERTMSG("Failed", _exp)

Просмотреть файл

@ -0,0 +1,47 @@
#pragma once
// Note: Secure C string functions like wcscat_s, swscanf_s require __STDC_WANT_LIB_EXT1__
// to be defined prior to including stdio.h. However GCC hasn't implemented these functions
// yet so we conditionally compile with __STDC_LIB_EXT1__ currently.
// On the windows side, these are always available and we use __STDC_SECURE_LIB__ to test
// for presence of these functions.
#ifndef __STDC_WANT_LIB_EXT1__
#define __STDC_WANT_LIB_EXT1__ 1
#endif
#include <cstddef> // size_t
#include <string>
#include <boost/log/core.hpp>
#include <boost/log/trivial.hpp>
// Default to a DEBUG build when neither DEBUG nor NDEBUG was specified
#if !defined(DEBUG) && !defined(NDEBUG)
#define DEBUG
#endif
// Element count of a C array (do not use on pointers)
#ifndef ARRAYSIZE
#define ARRAYSIZE(A) (sizeof(A)/sizeof((A)[0]))
#endif
#ifndef INTERNET_DEFAULT_PORT
#define INTERNET_DEFAULT_PORT 0 // use the protocol-specific default
#endif
#include "sal_undef.h"
// Assign the given value to an optional output parameter.
// Makes code more concise by removing trivial if (outParam) blocks.
template <typename T>
inline void assign_to_opt_param(_Out_opt_ T* outParam, T val)
{
    if (outParam != nullptr)
    {
        *outParam = val;
    }
}
#include "basic_types.h"
#include "error_macros.h" // required by headers below
#include "do_assert.h"
#include "hresult_helpers.h"
#include "trace_src.h"

Просмотреть файл

@ -0,0 +1,67 @@
#ifndef __DELIVERYOPTIMIZATION_ERROR_H__
#define __DELIVERYOPTIMIZATION_ERROR_H__
#if defined (_MSC_VER) && (_MSC_VER >= 1020)
#pragma once
#endif
// Definitions of DeliveryOptimization error codes
//
// Error codes are 32 bit values laid out as follows:
//
// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
// +---+-+-+-----------------------+-------------------------------+
// |Sev|C|R| Facility | Code |
// +---+-+-+-----------------------+-------------------------------+
// Sev - is the severity code
// 00 - Success
// 01 - Informational
// 10 - Warning
// 11 - Error
// C - is the Customer code flag
// R - is a reserved bit
// Facility - is the facility code
// Code - is the facility's status code
//
// Severity used is 2 (warning). This was chosen to make the codes similar (start with 0x80) to other components like WU.
// Facility code used is 208 (0xD0). See winerror.h for FACILITY_DELIVERY_OPTIMIZATION.
// Note that the uniqueness of facility codes isn't really enforced in Windows.
//
// Historically, error codes were separated into zones with a macro identifying each zone.
// Currently, we only care about the transient error zone so only this macro is defined.
// The error codes are still separated with sufficient buffer left to add errors in each zone.
// Zone masks: the low word of each code carries a zone; transient errors
// (retryable conditions like loss of network) live in DO_TRANSIENT_ZONE
#define DO_ZONE_MASK 0xF800
#define DO_TRANSIENT_ZONE 0x3800
// Service-level codes
#define DO_E_NO_SERVICE HRESULT(0x80D01001L) // Delivery Optimization was unable to provide the service
// Download job codes
#define DO_E_DOWNLOAD_NO_PROGRESS HRESULT(0x80D02002L) // Download of a file saw no progress within the defined period
#define DO_E_JOB_NOT_FOUND HRESULT(0x80D02003L) // Job was not found
#define DO_E_NO_DOWNLOADS HRESULT(0x80D02005L) // No downloads currently exist
#define DO_E_JOB_TOO_OLD HRESULT(0x80D0200CL) // Job has neither completed nor has it been cancelled prior to reaching the maximum age threshold
#define DO_E_UNKNOWN_PROPERTY_ID HRESULT(0x80D02011L) // SetProperty() or GetProperty() called with an unknown property ID
#define DO_E_READ_ONLY_PROPERTY HRESULT(0x80D02012L) // Unable to call SetProperty() on a read-only property
#define DO_E_INVALID_STATE HRESULT(0x80D02013L) // The requested action is not allowed in the current job state. The job might have been canceled or completed transferring. It is in a read-only state now.
#define DO_E_FILE_DOWNLOADSINK_UNSPECIFIED HRESULT(0x80D02018L) // Unable to start a download because no download sink (either local file or stream interface) was specified
// IDODownload interface
#define DO_E_DOWNLOAD_NO_URI HRESULT(0x80D02200L) // The download was started without providing a URI
// Transient conditions
#define DO_E_BLOCKED_BY_NO_NETWORK HRESULT(0x80D03805L) // Download paused due to loss of network connectivity
// HTTP
#define DO_E_HTTP_BLOCKSIZE_MISMATCH HRESULT(0x80D05001L) // HTTP server returned a response with data size not equal to what was requested
#define DO_E_HTTP_CERT_VALIDATION HRESULT(0x80D05002L) // The Http server certificate validation has failed
#define DO_E_INVALID_RANGE HRESULT(0x80D05010L) // The specified byte range is invalid
#define DO_E_INSUFFICIENT_RANGE_SUPPORT HRESULT(0x80D05011L) // The server does not support the necessary HTTP protocol. Delivery Optimization (DO) requires that the server support the Range protocol header.
#define DO_E_OVERLAPPING_RANGES HRESULT(0x80D05012L) // The list of byte ranges contains some overlapping ranges, which are not supported
#endif // __DELIVERYOPTIMIZATION_ERROR_H__

Просмотреть файл

@ -0,0 +1,109 @@
#pragma once
#include <boost/system/error_code.hpp>
#include "basic_types.h"
#ifndef _WIN32
#define FACILITY_WIN32 7
#define FACILITY_DELIVERY_OPTIMIZATION 208
#define HRESULT_FACILITY(hr) (((hr) >> 16) & 0x1FFF)
#define HRESULT_CODE(hr) ((hr) & 0xFFFF)
#define ERROR_FILE_NOT_FOUND 2L
#define ERROR_NOT_ENOUGH_MEMORY 8L
#define ERROR_INVALID_DATA 13L
#define ERROR_BAD_LENGTH 24L
#define ERROR_SHARING_VIOLATION 32L
#define ERROR_FILE_EXISTS 80L
#define ERROR_DISK_FULL 112L
#define ERROR_ALREADY_EXISTS 183L
#define ERROR_FILE_TOO_LARGE 223L
#define ERROR_NO_MORE_ITEMS 259L
#define ERROR_UNHANDLED_EXCEPTION 574L
#define ERROR_OPERATION_ABORTED 955L
#define ERROR_IO_PENDING 997L
#define ERROR_NOT_FOUND 1168L
#define ERROR_DISK_QUOTA_EXCEEDED 1295L
#define ERROR_NO_SYSTEM_RESOURCES 1450L
#define ERROR_TIMEOUT 1460L
#define ERROR_INVALID_DATATYPE 1804L
#define ERROR_INVALID_STATE 5023L
#define HTTP_STATUS_OK 200 // request completed
#define HTTP_STATUS_FORBIDDEN 403 // request forbidden
#define HTTP_STATUS_PARTIAL_CONTENT 206 // partial GET fulfilled
// Maps a Win32 error code to an HRESULT, mirroring the Windows SDK macro of
// the same name: values <= 0 are assumed to already be HRESULTs and pass
// through unchanged; positive codes get the FACILITY_WIN32 facility and the
// failure severity bit.
inline constexpr HRESULT HRESULT_FROM_WIN32(unsigned long x)
{
    return (HRESULT)(x) <= 0 ? (HRESULT)(x) : (HRESULT) (((x) & 0x0000FFFF) | (FACILITY_WIN32 << 16) | 0x80000000);
}
#define S_OK ((HRESULT)0L)
#define S_FALSE ((HRESULT)1L)
#define E_NOTIMPL ((HRESULT)0x80004001L)
#define E_ABORT ((HRESULT)0x80004004L)
#define E_FAIL ((HRESULT)0x80004005L)
#define E_ACCESSDENIED ((HRESULT)0x80070005L)
#define E_UNEXPECTED ((HRESULT)0x8000FFFFL)
#define E_OUTOFMEMORY ((HRESULT)0x8007000EL)
#define E_INVALIDARG ((HRESULT)0x80070057L)
#define E_NOT_SET ((HRESULT)0x80070490L)
#define E_NOT_VALID_STATE HRESULT_FROM_WIN32(ERROR_INVALID_STATE)
#define WININET_E_TIMEOUT ((HRESULT)0x80072EE2L)
#define WININET_E_HEADER_NOT_FOUND ((HRESULT)0x80072F76L)
#define INET_E_INVALID_URL ((HRESULT)0x800C0002L)
#define HTTP_E_STATUS_UNEXPECTED ((HRESULT)0x80190001L)
#define HTTP_E_STATUS_UNEXPECTED_REDIRECTION ((HRESULT)0x80190003L)
#define HTTP_E_STATUS_BAD_REQUEST ((HRESULT)0x80190190L)
#define HTTP_E_STATUS_DENIED ((HRESULT)0x80190191L)
#define HTTP_E_STATUS_FORBIDDEN ((HRESULT)0x80190193L)
#define HTTP_E_STATUS_NOT_FOUND ((HRESULT)0x80190194L)
#define HTTP_E_STATUS_NONE_ACCEPTABLE ((HRESULT)0x80190196L)
#define HTTP_E_STATUS_PROXY_AUTH_REQ ((HRESULT)0x80190197L)
#define HTTP_E_STATUS_REQUEST_TIMEOUT ((HRESULT)0x80190198L)
#define HTTP_E_STATUS_REQUEST_TOO_LARGE ((HRESULT)0x8019019DL)
#define HTTP_E_STATUS_SERVER_ERROR ((HRESULT)0x801901F4L)
#define HTTP_E_STATUS_NOT_SUPPORTED ((HRESULT)0x801901F5L)
#define HTTP_E_STATUS_BAD_GATEWAY ((HRESULT)0x801901F6L)
#define WEB_E_JSON_VALUE_NOT_FOUND ((HRESULT)0x83750009L)
#define SUCCEEDED(hr) (((HRESULT)(hr)) >= 0)
#define FAILED(hr) (((HRESULT)(hr)) < 0)
static_assert(SUCCEEDED(S_OK), "SUCCEEDED macro does not recognize S_OK");
static_assert(SUCCEEDED(S_FALSE), "SUCCEEDED macro does not recognize S_FALSE");
static_assert(FAILED(E_NOT_SET), "FAILED macro does not recognize failure code");
#endif // !_WIN32
#define E_UNSUPPORTED ((HRESULT)0x80070032L) // 0x32 = 50L = ERROR_NOT_SUPPORTED
#ifndef STRSAFE_E_INSUFFICIENT_BUFFER
#define STRSAFE_E_INSUFFICIENT_BUFFER ((HRESULT)0x8007007AL) // 0x7A = 122L = ERROR_INSUFFICIENT_BUFFER
#endif
#ifndef WINHTTP_ERROR_BASE
#define WINHTTP_ERROR_BASE 12000
#define ERROR_WINHTTP_TIMEOUT (WINHTTP_ERROR_BASE + 2)
#define ERROR_WINHTTP_UNRECOGNIZED_SCHEME (WINHTTP_ERROR_BASE + 6)
#define ERROR_WINHTTP_NAME_NOT_RESOLVED (WINHTTP_ERROR_BASE + 7)
#define ERROR_WINHTTP_CANNOT_CONNECT (WINHTTP_ERROR_BASE + 29)
#endif
// Convert std c++ and boost errors to NTSTATUS-like values but with 0xD0 facility (0xC0D00005 for example).
#define HRESULT_FROM_XPLAT_SYSERR(err) (0xC0000000 | (FACILITY_DELIVERY_OPTIMIZATION << 16) | ((HRESULT)(err) & 0x0000FFFF))
// Converts a std::error_code into an HRESULT with the DO facility.
// A cleared (success) error code maps to S_OK.
inline HRESULT HRESULT_FROM_STDCPP(const std::error_code& ec)
{
    if (!ec)
    {
        return S_OK;
    }
    return HRESULT_FROM_XPLAT_SYSERR(ec.value());
}
// Converts a boost::system::error_code into an HRESULT with the DO facility.
// A cleared (success) error code maps to S_OK.
inline HRESULT HRESULT_FROM_BOOST(const boost::system::error_code& ec)
{
    if (!ec)
    {
        return S_OK;
    }
    return HRESULT_FROM_XPLAT_SYSERR(ec.value());
}

Просмотреть файл

@ -0,0 +1,137 @@
#pragma once
// SAL is not supported with compilers that aren't msvc, this file defs them as nothing so we can compile with gcc.
#ifndef _WIN32
#undef _In_
#define _In_
#undef _Out_
#define _Out_
#undef _Inout_
#define _Inout_
#undef _Inout_opt_
#define _Inout_opt_
#undef _Inout_updates_
#define _Inout_updates_(c)
#undef _In_z_
#define _In_z_
#undef _Inout_z_
#define _Inout_z_
#undef _In_reads_bytes_
#define _In_reads_bytes_(s)
#undef _In_reads_bytes_opt_
#define _In_reads_bytes_opt_(s)
#undef _Out_writes_
#define _Out_writes_(s)
#undef _Out_opt_
#define _Out_opt_
#undef _Outptr_
#define _Outptr_
#undef _Outptr_result_nullonfailure_
#define _Outptr_result_nullonfailure_
#undef _Out_writes_bytes_to_
#define _Out_writes_bytes_to_(s, c)
#undef _Outptr_result_maybenull_
#define _Outptr_result_maybenull_
#undef _Outptr_result_maybenull_z_
#define _Outptr_result_maybenull_z_
#undef _Out_z_cap_
#define _Out_z_cap_(s)
#undef _Outptr_result_buffer_
#define _Outptr_result_buffer_(s)
#undef _Out_writes_bytes_
#define _Out_writes_bytes_(s)
#undef _Out_writes_opt_
#define _Out_writes_opt_(s)
#undef _Out_writes_bytes_to_opt_
#define _Out_writes_bytes_to_opt_(s, c)
#undef _Out_writes_z_
#define _Out_writes_z_(c)
#undef _Out_writes_opt_z_
#define _Out_writes_opt_z_(c)
#undef _In_reads_
#define _In_reads_(s)
#undef _In_opt_
#define _In_opt_
#undef _In_opt_z_
#define _In_opt_z_
#undef _In_reads_opt_
#define _In_reads_opt_(s)
#undef _In_range_
#define _In_range_(low, hi)
#undef _In_count_
#define _In_count_(c)
#undef _COM_Outptr_
#define _COM_Outptr_
#undef _Printf_format_string_
#define _Printf_format_string_
#undef _Must_inspect_result_
#define _Must_inspect_result_
#undef _Deref_out_range_
#define _Deref_out_range_(low, hi)
#undef __fallthrough
#define __fallthrough
#undef _Requires_shared_lock_held_
#define _Requires_shared_lock_held_(l)
#undef _Requires_exclusive_lock_held_
#define _Requires_exclusive_lock_held_(l)
#undef _Requires_no_locks_held_
#define _Requires_no_locks_held_
#undef _Field_range_
#define _Field_range_(low, hi)
#undef _Always_
#define _Always_(s)
#undef _Post_z_
#define _Post_z_
// Fix: the #undef was missing the trailing underscore (`_Analysis_assume`),
// so a pre-existing definition of `_Analysis_assume_` (e.g. from real SAL
// headers) would not be removed before the redefinition below. Every other
// pair in this file undefs exactly the macro it defines.
#undef _Analysis_assume_
#define _Analysis_assume_
#undef _Null_terminated_
#define _Null_terminated_
#endif // !_WIN32
#undef _Memberinitializer_
#define _Memberinitializer_

Просмотреть файл

@ -0,0 +1,34 @@
#include "do_common.h"
#include "rest_api_params.h"
#include "string_ops.h"
#define INSERT_REST_API_PARAM(_p) RestApiParameters::_p, #_p
// Maintain the same order as in RestApiParameters
// Descriptor table for every REST API parameter.
// NOTE: Entries must stay in the same order as the RestApiParameters enum
// because Lookup(RestApiParameters) indexes this array by the enum value.
const RestApiParam RestApiParam::_knownParams[] =
{
    { INSERT_REST_API_PARAM(Id), DownloadProperty::Id, RestApiParamTypes::String },
    { INSERT_REST_API_PARAM(Uri), DownloadProperty::Uri, RestApiParamTypes::String },
    { INSERT_REST_API_PARAM(DownloadFilePath), DownloadProperty::LocalPath, RestApiParamTypes::String },
    { INSERT_REST_API_PARAM(NoProgressTimeoutSeconds), DownloadProperty::NoProgressTimeoutSeconds, RestApiParamTypes::UInt },
    // PropertyKey does not map to a download property (Invalid sentinel).
    { INSERT_REST_API_PARAM(PropertyKey), DownloadProperty::Invalid, RestApiParamTypes::String },
};
// Returns the descriptor for a known parameter ID.
// Assumes paramId is a valid enumerator: it is used directly as an index
// into _knownParams (which is ordered to match RestApiParameters).
const RestApiParam& RestApiParam::Lookup(RestApiParameters paramId) noexcept
{
    return _knownParams[static_cast<size_t>(paramId)];
}
// Finds the descriptor whose wire name matches stringId (case-insensitive).
// Logs a warning and returns nullptr when the name is not a known parameter.
const RestApiParam* RestApiParam::Lookup(const char* stringId) noexcept
{
    const size_t numParams = sizeof(_knownParams) / sizeof(_knownParams[0]);
    for (size_t i = 0; i < numParams; ++i)
    {
        if (StringCompareCaseInsensitive(_knownParams[i].stringId, stringId) == 0)
        {
            return &_knownParams[i];
        }
    }
    DoLogWarning("%s is not a known REST API param", stringId);
    return nullptr;
}

Просмотреть файл

@ -0,0 +1,34 @@
#pragma once
#include "download.h"
// Identifiers for all parameters accepted by the REST API.
// NOTE: Keep this order in sync with RestApiParam::_knownParams, which is
// indexed directly by these values.
enum class RestApiParameters
{
    Id,
    Uri,
    DownloadFilePath,
    NoProgressTimeoutSeconds,
    PropertyKey,
};
// Value types a REST API parameter can carry on the wire.
enum class RestApiParamTypes
{
    UInt,
    String,
};
// Describes one REST API parameter: its enum ID, wire name, the download
// property it maps to (DownloadProperty::Invalid if none), and value type.
struct RestApiParam
{
    RestApiParameters paramId;
    const char* stringId;   // wire/JSON name of the parameter
    DownloadProperty downloadPropertyId;
    RestApiParamTypes type;
    // Direct index lookup; paramId must be a valid enumerator.
    static const RestApiParam& Lookup(RestApiParameters paramId) noexcept;
    // Case-insensitive lookup by wire name; returns nullptr if unknown.
    static const RestApiParam* Lookup(const char* stringId) noexcept;
    // True when this parameter does not correspond to a download property.
    bool IsUnknownDownloadPropertyId() const { return (downloadPropertyId == DownloadProperty::Invalid); }
private:
    static const RestApiParam _knownParams[];
};

Просмотреть файл

@ -0,0 +1,170 @@
#include "do_common.h"
#include "rest_api_parser.h"
#include "string_ops.h"
// Returns the canonical wire name of the given parameter
// (e.g. for use as a JSON key in a response body).
std::string RestApiParser::ParamToString(RestApiParameters param)
{
    return RestApiParam::Lookup(param).stringId;
}
// Takes ownership of the client request. Parsing is lazy: the URI path,
// query string and JSON body are each parsed on first access and cached.
RestApiParser::RestApiParser(web::http::http_request request) :
    _request(std::move(request))
{
}
// Determines which REST API is being invoked from the request's URI path
// ("/download/<verb>") and HTTP method. The result is computed once and
// cached; an unrecognized path/verb/method combination throws E_UNSUPPORTED.
RestApiMethods RestApiParser::Method()
{
    // Maps the (case-insensitive) URI verb to the API ID plus the HTTP
    // method it must be invoked with. Pointers are used because the
    // web::http::methods members are objects, not compile-time constants.
    using api_map_t = std::map<std::string, std::pair<RestApiMethods, const web::http::method*>, case_insensitive_str_less>;
    static const api_map_t supportedAPIs =
    {
        { "create", std::make_pair(RestApiMethods::Create, &web::http::methods::POST) },
        { "enumerate", std::make_pair(RestApiMethods::Enumerate, &web::http::methods::GET) },
        { "start", std::make_pair(RestApiMethods::Start, &web::http::methods::POST) },
        { "pause", std::make_pair(RestApiMethods::Pause, &web::http::methods::POST) },
        { "finalize", std::make_pair(RestApiMethods::Finalize, &web::http::methods::POST) },
        { "abort", std::make_pair(RestApiMethods::Abort, &web::http::methods::POST) },
        { "getstatus", std::make_pair(RestApiMethods::GetStatus, &web::http::methods::GET) },
        { "getproperty", std::make_pair(RestApiMethods::GetProperty, &web::http::methods::GET) },
        { "setproperty", std::make_pair(RestApiMethods::SetProperty, &web::http::methods::POST) },
    };
    if (!_methodInitialized)
    {
        // Expect exactly two path segments: "download" / "<verb>".
        auto paths = web::http::uri::split_path(web::http::uri::decode(_request.relative_uri().path()));
        if ((paths.size() == 2) && (StringCompareCaseInsensitive(paths[0].data(), "download") == 0))
        {
            auto it = supportedAPIs.find(paths[1]);
            if ((it != supportedAPIs.end()) && (_request.method() == *(it->second.second)))
            {
                _method = it->second.first;
                _methodInitialized = true;
            }
        }
    }
    if (!_methodInitialized)
    {
        THROW_HR_MSG(E_UNSUPPORTED, "Unsupported API request: (%s) %s", _request.method().data(), _request.relative_uri().to_string().data());
    }
    return _method;
}
// Returns a pointer to the decoded query-string value for 'param', or
// nullptr when the parameter is absent from the query string.
// The pointer remains valid for the lifetime of this parser.
const std::string* RestApiParser::QueryStringParam(RestApiParameters param)
{
    const auto& knownQueryParams = _QueryParams();
    const auto pos = knownQueryParams.find(&RestApiParam::Lookup(param));
    return (pos != knownQueryParams.end()) ? &(pos->second) : nullptr;
}
// Returns a pointer to the JSON body value for 'param', or nullptr when the
// parameter is absent from the body.
// The pointer remains valid for the lifetime of this parser.
const web::json::value* RestApiParser::BodyParam(RestApiParameters param)
{
    const auto& knownBodyParams = _Body();
    const auto pos = knownBodyParams.find(&RestApiParam::Lookup(param));
    return (pos != knownBodyParams.end()) ? &(pos->second) : nullptr;
}
// Returns the value of 'param' as a string, preferring the query string
// over the JSON body. Returns an empty string when not present in either.
std::string RestApiParser::GetStringParam(RestApiParameters param)
{
    std::string result;
    const std::string* fromQuery = QueryStringParam(param);
    if (fromQuery != nullptr)
    {
        result = *fromQuery;
    }
    else
    {
        const web::json::value* fromBody = BodyParam(param);
        if (fromBody != nullptr)
        {
            result = fromBody->as_string();
        }
    }
    return result;
}
// Returns the value of 'param' as a URI, preferring the query string over
// the JSON body. Returns a default-constructed (empty) uri when not found.
web::uri RestApiParser::GetUriParam(RestApiParameters param)
{
    web::uri result;
    const std::string* fromQuery = QueryStringParam(param);
    if (fromQuery != nullptr)
    {
        result = web::uri(*fromQuery);
    }
    else
    {
        const web::json::value* fromBody = BodyParam(param);
        if (fromBody != nullptr)
        {
            result = web::uri(fromBody->as_string());
        }
    }
    return result;
}
// Splits and decodes the request's query string into _queryData, keyed by
// parameter descriptor. Throws E_INVALIDARG on any unknown parameter name.
void RestApiParser::_ParseQueryString()
{
    // Search for and store only known parameters.
    // Loop required because json::object does not offer case-insensitive finds.
    // Use std::lower_bound if this loop becomes a bottleneck.
    query_data_t decodedQueryData;
    auto queryData = web::http::uri::split_query(_request.request_uri().query());
    for (const auto& item : queryData)
    {
        const RestApiParam* param = RestApiParam::Lookup(item.first.data());
        THROW_HR_IF(E_INVALIDARG, param == nullptr);
        // Decode individual query params and not the query string as a whole
        // because it can contain embedded URI with query string that will not
        // get split up correctly if decoded beforehand.
        decodedQueryData[param] = web::http::uri::decode(item.second);
    }
    // Commit only after the whole string parsed successfully.
    _queryData = std::move(decodedQueryData);
}
// Stores known parameters from the request's JSON body into _bodyData.
// Accepts a null body (clears any previous data); throws E_INVALIDARG for
// non-object bodies or unknown parameter names.
void RestApiParser::_ParseJsonBody(const web::json::value& body)
{
    THROW_HR_IF(E_INVALIDARG, !(body.is_null() || body.is_object()));
    if (body.is_null())
    {
        _bodyData.clear();
        return;
    }
    // Search for and store only known parameters.
    // Loop required because json::object does not offer case-insensitive finds.
    // Use std::lower_bound if this loop becomes a bottleneck.
    body_data_t bodyData;
    for (const auto& val : body.as_object())
    {
        const RestApiParam* param = RestApiParam::Lookup(val.first.data());
        THROW_HR_IF(E_INVALIDARG, param == nullptr);
        bodyData[param] = val.second;
    }
    // Commit only after the whole body parsed successfully.
    _bodyData = std::move(bodyData);
}
// Lazily parses the query string on first access; returns the cached map.
const RestApiParser::query_data_t& RestApiParser::_QueryParams()
{
    if (_queryDataInitialized)
    {
        return _queryData;
    }
    _ParseQueryString();
    _queryDataInitialized = true;
    return _queryData;
}
// Lazily extracts and parses the JSON body on first access; returns the
// cached map. Note: extract_json().get() blocks until the body is read.
const RestApiParser::body_data_t& RestApiParser::_Body()
{
    if (!_bodyDataInitialized)
    {
        _ParseJsonBody(_request.extract_json().get());
        _bodyDataInitialized = true;
    }
    return _bodyData;
}

Просмотреть файл

@ -0,0 +1,54 @@
#pragma once
#include <cpprest/http_msg.h>
#include <cpprest/json.h>
#include "rest_api_params.h"
// The set of REST APIs exposed over "/download/<verb>".
enum class RestApiMethods
{
    Create,
    Enumerate,
    Start,
    Pause,
    Finalize,
    Abort,
    GetStatus,
    GetProperty,
    SetProperty,
};
// Lazily parses a cpprest http_request: the API method, query string and
// JSON body are each parsed on first access and then cached.
class RestApiParser
{
public:
    using query_data_t = std::map<const RestApiParam*, std::string>;
    using body_data_t = std::map<const RestApiParam*, web::json::value>;
    // Returns the canonical wire name for a parameter (e.g. a JSON key).
    static std::string ParamToString(RestApiParameters param);
    RestApiParser(web::http::http_request request);
    // Identifies the invoked API from the URI path and HTTP method;
    // throws for unsupported requests.
    RestApiMethods Method();
    // Returns the query-string value for 'param', or nullptr if absent.
    const std::string* QueryStringParam(RestApiParameters param);
    // Returns the JSON body value for 'param', or nullptr if absent.
    const web::json::value* BodyParam(RestApiParameters param);
    // Returns all known parameters found in the JSON body.
    const body_data_t& Body() { return _Body(); }
    // Convenience accessors that check the query string first, then the body.
    std::string GetStringParam(RestApiParameters param);
    web::uri GetUriParam(RestApiParameters param);
private:
    void _ParseQueryString();
    void _ParseJsonBody(const web::json::value& body);
    const query_data_t& _QueryParams();
    const body_data_t& _Body();
private:
    web::http::http_request _request;
    // All further data members are lazy-init.
    // Fix: value-initialize _method so the member is never indeterminate;
    // it is meaningful only once _methodInitialized is true.
    RestApiMethods _method {};
    query_data_t _queryData;
    body_data_t _bodyData;
    bool _methodInitialized { false };
    bool _queryDataInitialized { false };
    bool _bodyDataInitialized { false };
};

Просмотреть файл

@ -0,0 +1,216 @@
#include "do_common.h"
#include "rest_api_request.h"
#include "do_error.h"
#include "do_guid.h"
#include "download.h"
#include "download_manager.h"
#include "string_ops.h"
namespace strconv = docli::string_conversions;
// Convenience accessors for common request parameters.
static std::string GetUri(RestApiParser& parser)
{
    return parser.GetStringParam(RestApiParameters::Uri);
}
static std::string GetDownloadFilePath(RestApiParser& parser)
{
    return parser.GetStringParam(RestApiParameters::DownloadFilePath);
}
// Note: unlike the helpers above, the download ID is read only from the
// query string, never the JSON body. Returns an empty string when absent.
static std::string GetDownloadId(RestApiParser& parser)
{
    const std::string* str = parser.QueryStringParam(RestApiParameters::Id);
    if (str != nullptr)
    {
        return *str;
    }
    return {};
}
// Returns the protocol/display name for a download state.
// Asserts (debug builds) and returns nullptr for unknown values.
static PCSTR DownloadStateToString(DownloadState state)
{
    switch (state)
    {
    case DownloadState::Created: return "Created";
    case DownloadState::Transferring: return "Transferring";
    case DownloadState::Transferred: return "Transferred";
    case DownloadState::Finalized: return "Finalized";
    case DownloadState::Aborted: return "Aborted";
    case DownloadState::Paused: return "Paused";
    default:
        DO_ASSERT(false);
        return nullptr;
    }
}
// Entry point for request parsing. The only parsing that is done here is to determine the request method type.
// Other parsing, like the query string and request JSON body is done when the Process() method is invoked.
// Process() is called asynchronously to unblock the HTTP listener thread.
// Identifies the requested API and instantiates the matching handler.
// _parser.Method() throws E_UNSUPPORTED for unrecognized requests, so
// _apiRequest is always non-null after successful construction.
RestApiRequestBase::RestApiRequestBase(web::http::http_request clientRequest) :
    _parser(std::move(clientRequest))
{
    switch (_parser.Method())
    {
    case RestApiMethods::Create: _apiRequest = std::make_unique<RestApiCreateRequest>(); break;
    case RestApiMethods::Enumerate: _apiRequest = std::make_unique<RestApiEnumerateRequest>(); break;
    // All four state-change verbs share one handler; it re-reads the
    // parsed method to pick the action.
    case RestApiMethods::Start:
    case RestApiMethods::Pause:
    case RestApiMethods::Finalize:
    case RestApiMethods::Abort:
        _apiRequest = std::make_unique<RestApiDownloadStateChangeRequest>();
        break;
    case RestApiMethods::GetStatus: _apiRequest = std::make_unique<RestApiGetStatusRequest>(); break;
    case RestApiMethods::GetProperty: _apiRequest = std::make_unique<RestApiGetPropertyRequest>(); break;
    case RestApiMethods::SetProperty: _apiRequest = std::make_unique<RestApiSetPropertyRequest>(); break;
    default:
        DO_ASSERT(false);
        THROW_HR(E_UNEXPECTED);
    }
}
// Parses the remaining request data and executes the API against the
// download manager. Any exception is converted to a failure HRESULT.
HRESULT RestApiRequestBase::Process(DownloadManager& downloadManager, web::json::value& responseBody) try
{
    return _apiRequest->ParseAndProcess(downloadManager, _parser, responseBody);
} CATCH_RETURN()
// Creates a new download from the Uri/DownloadFilePath parameters and
// returns the new download's ID in the response body.
HRESULT RestApiCreateRequest::ParseAndProcess(DownloadManager& downloadManager, RestApiParser& parser, web::json::value& responseBody)
{
    const std::string requestedUri = GetUri(parser);
    const std::string destinationPath = GetDownloadFilePath(parser);
    const auto newDownloadId = downloadManager.CreateDownload(requestedUri, destinationPath);
    responseBody[RestApiParser::ParamToString(RestApiParameters::Id)] = web::json::value(newDownloadId);
    return S_OK;
}
// Not yet implemented: logs the requested filters and returns E_NOTIMPL.
HRESULT RestApiEnumerateRequest::ParseAndProcess(DownloadManager& downloadManager, RestApiParser& parser,
    web::json::value& responseBody)
{
    std::string filePath = GetDownloadFilePath(parser);
    std::string uri = GetUri(parser);
    DoLogInfo("%s, %s", filePath.data(), uri.data());
    return E_NOTIMPL;
}
// Handles the Start/Pause/Finalize/Abort verbs; the specific action comes
// from the already-parsed API method.
HRESULT RestApiDownloadStateChangeRequest::ParseAndProcess(DownloadManager& downloadManager, RestApiParser& parser,
    web::json::value& responseBody)
{
    DoLogInfo("Download state change: %d", static_cast<int>(parser.Method()));
    auto downloadId = GetDownloadId(parser);
    switch(parser.Method())
    {
    case RestApiMethods::Start:
        downloadManager.StartDownload(downloadId);
        break;
    case RestApiMethods::Pause:
        downloadManager.PauseDownload(downloadId);
        break;
    case RestApiMethods::Finalize:
        downloadManager.FinalizeDownload(downloadId);
        break;
    case RestApiMethods::Abort:
        downloadManager.AbortDownload(downloadId);
        break;
    default:
        // Unreachable: RestApiRequestBase routes only the four verbs above here.
        DO_ASSERT(false);
        return E_NOTIMPL;
    }
    return S_OK;
}
// Returns the download's current status snapshot to the client.
HRESULT RestApiGetStatusRequest::ParseAndProcess(DownloadManager& downloadManager, RestApiParser& parser,
    web::json::value& responseBody)
{
    DoLogInfo("");
    auto status = downloadManager.GetDownloadStatus(GetDownloadId(parser));
    responseBody["Status"] = web::json::value::string(DownloadStateToString(status.State));
    responseBody["BytesTotal"] = static_cast<uint64_t>(status.BytesTotal);
    responseBody["BytesTransferred"] = static_cast<uint64_t>(status.BytesTransferred);
    // Error codes are HRESULTs, reported as signed 32-bit JSON numbers.
    responseBody["ErrorCode"] = static_cast<int32_t>(status.Error);
    responseBody["ExtendedErrorCode"] = static_cast<int32_t>(status.ExtendedError);
    return S_OK;
}
// Returns the value of a single download property, named by the PropertyKey
// query-string parameter, converted to its declared JSON type.
HRESULT RestApiGetPropertyRequest::ParseAndProcess(DownloadManager& downloadManager, RestApiParser& parser,
    web::json::value& responseBody)
{
    const std::string* propKey = parser.QueryStringParam(RestApiParameters::PropertyKey);
    RETURN_HR_IF_EXPECTED(E_INVALIDARG, (propKey == nullptr) || (propKey->empty()));
    // The key must name a known parameter that maps to a download property.
    const RestApiParam* param = RestApiParam::Lookup(propKey->data());
    RETURN_HR_IF(DO_E_UNKNOWN_PROPERTY_ID, (param == nullptr) || param->IsUnknownDownloadPropertyId());
    const auto downloadId = GetDownloadId(parser);
    // Property values come back as strings; convert per the declared type.
    std::string val = downloadManager.GetDownloadProperty(downloadId, param->downloadPropertyId);
    web::json::value propVal;
    switch (param->type)
    {
    case RestApiParamTypes::UInt:
        propVal = web::json::value(strconv::ToUInt(val));
        break;
    case RestApiParamTypes::String:
        propVal = web::json::value(val);
        break;
    default:
        DO_ASSERT(false);
        break;
    }
    responseBody[*propKey] = std::move(propVal);
    return S_OK;
}
// Sets one or more download properties from the request's JSON body.
// All values are validated/converted before any property is applied, so a
// bad value fails the request without partially applying earlier ones.
HRESULT RestApiSetPropertyRequest::ParseAndProcess(DownloadManager& downloadManager, RestApiParser& parser,
    web::json::value& responseBody)
{
    auto downloadId = GetDownloadId(parser);
    std::vector<std::pair<DownloadProperty, std::string>> propertiesToSet;
    const auto& requestBody = parser.Body();
    for (const auto& item : requestBody)
    {
        const RestApiParam* param = item.first;
        RETURN_HR_IF(DO_E_UNKNOWN_PROPERTY_ID, param->IsUnknownDownloadPropertyId());
        const web::json::value& propVal = item.second;
        // Properties are handed to the download manager as strings.
        std::string propValue;
        switch (param->type)
        {
        case RestApiParamTypes::UInt:
            RETURN_HR_IF(E_INVALIDARG, !propVal.as_number().is_uint32());
            propValue = std::to_string(propVal.as_number().to_uint32());
            break;
        case RestApiParamTypes::String:
            propValue = propVal.as_string();
            break;
        default:
            DO_ASSERT(false);
            break;
        }
        // NOTE(review): asserts string property values are never empty --
        // confirm empty strings are truly invalid for all settable properties.
        DO_ASSERT(!propValue.empty());
        propertiesToSet.emplace_back(param->downloadPropertyId, std::move(propValue));
    }
    for (const auto& prop : propertiesToSet)
    {
        downloadManager.SetDownloadProperty(downloadId, prop.first, prop.second);
    }
    return S_OK;
}

Просмотреть файл

@ -0,0 +1,62 @@
#pragma once
#include <memory>
#include <cpprest/http_msg.h>
#include <cpprest/json.h>
#include "rest_api_parser.h"
class DownloadManager;
// Interface implemented by each REST API handler.
class IRestApiRequest
{
public:
    virtual ~IRestApiRequest() = default;
    // Parses remaining request data via 'parser' and executes the API,
    // filling 'responseBody' with the JSON reply on success.
    virtual HRESULT ParseAndProcess(DownloadManager& downloadManager, RestApiParser& parser, web::json::value& responseBody) = 0;
};
// Dispatches an incoming HTTP request to the matching IRestApiRequest
// implementation. Construction identifies the API (throwing when the
// request is unsupported); Process() executes it.
class RestApiRequestBase
{
public:
    RestApiRequestBase(web::http::http_request clientRequest);
    HRESULT Process(DownloadManager& downloadManager, web::json::value& responseBody);
private:
    std::unique_ptr<IRestApiRequest> _apiRequest;
    RestApiParser _parser;
};
// One handler class per REST API. The ParseAndProcess overrides are private;
// they are reachable only through the IRestApiRequest interface.
class RestApiCreateRequest : public IRestApiRequest
{
private:
    HRESULT ParseAndProcess(DownloadManager& downloadManager, RestApiParser& parser, web::json::value& responseBody) override;
};
class RestApiEnumerateRequest : public IRestApiRequest
{
private:
    HRESULT ParseAndProcess(DownloadManager& downloadManager, RestApiParser& parser, web::json::value& responseBody) override;
};
// Handles Start/Pause/Finalize/Abort (the verb is re-read from the parser).
class RestApiDownloadStateChangeRequest : public IRestApiRequest
{
private:
    HRESULT ParseAndProcess(DownloadManager& downloadManager, RestApiParser& parser, web::json::value& responseBody) override;
};
class RestApiGetStatusRequest : public IRestApiRequest
{
private:
    HRESULT ParseAndProcess(DownloadManager& downloadManager, RestApiParser& parser, web::json::value& responseBody) override;
};
class RestApiGetPropertyRequest : public IRestApiRequest
{
private:
    HRESULT ParseAndProcess(DownloadManager& downloadManager, RestApiParser& parser, web::json::value& responseBody) override;
};
class RestApiSetPropertyRequest : public IRestApiRequest
{
private:
    HRESULT ParseAndProcess(DownloadManager& downloadManager, RestApiParser& parser, web::json::value& responseBody) override;
};

Просмотреть файл

@ -0,0 +1,147 @@
#include "do_common.h"
#include "rest_http_controller.h"
#include <boost/asio/ip/address.hpp>
#include <cpprest/json.h>
#include <pplx/pplxtasks.h>
#include "config_manager.h"
#include "download_manager.h"
#include "rest_api_request.h"
// Keeps a reference to the config (which must outlive this controller) and
// shares ownership of the download manager.
RestHttpController::RestHttpController(ConfigManager& config, std::shared_ptr<DownloadManager> downloadManager) :
    _config(config),
    _downloadManager(std::move(downloadManager))
{
}
// Stops the listener, then blocks until all in-flight request handlers
// (tracked via _callTracker) have drained.
RestHttpController::~RestHttpController()
{
    _listener.Stop();
    (void)_callTracker.Wait();
}
// Starts listening on the loopback interface, then registers GET/POST
// handlers. NOTE(review): handlers are attached after the listener is
// already open (AddHandler needs the created listener) -- confirm requests
// cannot arrive in the window before registration completes.
void RestHttpController::Start()
{
    _listener.Start("http://127.0.0.1");
    _listener.AddHandler(web::http::methods::GET, std::bind(&RestHttpController::_Handler, this, std::placeholders::_1));
    _listener.AddHandler(web::http::methods::POST, std::bind(&RestHttpController::_Handler, this, std::placeholders::_1));
}
// Full listener URI; valid only after Start().
std::string RestHttpController::ServerEndpoint() const
{
    return _listener.Endpoint();
}
// Port the listener bound to; valid only after Start().
uint16_t RestHttpController::Port() const
{
    return _listener.Port();
}
// Entry point for every incoming GET/POST. Validates the caller's address,
// then processes the request on a background pplx task so the listener
// thread is not blocked; failures are reported to the client as JSON.
void RestHttpController::_Handler(web::http::http_request request)
{
    HRESULT hr = S_OK;
    try
    {
        // Optionally reject requests not originating from loopback.
        if (_config.RestControllerValidateRemoteAddr() && !_IsValidRemoteAddress(request.remote_address()))
        {
            request.reply(web::http::status_codes::BadRequest);
            return;
        }
        // shared_ptr because the lambda below needs to be copyable
        auto apiRequest = std::make_shared<RestApiRequestBase>(request);
        // Handle the request asynchronously and then reply to the client request
        // Note: the tracker is moved to only the last task-based lambda because it
        // will always get executed whereas a value-based lambda can be skipped due
        // to exceptions or cancellations.
        auto tracker = _callTracker.Enter();
        pplx::create_task([this, request, apiRequest]()
        {
            auto response = web::json::value::object();
            THROW_IF_FAILED(apiRequest->Process(*_downloadManager, response));
            (void)request.reply(web::http::status_codes::OK, response);
        }).then([request, tracker = std::move(tracker)](pplx::task<void> t)
        {
            HRESULT hr = S_OK;
            try
            {
                // get() inside a 'then' handler will re-throw any exception that resulted
                // from previous task(s). Thus, it allows exceptions to be seen and handled.
                t.get();
            }
            catch (...)
            {
                hr = LOG_CAUGHT_EXCEPTION();
            }
            if (FAILED(hr))
            {
                _OnFailure(request, hr);
            }
        });
    }
    catch (...)
    {
        // Failure before the task was scheduled (e.g. unsupported request).
        hr = LOG_CAUGHT_EXCEPTION();
    }
    if (FAILED(hr))
    {
        _OnFailure(request, hr);
    }
}
// Returns true only when 'addr' parses as a loopback IP address.
// Any parse failure is treated as invalid (fail closed).
bool RestHttpController::_IsValidRemoteAddress(const std::string& addr)
{
    bool fValidAddress = true;
    try
    {
        const auto remoteAddrAsIP = boost::asio::ip::address::from_string(addr);
        if (!remoteAddrAsIP.is_loopback())
        {
            // Log at verbose level to avoid flooding the log (attacker trying to DoS us).
            DoLogVerbose("Request unexpected from non-loopback address: %s", addr.c_str());
            fValidAddress = false;
        }
    }
    catch (...)
    {
        DoLogVerboseHr(docli::ResultFromCaughtException(), "Exception in trying to validate remote address");
        fValidAddress = false;
    }
    return fValidAddress;
}
// Best-effort error reply: sends {"ErrorCode": hr} with a mapped HTTP
// status. Any exception while replying is logged and swallowed (CATCH_LOG).
void RestHttpController::_OnFailure(const web::http::http_request& clientRequest, HRESULT hr) try
{
    auto response = web::json::value::object();
    response["ErrorCode"] = web::json::value::number(hr);
    (void)clientRequest.reply(_HttpStatusFromHRESULT(hr), response);
} CATCH_LOG()
// Maps an HRESULT to the HTTP status code reported to the client.
// Unrecognized failures default to BadRequest.
web::http::status_code RestHttpController::_HttpStatusFromHRESULT(HRESULT hr)
{
    switch (hr)
    {
    case S_OK:
    case S_FALSE:
        return web::http::status_codes::OK;
    case E_NOT_SET:
        return web::http::status_codes::NotFound;
    case E_OUTOFMEMORY:
        return web::http::status_codes::ServiceUnavailable;
    case HRESULT_FROM_WIN32(ERROR_UNHANDLED_EXCEPTION):
        return web::http::status_codes::InternalError;
    default:
        return web::http::status_codes::BadRequest;
    }
}

Просмотреть файл

@ -0,0 +1,35 @@
#pragma once
#include <cpprest/http_msg.h>
#include "rest_http_listener.h"
#include "waitable_counter.h"
class ConfigManager;
class DownloadManager;
class RestApiRequestBase;
// Controller for the REST-over-HTTP interface in DO client.
// This interface is used as the inter-process communication
// mechanism for our clients to create and manage download requests.
class RestHttpController
{
public:
    RestHttpController(ConfigManager& config, std::shared_ptr<DownloadManager> downloadManager);
    // Stops the listener and waits for in-flight handlers to drain.
    ~RestHttpController();
    // Starts listening on loopback and registers GET/POST handlers.
    void Start();
    // Endpoint/port of the listener; valid only after Start().
    std::string ServerEndpoint() const;
    uint16_t Port() const;
private:
    void _Handler(web::http::http_request request);
    static bool _IsValidRemoteAddress(const std::string& addr);
    static void _OnFailure(const web::http::http_request& clientRequest, HRESULT hr);
    static web::http::status_code _HttpStatusFromHRESULT(HRESULT hr);
private:
    ConfigManager& _config;
    std::shared_ptr<DownloadManager> _downloadManager;
    RestHttpListener _listener;
    // Tracks outstanding async request handlers so the destructor can wait.
    WaitableCounter _callTracker;
};

Просмотреть файл

@ -0,0 +1,60 @@
#include "do_common.h"
#include "rest_http_listener.h"
using cpprest_http_listener_t = web::http::experimental::listener::http_listener;
// Registers 'handler' for the given HTTP method on the underlying listener.
// NOTE(review): dereferences _listener without a check, so Start() must
// have succeeded first -- confirm all callers honor that ordering.
void RestHttpListener::AddHandler(const web::http::method& method, const std::function<void(web::http::http_request)>& handler)
{
    _listener->support(method, handler);
}
// Creates the underlying cpprest listener and opens it on the first free
// port in [50000, 60999], probing sequentially. Throws when no port in the
// range is free or on any non-EADDRINUSE failure.
void RestHttpListener::Start(const std::string& listenUrl)
{
    web::uri_builder endpointBuilder{ web::uri(listenUrl) };
    // IANA suggests ephemeral ports can be in range [49125, 65535].
    // Linux suggests [32768, 60999] while Windows says [1025, 65535].
    // We just choose a range that lies within all three implementations.
    uint16_t restPort = 50000;
    constexpr uint16_t restPortLimit = 60999;
    std::unique_ptr<cpprest_http_listener_t> tmpListener;
    while (true)
    {
        endpointBuilder.set_port(restPort);
        tmpListener = std::make_unique<cpprest_http_listener_t>(endpointBuilder.to_uri());
        try
        {
            tmpListener->open().wait(); // wait for completion and check for exceptions
            break; // break because listening was successfully started
        }
        catch (const boost::system::system_error& ex)
        {
            // NOTE(review): assumes cpprest surfaces bind failures as
            // boost::system::system_error carrying errno values -- confirm
            // for the cpprest version in use.
            if ((ex.code().value() != EADDRINUSE) || (restPort == restPortLimit))
            {
                throw;
            }
            ++restPort;
        }
    }
    DO_ASSERT(tmpListener);
    // Publish the listener only after it opened successfully.
    _listener = std::move(tmpListener);
}
// Closes the listener (waiting for completion) and releases it.
// Safe to call when the listener was never started or already stopped.
void RestHttpListener::Stop()
{
    if (!_listener)
    {
        return;
    }
    _listener->close().wait();
    _listener.reset();
}
// Full listener URI; valid only after a successful Start().
std::string RestHttpListener::Endpoint() const
{
    return _listener->uri().to_string();
}
// Port the listener bound to; valid only after a successful Start().
uint16_t RestHttpListener::Port() const
{
    return _listener->uri().port();
}

Просмотреть файл

@ -0,0 +1,18 @@
#pragma once
#include <memory>
#include <cpprest/http_listener.h>
#include <cpprest/http_msg.h>
// Thin wrapper over cpprest's http_listener that probes for a free port at
// startup (see Start) and exposes the chosen endpoint.
class RestHttpListener
{
public:
    // Requires a successful Start() first (the listener must exist).
    void AddHandler(const web::http::method& method, const std::function<void(web::http::http_request)>& handler);
    // Opens the listener on the first free port in a fixed range; throws on failure.
    void Start(const std::string& listenUrl);
    // Safe to call when not started.
    void Stop();
    // Valid only after a successful Start().
    std::string Endpoint() const;
    uint16_t Port() const;
private:
    std::unique_ptr<web::http::experimental::listener::http_listener> _listener;
};

Просмотреть файл

@ -0,0 +1,77 @@
#pragma once
#include <errno.h> // errno
#include <fcntl.h> // open, write
#include <unistd.h> // getpid
#include <sstream>
#include <boost/filesystem.hpp>
#include <gsl/gsl_util>
#include "do_persistence.h"
#include "error_macros.h"
// On construction, deletes stale port files from previous instances and then
// writes the REST listener's port number to <runtime-dir>/restport.<pid> so
// local clients can discover the endpoint. (The file is intentionally not
// removed on shutdown -- see the note inside the constructor.)
class RestPortAdvertiser
{
public:
    RestPortAdvertiser(uint16_t port)
    {
        // We cannot remove the port file on shutdown because we have already dropped permissions.
        // Clean it up now when we are still running as root.
        _DeleteOlderPortFiles();
        std::stringstream ss;
        ss << docli::GetRuntimeDirectory() << '/' << _restPortFileNamePrefix << '.' << getpid();
        _outFilePath = ss.str();
        // If file already exists, it is truncated. Probably from an old instance that terminated abnormally.
        // Allow file to only be read by others.
        int fd = open(_outFilePath.data(), O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IRGRP | S_IROTH);
        if (fd == -1)
        {
            THROW_HR_MSG(E_FAIL, "Failed to open file %s, errno: %d", _outFilePath.data(), errno);
        }
        try
        {
            // Port number followed by a newline, e.g. "50000\n".
            const auto writeStr = std::to_string(port) + '\n';
            const auto cbWrite = gsl::narrow_cast<ssize_t>(writeStr.size() * sizeof(char));
            const ssize_t written = write(fd, writeStr.data(), cbWrite);
            if (written != cbWrite)
            {
                THROW_HR_MSG(E_FAIL, "Failed to write port, written: %zd, errno: %d", written, errno);
            }
        }
        catch (...)
        {
            // Do not leak the descriptor when the write fails.
            (void)close(fd);
            throw;
        }
        (void)close(fd);
    }
    // Path of the file the port number was written to.
    const std::string& OutFilePath() const { return _outFilePath; }
private:
    // Best-effort removal of port files left by earlier instances; failures
    // are logged and otherwise ignored (CATCH_LOG).
    void _DeleteOlderPortFiles() try
    {
        auto& runtimeDirectory = docli::GetRuntimeDirectory();
        for (boost::filesystem::directory_iterator itr(runtimeDirectory); itr != boost::filesystem::directory_iterator(); ++itr)
        {
            auto& dirEntry = itr->path();
            if (dirEntry.filename().string().find(_restPortFileNamePrefix) != std::string::npos)
            {
                boost::system::error_code ec;
                boost::filesystem::remove(dirEntry, ec);
                if (ec)
                {
                    DoLogWarning("Failed to delete old port file (%d, %s) %s", ec.value(), ec.message().data(), dirEntry.string().data());
                }
            }
        }
    } CATCH_LOG()
    std::string _outFilePath;
    static constexpr const char* const _restPortFileNamePrefix = "restport";
};

Просмотреть файл

@ -0,0 +1,98 @@
#include "do_common.h"
#include "do_event.h"
#include <chrono>
#ifdef DEBUG
// Debug-only sanity check that a relative wait time will not overflow the
// steady_clock when added to now() (condition_variable waits do this
// addition internally).
inline void _VerifyWaitTime(std::chrono::milliseconds timeout)
{
    // Wait time must be small enough to not overflow when added to now()
    const auto now = std::chrono::steady_clock::now();
    DO_ASSERT((now + (timeout)) >= now);
}
#else
#define _VerifyWaitTime(t)
#endif
// AutoResetEvent
AutoResetEvent::AutoResetEvent(bool isSignaled) :
_isSignaled(isSignaled)
{
}
void AutoResetEvent::SetEvent() noexcept
{
std::lock_guard<std::mutex> lock(_mutex);
_isSignaled = true;
_cv.notify_one();
}
void AutoResetEvent::ResetEvent() noexcept
{
std::lock_guard<std::mutex> lock(_mutex);
_isSignaled = false;
}
bool AutoResetEvent::IsSignaled() const noexcept
{
std::lock_guard<std::mutex> lock(_mutex);
return _isSignaled;
}
// Blocks until the event is signaled or 'timeout' elapses. On success the
// signal is consumed (auto-reset). Returns false only on timeout.
bool AutoResetEvent::Wait(std::chrono::milliseconds timeout) noexcept
{
    _VerifyWaitTime(timeout);
    std::unique_lock<std::mutex> guard(_mutex);
    // The predicate overload returns immediately when already signaled and
    // is immune to spurious wakeups, so no explicit pre-check is needed.
    if (!_cv.wait_for(guard, timeout, [this] { return _isSignaled; }))
    {
        return false;
    }
    _isSignaled = false;
    return true;
}
// ManualResetEvent
// Constructs the event with the given initial signaled state.
ManualResetEvent::ManualResetEvent(bool isSignaled) :
_isSignaled(isSignaled)
{
}
// Signals the event and wakes every waiter. The event stays signaled until
// ResetEvent is called.
void ManualResetEvent::SetEvent() noexcept
{
    std::lock_guard<std::mutex> guard(_mutex);
    _isSignaled = true;
    _cv.notify_all();
}
// Returns the event to the non-signaled state.
void ManualResetEvent::ResetEvent() noexcept
{
    std::lock_guard<std::mutex> guard(_mutex);
    _isSignaled = false;
}
// Returns a snapshot of the signaled flag; may be stale immediately after.
bool ManualResetEvent::IsSignaled() const noexcept
{
    std::lock_guard<std::mutex> guard(_mutex);
    return _isSignaled;
}
// Blocks until the event is signaled or 'timeout' elapses. Unlike
// AutoResetEvent, the signal is NOT consumed. Returns false only on timeout.
bool ManualResetEvent::Wait(std::chrono::milliseconds timeout) noexcept
{
    _VerifyWaitTime(timeout);
    std::unique_lock<std::mutex> guard(_mutex);
    // Predicate form handles the already-signaled case and spurious wakeups;
    // its boolean result is exactly this function's contract.
    return _cv.wait_for(guard, timeout, [this] { return _isSignaled; });
}

Просмотреть файл

@ -0,0 +1,39 @@
#pragma once
#include <chrono>
#include <condition_variable>
#include <mutex>
#include "config_defaults.h"
#include "do_noncopyable.h"
// Synchronization event that releases at most one waiting thread per
// SetEvent call; a successful Wait consumes (auto-resets) the signal.
class AutoResetEvent : private DONonCopyable
{
public:
AutoResetEvent(bool isSignaled = false);
// Signals the event, releasing a single waiter.
void SetEvent() noexcept;
// Clears the signal.
void ResetEvent() noexcept;
// Snapshot of the signaled state (may be stale immediately).
bool IsSignaled() const noexcept;
// Blocks until signaled or 'timeout' elapses; returns false on timeout.
bool Wait(std::chrono::milliseconds timeout = g_steadyClockInfiniteWaitTime) noexcept;
private:
mutable std::mutex _mutex;
std::condition_variable _cv;
bool _isSignaled { false };
};
// Synchronization event that, once signaled, releases all waiters and stays
// signaled until ResetEvent is called.
class ManualResetEvent : private DONonCopyable
{
public:
ManualResetEvent(bool isSignaled = false);
// Signals the event, releasing all waiters.
void SetEvent() noexcept;
// Clears the signal.
void ResetEvent() noexcept;
// Snapshot of the signaled state (may be stale immediately).
bool IsSignaled() const noexcept;
// Blocks until signaled or 'timeout' elapses; returns false on timeout.
bool Wait(std::chrono::milliseconds timeout = g_steadyClockInfiniteWaitTime) noexcept;
private:
mutable std::mutex _mutex;
std::condition_variable _cv;
bool _isSignaled { false };
};

Просмотреть файл

@ -0,0 +1,92 @@
#include "do_common.h"
#include "task_queue.h"
// Inserts a task to run 'delay' from now. Negative delays are clamped to
// zero. Tasks with equal times keep insertion order (multimap guarantee).
void TaskQueue::_Add(std::unique_ptr<Task>&& spTask, const duration_t& delay)
{
const auto adjustedDelay = std::max(duration_t(0), delay);
auto opTime = clock_t::now() + adjustedDelay;
_ops.emplace(opTime, std::move(spTask));
}
void TaskQueue::_AddFront(std::unique_ptr<Task>&& spTask)
{
timepoint_t now = clock_t::now();
timepoint_t earliest = NextTime();
timepoint_t opTime;
if (now < earliest)
{
opTime = now - duration_t(1);
}
else
{
opTime = earliest - duration_t(1);
}
_ops.emplace(opTime, std::move(spTask));
}
// Removes and returns the earliest task whose scheduled time has arrived,
// or nullptr when none is due. When 'tagp' is non-null it receives the
// popped task's tag (or nullptr when nothing was popped).
std::unique_ptr<TaskQueue::Task> TaskQueue::popNextReady(_Out_opt_ const void** tagp)
{
if (tagp != nullptr)
{
*tagp = nullptr;
}
std::unique_ptr<Task> rval;
// The multimap is ordered by time, so begin() is the earliest entry
OpsMap::iterator it = _ops.begin();
if ((it != _ops.end()) && (it->first <= clock_t::now()))
{
rval = std::move(it->second);
if (tagp)
{
*tagp = rval->Tag();
}
_ops.erase(it);
}
return rval;
}
// Removes every queued task carrying 'tag'. Remove(nullptr) is a no-op,
// matching the Add contract that untagged tasks cannot be removed.
// Linear scan over the whole queue.
void TaskQueue::Remove(_In_opt_ const void* tag)
{
if (tag != nullptr)
{
// This is why we want boost::multi_index_container
OpsMap::iterator it = _ops.begin();
while (it != _ops.end())
{
if (it->second->Tag() == tag)
{
// erase returns the next valid iterator
it = _ops.erase(it);
}
else
{
++it;
}
}
}
}
// Returns true when at least one queued task carries 'tag'.
// A null tag is never attached to a task, so it is never found.
bool TaskQueue::Exists(_In_opt_ const void* tag) const
{
    if (tag == nullptr)
    {
        return false;
    }
    for (const auto& entry : _ops)
    {
        if (entry.second->Tag() == tag)
        {
            return true;
        }
    }
    return false;
}
// Scheduled time of the earliest queued task, or timepoint_t::max() as the
// "never" sentinel when the queue is empty.
TaskQueue::timepoint_t TaskQueue::NextTime() const
{
    return _ops.empty() ? timepoint_t::max() : _ops.begin()->first;
}

Просмотреть файл

@ -0,0 +1,76 @@
#pragma once
#include <chrono>
#include <map>
#include <memory>
// Note: Locking is left to the queue's owner
// Time-ordered queue of runnable tasks backed by a std::multimap keyed on
// scheduled time. Tasks may carry an opaque tag used for removal/lookup.
class TaskQueue
{
public:
using clock_t = std::chrono::steady_clock;
using timepoint_t = clock_t::time_point;
using duration_t = std::chrono::milliseconds;
// Interface for a queued unit of work
class Task
{
public:
virtual ~Task() {}
virtual void Run() = 0;
virtual const void* Tag() = 0;
};
private:
// Wraps a lambda as a Task. Inherits from the lambda type (empty-base
// friendly) and invokes it via operator() in Run().
template <typename TLambda>
class Op : public Task, std::remove_reference_t<TLambda>
{
Op(const Op&) = delete;
Op& operator=(const Op&) = delete;
public:
Op(TLambda&& lambda, const void* tag) :
std::remove_reference_t<TLambda>(std::forward<TLambda>(lambda)),
_tag(tag)
{
}
void Run() override
{
(*this)();
}
const void* Tag() override
{
return _tag;
}
private:
const void* _tag;
};
void _Add(std::unique_ptr<Task>&& spTask, const duration_t& delay);
void _AddFront(std::unique_ptr<Task>&& spTask);
typedef std::multimap<timepoint_t, std::unique_ptr<Task>> OpsMap;
OpsMap _ops;
public:
// Note: If tag == nullptr, the op can't be removed. remove(nullptr) is a no-op.
// Schedules 'func' to run after 'delay'.
template <typename TLambda, typename TDuration>
void Add(TLambda&& func, TDuration delay, _In_opt_ const void* tag = nullptr)
{
_Add(std::make_unique<Op<TLambda>>(std::forward<TLambda>(func), tag), std::chrono::duration_cast<duration_t>(delay));
}
// Schedules 'func' ahead of everything already queued.
template <typename TLambda>
void AddFront(TLambda&& func, _In_opt_ const void* tag = nullptr)
{
_AddFront(std::make_unique<Op<TLambda>>(std::forward<TLambda>(func), tag));
}
// Pops the earliest due task (nullptr if none); optionally returns its tag.
std::unique_ptr<Task> popNextReady(_Out_opt_ const void** tagp = nullptr);
// Removes all tasks with the given (non-null) tag.
void Remove(_In_opt_ const void* tag);
// True when any queued task carries the given (non-null) tag.
bool Exists(_In_opt_ const void* tag) const;
// Earliest scheduled time, or timepoint_t::max() when empty.
timepoint_t NextTime() const;
};

Просмотреть файл

@ -0,0 +1,103 @@
#include "do_common.h"
#include "task_thread.h"
#include <memory>
#include "do_event.h"
// Starts the worker thread immediately; it runs _DoPoll until the
// destructor schedules the stop task.
TaskThread::TaskThread()
{
_thread = std::thread([this]() { _DoPoll(); });
}
// Shuts down by scheduling a front-of-queue task that clears _fRunning,
// then joins the worker. Tasks still queued behind the stop task are not
// executed once the poll loop observes _fRunning == false.
TaskThread::~TaskThread()
{
SchedImmediate([this]()
{
_fRunning = false;
});
_thread.join();
}
// Removes all queued tasks carrying 'tag' and wakes the poll loop so it can
// recompute its next wakeup time. A null tag is a no-op.
void TaskThread::Unschedule(_In_opt_ const void* tag)
{
    if (tag == nullptr)
    {
        return;
    }
    std::lock_guard<std::mutex> qlock(_taskQMutex);
    _taskQ.Remove(tag);
    _taskQCond.notify_all();
}
// Worker loop: runs due tasks and sleeps on the condition variable until
// the next task's time (indefinitely when the queue is empty). The queue
// lock is released while a task runs so producers are never blocked on it.
void TaskThread::_DoPoll()
{
while (_fRunning)
{
std::unique_lock<std::mutex> lock(_taskQMutex);
const auto next = _taskQ.NextTime();
const auto now = TaskQueue::clock_t::now();
if (next <= now)
{
std::unique_ptr<TaskQueue::Task> spTask = _taskQ.popNextReady();
if (spTask)
{
// Run outside the lock; re-acquired at the top of the next iteration
lock.unlock();
spTask->Run();
}
}
else if (next == TaskQueue::timepoint_t::max())
{
// Queue is empty - sleep until a Sched* call notifies
_taskQCond.wait(lock);
}
else
{
// Sleep until the earliest task is due or a new task is scheduled
_taskQCond.wait_for(lock, next - now);
}
}
DoLogInfo("TaskThread exit");
}
// Runs 'func' synchronously on the task thread: executes inline if already
// on that thread, otherwise schedules it ('immediate' puts it at the front
// of the queue) and blocks until completion. An exception thrown by 'func'
// on the task thread is captured as an HRESULT and rethrown here.
void TaskThread::SchedBlock(const std::function<void()>& func, bool immediate)
{
if (IsCurrentThread())
{
func();
return;
}
AutoResetEvent completionEvent;
HRESULT hr = S_OK;
// Captures by reference are safe: this frame outlives the op because we
// block on completionEvent below.
auto execOp = [&completionEvent, &func, &hr]()
{
try
{
func();
}
catch (...)
{
hr = LOG_CAUGHT_EXCEPTION();
}
completionEvent.SetEvent();
};
if (immediate)
{
SchedImmediate(std::move(execOp));
}
else
{
Sched(std::move(execOp));
}
// Infinite wait (default timeout); result intentionally ignored
(void)completionEvent.Wait();
THROW_IF_FAILED(hr);
}
// True when a task with 'tag' is currently queued. The answer can become
// stale the moment the lock is released.
bool TaskThread::IsScheduled(_In_opt_ const void* tag) const
{
    std::lock_guard<std::mutex> qlock(_taskQMutex);
    return _taskQ.Exists(tag);
}

Просмотреть файл

@ -0,0 +1,104 @@
#pragma once
#include <condition_variable>
#include <chrono>
#include <mutex>
#include <thread>
#include "do_noncopyable.h"
#include "task_queue.h"
// Owns a single worker thread that executes tasks from a time-ordered
// TaskQueue. All queue access is serialized by _taskQMutex; _taskQCond
// wakes the worker when the queue changes.
class TaskThread : public DONonCopyable
{
public:
TaskThread();
~TaskThread();
// Removes all queued tasks carrying 'tag' (null tag is a no-op).
void Unschedule(_In_opt_ const void* tag);
// Schedules 'func' to run after 'delay'.
template <typename TLambda, typename TDuration>
void Sched(TLambda&& func, TDuration delay, _In_opt_ const void* tag)
{
std::unique_lock<std::mutex> lock(_taskQMutex);
_taskQ.Add(std::forward<TLambda>(func), delay, tag);
_taskQCond.notify_all();
}
// Schedules 'func' to run as soon as the worker is free (zero delay).
template <typename TLambda>
void Sched(TLambda&& func, _In_opt_ const void* tag = nullptr)
{
Sched(std::forward<TLambda>(func), std::chrono::milliseconds(0), tag);
}
// Schedules a function on the core thread or executes it if we're already on the core thread
template <typename TLambda>
void SchedOrRun(TLambda&& func, _In_opt_ const void* tag)
{
if (IsCurrentThread())
{
func();
}
else
{
Sched(std::forward<TLambda>(func), tag);
}
}
// Removes any existing scheduled task with the specified tag and then schedules a new one.
// Use this instead of calling unschedule() and sched() in sequence to avoid waking up corethread twice.
template <typename TLambda, typename TDuration>
void SchedReplace(TLambda&& func, TDuration delay, _In_ const void* tag)
{
DO_ASSERT(tag != nullptr);
std::unique_lock<std::mutex> lock(_taskQMutex);
_taskQ.Remove(tag);
_taskQ.Add(std::forward<TLambda>(func), delay, tag);
_taskQCond.notify_all();
}
// SchedReplace with zero delay.
template <typename TLambda>
void SchedReplace(TLambda&& func, _In_ const void* tag)
{
SchedReplace(std::forward<TLambda>(func), std::chrono::milliseconds(0), tag);
}
// Schedules 'func' ahead of everything already queued.
template <typename TLambda>
void SchedImmediate(TLambda&& func, _In_opt_ const void* tag = nullptr)
{
std::unique_lock<std::mutex> lock(_taskQMutex);
_taskQ.AddFront(std::forward<TLambda>(func), tag);
_taskQCond.notify_all();
}
// Like schedImmediate but the schedule is optional, i.e., 'func' may not be executed in case of
// out of memory error while scheduling. Any other error will result in failfast exception.
template <typename TLambda>
void SchedImmediateOpt(TLambda&& func, _In_opt_ const void* tag = nullptr) try
{
SchedImmediate(std::forward<TLambda>(func), tag);
} CATCH_LOG()
// Runs 'func' synchronously on the worker; rethrows its exceptions here.
void SchedBlock(const std::function<void()>& func, bool immediate = true);
// True when a task with 'tag' is currently queued.
bool IsScheduled(_In_opt_ const void* tag) const;
bool IsCurrentThread() const noexcept
{ return ThreadId() == std::this_thread::get_id(); }
std::thread::id ThreadId() const noexcept
{ return _thread.get_id(); }
private:
void _DoPoll();
// A queue of scheduled operations to execute on the core thread
TaskQueue _taskQ;
mutable std::mutex _taskQMutex;
std::condition_variable _taskQCond;
// Don't init here because _DoPoll depends on _fRunning.
// Non-trivial init like starting a thread should really be
// done in the constructor body.
std::thread _thread;
bool _fRunning { true };
};

Просмотреть файл

@ -0,0 +1,112 @@
#pragma once
#include <chrono>
#include <shared_mutex>
#include "config_defaults.h"
#include "do_event.h"
#include "do_noncopyable.h"
// Counts outstanding operations via RAII tokens (scope_exit). _event is
// signaled exactly while the count is zero, so Wait() blocks until all
// outstanding tokens have been destroyed.
class WaitableCounter : private DONonCopyable
{
public:
WaitableCounter() = default;
~WaitableCounter()
{
// Destruction can happen immediately after _Leave signals _event, which
// means _lock might not exist when _Leave gets around to releasing it.
// Acquire the lock here to ensure _Leave can release it safely.
std::unique_lock<std::shared_timed_mutex> lock(_lock);
}
// Blocks until the count reaches zero or 'timeout' elapses.
bool Wait(std::chrono::milliseconds timeout = g_steadyClockInfiniteWaitTime) const noexcept
{
// Returns true when _count is zero
return _event.Wait(timeout);
}
private:
// RAII token: while alive it holds one increment on the owning counter;
// its destructor (or _Reset) releases that increment via _Leave.
class scope_exit
{
public:
scope_exit(WaitableCounter* pCounter) : _pCounter(pCounter)
{
DO_ASSERT(_pCounter != nullptr);
}
// Lambdas in http_agent.cpp require this to be copyable
scope_exit(const scope_exit& other) : _pCounter(nullptr)
{
*this = other;
}
// Copy-assign: drop our current reference, then take a fresh increment
// on the source's counter (copying means one more outstanding token).
scope_exit& operator=(const scope_exit& other)
{
_Reset();
auto counter = other._pCounter;
if (counter != nullptr)
{
*this = std::move(counter->Enter());
}
return *this;
}
// Move transfers ownership of the increment; no count change.
scope_exit(scope_exit&& other) noexcept
{
_pCounter = other._pCounter;
other._pCounter = nullptr;
}
scope_exit& operator=(scope_exit&& other) noexcept
{
_Reset();
_pCounter = other._pCounter;
other._pCounter = nullptr;
return *this;
}
~scope_exit()
{
_Reset();
}
private:
// Releases the held increment, if any (idempotent).
void _Reset() noexcept
{
if (_pCounter != nullptr)
{
_pCounter->_Leave();
_pCounter = nullptr;
}
}
WaitableCounter *_pCounter;
};
public:
// Increments the count and returns the RAII token that decrements it.
scope_exit Enter() noexcept
{
std::unique_lock<std::shared_timed_mutex> lock(_lock);
++_count;
if (_count == 1)
{
// First outstanding operation: mark "not idle"
_event.ResetEvent();
}
return scope_exit(this);
}
private:
void _Leave() noexcept
{
std::unique_lock<std::shared_timed_mutex> lock(_lock);
DO_ASSERT(_count > 0);
--_count;
if (_count == 0)
{
// Last outstanding operation finished: wake waiters
_event.SetEvent();
}
}
// Manual-reset so every waiter is released; starts signaled (count == 0)
mutable ManualResetEvent _event { true };
std::shared_timed_mutex _lock;
UINT _count { 0 };
};

Просмотреть файл

@ -0,0 +1,42 @@
#include "do_common.h"
#include "event_data.h"
#include "download.h"
// Snapshots the telemetry-relevant fields of a Download at event time.
TelDataDownloadInfo::TelDataDownloadInfo(const Download& download) :
_guid(download.GetId()),
_status(download.GetStatus()),
_url(download.GetUrl()),
_destinationPath(download.GetDestinationPath()),
_mccHost(download.GetMCCHost())
{
}
// Captures common download info at the moment the download starts.
EventDataDownloadStarted::EventDataDownloadStarted(const Download& download) :
_commonData(download)
{
}
// Captures common download info plus total elapsed time at completion.
EventDataDownloadCompleted::EventDataDownloadCompleted(const Download& download) :
_commonData(download),
_elapsedTime(download.GetElapsedTime())
{
}
// Captures common download info at the moment the download is paused.
EventDataDownloadPaused::EventDataDownloadPaused(const Download& download) :
_commonData(download)
{
}
// Captures common download info at the moment the download is canceled.
EventDataDownloadCanceled::EventDataDownloadCanceled(const Download& download) :
_commonData(download)
{
}
// Lightweight per-poll status snapshot.
// NOTE(review): uses Status()/HttpStatusCode() accessors while
// TelDataDownloadInfo uses GetStatus() - presumably both exist on Download;
// confirm against download.h.
EventDataDownloadStatus::EventDataDownloadStatus(const Download& download) :
id(download.GetId()),
status(download.Status()),
httpStatusCode(download.HttpStatusCode())
{
}

Просмотреть файл

@ -0,0 +1,59 @@
#pragma once
#include <chrono>
#include <string>
#include "do_guid.h"
#include "download_status.h"
class Download;
// Common telemetry payload shared by the download lifecycle events below.
struct TelDataDownloadInfo
{
TelDataDownloadInfo(const Download& download);
TelDataDownloadInfo() = default;
GUID _guid;                     // download identity
DownloadStatus _status;         // status snapshot at event time
std::string _url;               // source (CDN) URL
std::string _destinationPath;   // local output file path
std::string _mccHost;           // Microsoft Connected Cache host, if any
};
// Payload for the download-started telemetry event.
struct EventDataDownloadStarted
{
EventDataDownloadStarted(const Download& download);
TelDataDownloadInfo _commonData;
};
// Payload for the download-completed telemetry event.
struct EventDataDownloadCompleted
{
EventDataDownloadCompleted(const Download& download);
TelDataDownloadInfo _commonData;
// Total download duration as reported by Download::GetElapsedTime()
std::chrono::milliseconds _elapsedTime;
};
// Payload for the download-paused telemetry event.
struct EventDataDownloadPaused
{
EventDataDownloadPaused(const Download& download);
TelDataDownloadInfo _commonData;
};
// Payload for the download-canceled telemetry event.
struct EventDataDownloadCanceled
{
EventDataDownloadCanceled(const Download& download);
TelDataDownloadInfo _commonData;
};
// Payload for the periodic download-status telemetry event.
struct EventDataDownloadStatus
{
EventDataDownloadStatus(const Download& download);
GUID id;
DownloadStatus status;
UINT httpStatusCode;
};

Просмотреть файл

@ -0,0 +1,47 @@
#include "do_common.h"
#include "telemetry_logger.h"
// Process-wide singleton; C++11 guarantees thread-safe one-time init of
// the function-local static.
TelemetryLogger& TelemetryLogger::getInstance()
{
static TelemetryLogger myInstance;
return myInstance;
}
// Logs the download-started event (log-only until telemetry is instrumented).
void TelemetryLogger::TraceDownloadStart(const EventDataDownloadStarted& eventData)
{
DoLogInfoHr(eventData._commonData._status.Error, "id: %s, url: %s, filePath: %s, mccHost: %s",
GuidToString(eventData._commonData._guid).data(), eventData._commonData._url.c_str(),
eventData._commonData._destinationPath.c_str(), eventData._commonData._mccHost.c_str());
}
// Logs the download-completed event with byte counts and elapsed time.
void TelemetryLogger::TraceDownloadCompleted(const EventDataDownloadCompleted& eventData)
{
DoLogInfo("id: %s, url: %s, mccHost: %s, filePath: %s, bytes: [total: %ld, down: %ld], timeMS: %ld",
GuidToString(eventData._commonData._guid).data(), eventData._commonData._url.c_str(), eventData._commonData._mccHost.c_str(),
eventData._commonData._destinationPath.c_str(), eventData._commonData._status.BytesTotal,
eventData._commonData._status.BytesTransferred, eventData._elapsedTime.count());
}
// Logs the download-paused event, including error and extended error codes.
void TelemetryLogger::TraceDownloadPaused(const EventDataDownloadPaused& eventData)
{
DoLogInfoHr(eventData._commonData._status.Error, "id: %s, extError: %x, cdnUrl: %s, mccHost: %s, filePath: %s, bytes: [total: %ld, down: %ld]",
GuidToString(eventData._commonData._guid).data(), eventData._commonData._status.ExtendedError, eventData._commonData._url.c_str(),
eventData._commonData._mccHost.c_str(), eventData._commonData._destinationPath.c_str(), eventData._commonData._status.BytesTotal,
eventData._commonData._status.BytesTransferred);
}
// Logs the download-canceled event, including error and extended error codes.
// Fix: the original passed _destinationPath before _mccHost, but the format
// string labels them "mccHost: %s, filePath: %s" - the two values were
// swapped in the log output. Argument order now matches the format string
// (and TraceDownloadPaused, which uses the identical format).
void TelemetryLogger::TraceDownloadCanceled(const EventDataDownloadCanceled& eventData)
{
    DoLogInfoHr(eventData._commonData._status.Error, "id: %s, extError: %x, cdnUrl: %s, mccHost: %s, filePath: %s, bytes: [total: %ld, down: %ld]",
        GuidToString(eventData._commonData._guid).data(), eventData._commonData._status.ExtendedError, eventData._commonData._url.c_str(),
        eventData._commonData._mccHost.c_str(), eventData._commonData._destinationPath.c_str(), eventData._commonData._status.BytesTotal,
        eventData._commonData._status.BytesTransferred);
}
// Logs the periodic status event at verbose level (can be chatty).
void TelemetryLogger::TraceDownloadStatus(const EventDataDownloadStatus& eventData)
{
DoLogVerbose("id: %s, %d, codes: [%u, 0x%x, 0x%x], %llu / %llu", GuidToString(eventData.id).c_str(), eventData.status.State,
eventData.httpStatusCode, eventData.status.Error, eventData.status.ExtendedError, eventData.status.BytesTransferred,
eventData.status.BytesTotal);
}

Просмотреть файл

@ -0,0 +1,18 @@
#pragma once
#include "do_noncopyable.h"
#include "event_data.h"
// TODO: Instrument telemetry provider
// Until telemetry is instrumented, this class serves as a wrapper for logging telemetry events with no data being sent
// TODO: Instrument telemetry provider
// Until telemetry is instrumented, this class serves as a wrapper for logging telemetry events with no data being sent
class TelemetryLogger : DONonCopyable
{
public:
// Process-wide singleton accessor.
static TelemetryLogger& getInstance();
void TraceDownloadStart(const EventDataDownloadStarted& eventData);
void TraceDownloadCompleted(const EventDataDownloadCompleted& eventData);
void TraceDownloadPaused(const EventDataDownloadPaused& eventData);
void TraceDownloadCanceled(const EventDataDownloadCanceled& eventData);
void TraceDownloadStatus(const EventDataDownloadStatus& eventData);
};

Просмотреть файл

@ -0,0 +1,103 @@
#include "do_common.h"
#include "trace_sink.h"
#include <unistd.h> // getpid
#include <sys/syscall.h> // SYS_gettid
#include <boost/log/expressions.hpp>
#include "config_defaults.h"
#include "do_date_time.h"
#include "do_persistence.h"
namespace expr = boost::log::expressions;
namespace keywords = boost::log::keywords;
namespace logging = boost::log;
namespace sinks = boost::log::sinks;
// Boost.Log formatter: prefixes each record with
// "<UTC timestamp> <pid> <tid> <severity> " followed by the message.
static void DOLogFormatter(const logging::record_view& rec, logging::formatting_ostream& stream)
{
static const pid_t pid = getpid();
const auto tid = static_cast<int>(syscall(SYS_gettid)); // using syscall because glibc wrapper is unavailable
const char* sev = boost::log::trivial::to_string(rec[logging::trivial::severity].get());
if (sev == nullptr)
{
DO_ASSERT(false);
sev = "invalid";
}
const auto timeStr = SysTimePointToUTCString(wall_clock_t::now());
// Timestamp ProcessID ThreadID severity message
// snprintf truncates and null-terminates if the prefix exceeds 128 chars
std::array<char, 128> prefixBuf;
snprintf(prefixBuf.data(), prefixBuf.size(), "%s %-5d %-5d %-8s ", timeStr.data(), pid, tid, sev);
const auto msg = rec[expr::smessage];
stream << prefixBuf.data() << msg;
#ifdef DEBUG
// Log to console. Better do this here than in trace_src.cpp in order to get the fully formatted log line.
// If a sink is not registered with the boost.log core, then it will by default dump all logs to the console,
// without coming through here.
printf("%s %s\n", prefixBuf.data(), msg.get().data());
#endif
}
// Process-wide singleton; thread-safe one-time init via local static.
TraceConsumer& TraceConsumer::getInstance()
{
static TraceConsumer myInstance;
return myInstance;
}
// Minimum severity that will be logged (compile-time default).
logging::trivial::severity_level TraceConsumer::Level()
{
return DEF_TRACE_LEVEL;
}
// Creates and registers the rotating text-file sink. Idempotent: returns
// S_OK without re-initializing when a sink already exists. Exceptions are
// converted to an HRESULT by CATCH_RETURN.
HRESULT TraceConsumer::Initialize() noexcept try
{
std::unique_lock<std::shared_timed_mutex> lock(_traceSessionLock);
// Already initialized - nothing to do
RETURN_HR_IF(S_OK, _spSink);
const std::string logPath = docli::GetPersistenceDirectory() + "/log";
// %Y%m%d_%H%M%S makes each rotated file name unique by timestamp
const std::string logNameFormat = logPath + "/do-agent.%Y%m%d_%H%M%S.log";
auto spSink = boost::make_shared<textfile_sink>(
keywords::file_name = logNameFormat,
keywords::rotation_size = DEF_TRACE_FILE_MAXSIZE_BYTES);
spSink->set_formatter(&DOLogFormatter);
auto level = Level();
logging::core::get()->set_filter(logging::trivial::severity >= level);
// set_file_collector creates the target dir, recursively, if not exists
spSink->locked_backend()->set_file_collector(sinks::file::make_collector(
keywords::target = logPath.data(),
keywords::max_size = DEF_TRACE_FOLDER_MAXSIZE_BYTES));
// Scan the directory for previously used files so the collector knows about them
spSink->locked_backend()->scan_for_files();
// Register the sink in the logging core
logging::core::get()->add_sink(spSink);
_spSink = spSink;
return S_OK;
} CATCH_RETURN()
// Flushes and unregisters the sink. Safe to call when not initialized.
void TraceConsumer::Finalize()
{
std::unique_lock<std::shared_timed_mutex> lock(_traceSessionLock);
if (_spSink)
{
_spSink->flush();
logging::core::get()->remove_sink(_spSink);
_spSink.reset();
}
}
// Flushes buffered log records to disk; exceptions are logged and swallowed.
void TraceConsumer::Flush() noexcept try
{
std::unique_lock<std::shared_timed_mutex> lock(_traceSessionLock);
if (_spSink)
{
_spSink->flush();
}
} CATCH_LOG()

Просмотреть файл

@ -0,0 +1,26 @@
#pragma once
#include <shared_mutex>
#include <boost/log/sinks.hpp>
#include "do_noncopyable.h"
// Owns the Boost.Log file sink for the process. _traceSessionLock guards
// creation/teardown/flush of _spSink.
class TraceConsumer : DONonCopyable
{
public:
// Returns a static instance of this class
static TraceConsumer& getInstance();
// Minimum severity that is logged.
static boost::log::trivial::severity_level Level();
HRESULT Initialize() noexcept; // Start logging sink
void Finalize(); // Flush and close logging sink
void Flush() noexcept;
protected:
TraceConsumer() = default;
using textfile_sink = boost::log::sinks::synchronous_sink<boost::log::sinks::text_file_backend>;
boost::shared_ptr<textfile_sink> _spSink;
std::shared_timed_mutex _traceSessionLock;
};

Просмотреть файл

@ -0,0 +1,147 @@
#include "do_common.h"
#include "trace_src.h"
#include <cstdarg> // va_start, etc.
#include <memory>
#include "string_ops.h"
#include "trace_sink.h"
// Routes docli runtime failures (error macros) into this logging module.
void DoTraceLoggingRegister()
{
docli::SetResultLoggingCallback(LogRuntimeFailure);
}
// Detaches the runtime-failure logging callback.
void DoTraceLoggingUnregister()
{
docli::SetResultLoggingCallback(nullptr);
}
// True when records of 'level' pass the consumer's severity threshold.
bool IsLoggingEnabled(boost::log::trivial::severity_level level)
{
return (level >= TraceConsumer::Level());
}
// Convenience check for the lowest (trace/verbose) severity.
bool IsVerboseLoggingEnabled()
{
return IsLoggingEnabled(EVENT_LEVEL_VERBOSE);
}
// Callback invoked by the error macros for every runtime failure. Picks the
// most descriptive text available (message > macro code > file/line only).
void LogRuntimeFailure(const docli::FailureInfo& failure) noexcept
{
// Runtime failures, including caught exceptions, are logged here.
// By default, pszFunction is present in debug code, not retail.
// Similarly, pszCode (string version of macro contents) is debug only.
// pszMessage is only present when using a _MSG macro, such as LOG_IF_FAILED_MSG.
if (failure.pszMessage != nullptr)
{
LogResult(EVENT_LEVEL_ERROR, failure.pszFunction, failure.uLineNumber, failure.hr, "%s [%s, %d]", failure.pszMessage, failure.pszFile, failure.uLineNumber);
}
else if (failure.pszCode != nullptr)
{
LogResult(EVENT_LEVEL_ERROR, failure.pszFunction, failure.uLineNumber, failure.hr, "%s [%s, %d]", failure.pszCode, failure.pszFile, failure.uLineNumber);
}
else
{
LogResult(EVENT_LEVEL_ERROR, failure.pszFunction, failure.uLineNumber, failure.hr, "[%s, %d]", failure.pszFile, failure.uLineNumber);
}
}
// Formats "{func} (hr:X) <user message>" into pszBuffer and emits it to the
// Boost.Log core. hrIn == HRESULT(-1) means "no HRESULT to report".
// Returns STRSAFE_E_INSUFFICIENT_BUFFER when cchBuffer is too small so the
// caller (LogMessageV) can retry with a larger buffer.
// NOTE(review): nLine is currently unused in the message format.
static HRESULT _LogMessage(_In_ PSTR pszBuffer, size_t cchBuffer, boost::log::trivial::severity_level level,
_In_opt_ PCSTR pszFunc, UINT nLine, HRESULT hrIn, _In_ _Printf_format_string_ PCSTR pszMsg, _In_ va_list argList)
{
HRESULT hr = S_OK;
int cchWritten = 0;
// Note: pszFunc may not be available. Example is logging from error_macros.cpp in release builds.
if (hrIn != HRESULT(-1))
{
if (pszFunc != nullptr)
{
hr = StringPrintf(pszBuffer, cchBuffer, &cchWritten, "{%s} (hr:%X) ", pszFunc, hrIn);
}
else
{
hr = StringPrintf(pszBuffer, cchBuffer, &cchWritten, "(hr:%X) ", hrIn);
}
}
else
{
if (pszFunc != nullptr)
{
hr = StringPrintf(pszBuffer, cchBuffer, &cchWritten, "{%s} ", pszFunc);
}
}
// Append the user provided message
if (SUCCEEDED(hr))
{
hr = StringPrintfV(pszBuffer + cchWritten, (cchBuffer - cchWritten), pszMsg, argList);
}
if (SUCCEEDED(hr))
{
try
{
BOOST_LOG_SEV(::boost::log::trivial::logger::get(), level) << pszBuffer;
}
catch (const std::exception& ex)
{
// Never let a logging failure propagate to the caller
#ifdef DEBUG
printf("Logging exception: %s\n", ex.what());
#endif
}
}
return hr;
}
// Formats and emits a log record, retrying with geometrically growing heap
// buffers when the 200-byte stack buffer is too small. Gives up silently on
// allocation failure.
// Fix: the C standard requires every va_copy to be paired with va_end in
// the same function; the original never called va_end on argCopy (UB and a
// potential resource leak on ABIs where va_list owns state).
void LogMessageV(boost::log::trivial::severity_level level, _In_ PCSTR pszFunc, UINT nLine, HRESULT hrIn,
    _In_ _Printf_format_string_ PCSTR pszMsg, _In_ va_list argList)
{
    // TBD save and restore errno?
    // Build the message text
    char szMessage[200];
    size_t cchMessage = ARRAYSIZE(szMessage);
    // Try first with the stack buffer. Copy argList so it can be reused on retry.
    va_list argCopy;
    va_copy(argCopy, argList);
    HRESULT hr = _LogMessage(szMessage, cchMessage, level, pszFunc, nLine, hrIn, pszMsg, argCopy);
    va_end(argCopy);
    while (hr == STRSAFE_E_INSUFFICIENT_BUFFER)
    {
        // Use a heap buffer, doubling until the message fits
        cchMessage *= 2;
        std::unique_ptr<char[]> spMessage;
        spMessage.reset(new (std::nothrow) char[cchMessage]);
        hr = spMessage ? S_OK : E_OUTOFMEMORY;
        if (SUCCEEDED(hr))
        {
            va_copy(argCopy, argList);
            hr = _LogMessage(spMessage.get(), cchMessage, level, pszFunc, nLine, hrIn, pszMsg, argCopy);
            va_end(argCopy);
        }
    }
}
// Public printf-style entry point without an HRESULT. HRESULT(-1) is the
// sentinel _LogMessage interprets as "no HRESULT".
void LogMessage(boost::log::trivial::severity_level level, _In_ PCSTR pszFunc, UINT nLine, _In_ _Printf_format_string_ PCSTR pszMsg, ...)
{
if (IsLoggingEnabled(level))
{
va_list marker;
va_start(marker, pszMsg);
LogMessageV(level, pszFunc, nLine, HRESULT(-1), pszMsg, marker);
va_end(marker);
}
}
// Public printf-style entry point that also records an HRESULT.
void LogResult(boost::log::trivial::severity_level level, _In_ PCSTR pszFunc, UINT nLine, HRESULT hr, _In_ _Printf_format_string_ PCSTR pszMsg, ...)
{
if (IsLoggingEnabled(level))
{
va_list marker;
va_start(marker, pszMsg);
LogMessageV(level, pszFunc, nLine, hr, pszMsg, marker);
va_end(marker);
}
}

Просмотреть файл

@ -0,0 +1,46 @@
#pragma once
// Public logging API: severity macros wrap LogMessage/LogResult with the
// caller's function name and line number.
void DoTraceLoggingRegister();
void DoTraceLoggingUnregister();
bool IsLoggingEnabled(boost::log::trivial::severity_level level);
bool IsVerboseLoggingEnabled();
void LogRuntimeFailure(const docli::FailureInfo& failure) noexcept;
// Map project severity names onto boost::log::trivial levels
#define EVENT_LEVEL_ERROR boost::log::trivial::error
#define EVENT_LEVEL_WARNING boost::log::trivial::warning
#define EVENT_LEVEL_INFO boost::log::trivial::info
#define EVENT_LEVEL_VERBOSE boost::log::trivial::trace
// '##' is required before __VA_ARGS__ to allow an empty arg list to the variadic macro.
// MSVC supports this by default but GCC requires '##'. C++2a has added VA_OPT macro
// to officially support this behavior.
#define DoLogMessage(level, msg, ...) ::LogMessage((level), __FUNCTION__, __LINE__, (msg), ##__VA_ARGS__)
#define DoLogResult(lvl, hr, msg, ...) ::LogResult((lvl), __FUNCTION__, __LINE__, (hr), (msg), ##__VA_ARGS__)
#define DoLogError(msg, ...) DoLogMessage(EVENT_LEVEL_ERROR, (msg), ##__VA_ARGS__)
#define DoLogWarning(msg, ...) DoLogMessage(EVENT_LEVEL_WARNING, (msg), ##__VA_ARGS__)
#define DoLogInfo(msg, ...) DoLogMessage(EVENT_LEVEL_INFO, (msg), ##__VA_ARGS__)
#define DoLogVerbose(msg, ...) DoLogMessage(EVENT_LEVEL_VERBOSE, (msg), ##__VA_ARGS__)
#ifdef DEBUG
#define DoLogDebug(msg, ...) DoLogMessage(EVENT_LEVEL_VERBOSE, (msg), ##__VA_ARGS__)
#else
// Debug-only logging compiles away in release builds
#define DoLogDebug(msg, ...)
#endif
#define DoLogErrorHr(hr, msg, ...) DoLogResult(EVENT_LEVEL_ERROR, (hr), (msg), ##__VA_ARGS__)
#define DoLogWarningHr(hr, msg, ...) DoLogResult(EVENT_LEVEL_WARNING, (hr), (msg), ##__VA_ARGS__)
#define DoLogInfoHr(hr, msg, ...) DoLogResult(EVENT_LEVEL_INFO, (hr), (msg), ##__VA_ARGS__)
#define DoLogVerboseHr(hr, msg, ...) DoLogResult(EVENT_LEVEL_VERBOSE, (hr), (msg), ##__VA_ARGS__)
void LogMessageV(boost::log::trivial::severity_level level, _In_ PCSTR pszFunc, UINT nLine, HRESULT hrIn,
_In_ _Printf_format_string_ PCSTR pszMsg, _In_ va_list argList);
void LogMessage(boost::log::trivial::severity_level level, _In_ PCSTR pszFunc, UINT uLine,
_In_ _Printf_format_string_ PCSTR pszMsg, ...);
void LogResult(boost::log::trivial::severity_level level, _In_ PCSTR pszFunc, UINT uLine, HRESULT hr,
_In_ _Printf_format_string_ PCSTR pszMsg, ...);

Просмотреть файл

@ -0,0 +1,30 @@
#include "do_common.h"
#include "ban_list.h"
// Bans 'name' until now + banInterval. A repeat report overwrites (extends
// or shortens) the existing unban time.
void CBanList::Report(const std::string& name, std::chrono::milliseconds banInterval)
{
_banList[name] = std::chrono::system_clock::now() + banInterval;
DoLogInfo("%s banned for %lld ms", name.data(), static_cast<INT64>(banInterval.count()));
}
// True while 'name' is still within its ban interval. Expired entries are
// dropped lazily the first time they are queried after expiry.
bool CBanList::IsBanned(const std::string& name)
{
    const auto it = _banList.find(name);
    if (it == _banList.end())
    {
        return false;
    }
    const auto now = std::chrono::system_clock::now();
    const auto unbanTime = it->second;
    if (unbanTime <= now)
    {
        // Ban expired - clean up on lookup
        _banList.erase(it);
        DoLogVerbose("%s removed from ban list", name.data());
        return false;
    }
    const auto diff = std::chrono::duration_cast<std::chrono::milliseconds>(unbanTime - now);
    DoLogVerbose("%s will be unbanned after %lld ms", it->first.data(), static_cast<INT64>(diff.count()));
    return true;
}

Просмотреть файл

@ -0,0 +1,18 @@
#pragma once
#include <chrono>
#include <string>
#include <unordered_map>
// Simple banning functionality - ban on first offence until specified time point.
// Banning using a moving window is left undone until we need to support banning
// only after repeated offences threshold and phasing out of the offence counts over time.
class CBanList
{
public:
// Bans 'name' for 'banInterval' starting now.
void Report(const std::string& name, std::chrono::milliseconds banInterval);
// True while 'name' is within its ban interval; expired entries are pruned.
bool IsBanned(const std::string& name);
private:
// name -> absolute wall-clock time at which the ban lapses
std::unordered_map<std::string, std::chrono::system_clock::time_point> _banList;
};

Просмотреть файл

@ -0,0 +1,22 @@
#include "do_common.h"
#include <chrono>
using filetime_duration_t = std::chrono::duration<INT64, std::ratio<1,10000000>>;
using wall_clock_t = std::chrono::system_clock;
#define filetime_cast(d) std::chrono::duration_cast<filetime_duration_t>(d)
#define seconds_cast(d) std::chrono::duration_cast<std::chrono::seconds>(d)
inline std::array<char, 30> SysTimePointToUTCString(wall_clock_t::time_point timePoint)
{
const auto tt = wall_clock_t::to_time_t(timePoint);
struct tm st = {};
gmtime_r(&tt, &st);
auto ft = filetime_cast(timePoint.time_since_epoch());
auto fractionalSeconds = ft - seconds_cast(ft);
std::array<char, 30> timebuf = {};
snprintf(timebuf.data(), timebuf.size(), "%04d-%02d-%02dT%02d:%02d:%02d.%07dZ",
st.tm_year + 1900, st.tm_mon + 1, st.tm_mday, st.tm_hour, st.tm_min, st.tm_sec, static_cast<int>(fractionalSeconds.count()));
return timebuf;
}

Просмотреть файл

@ -0,0 +1,52 @@
#include "do_common.h"
#include "do_guid.h"
#include <boost/algorithm/string.hpp>
#include <boost/uuid/uuid.hpp>
#include <boost/uuid/uuid_generators.hpp>
#include "string_ops.h"
// Generates a random GUID by copying the raw bytes of a boost random uuid
// into the project's GUID struct (sizes verified at compile time).
GUID CreateNewGuid()
{
static_assert(sizeof(GUID) == boost::uuids::uuid::static_size());
boost::uuids::uuid id = boost::uuids::random_generator()();
GUID newGuid;
memcpy(&newGuid, &id, sizeof(newGuid));
return newGuid;
}
// Parses "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", optionally wrapped in
// braces, case-insensitively. Returns true on success; writes the parsed
// value through 'guidVal' only when it is non-null (it may stay untouched
// on failure).
bool StringToGuid(PCSTR guidStr, GUID* guidVal)
{
constexpr size_t GUIDSTR_MIN = GUIDSTR_MAX - 2 - 1; // without braces and null-terminator
constexpr int GUID_SEGMENTS = 11;
const size_t len = strlen(guidStr);
if ((GUIDSTR_MIN <= len) && (len <= (GUIDSTR_MAX - 1)))
{
// Strip braces and normalize to lowercase before scanning
std::string localGuidStr(guidStr);
StringCleanup(localGuidStr, "{}");
boost::algorithm::to_lower(localGuidStr);
if (localGuidStr.size() == GUIDSTR_MIN)
{
GUID tempGuid;
const int ret = StringScanf(localGuidStr.data(), "%8x-%4hx-%4hx-%2hhx%2hhx-%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx",
&tempGuid.Data1, &tempGuid.Data2, &tempGuid.Data3,
&tempGuid.Data4[0], &tempGuid.Data4[1], &tempGuid.Data4[2], &tempGuid.Data4[3],
&tempGuid.Data4[4], &tempGuid.Data4[5], &tempGuid.Data4[6], &tempGuid.Data4[7]);
// All 11 fields must scan successfully
if (ret == GUID_SEGMENTS)
{
assign_to_opt_param(guidVal, tempGuid);
return true;
}
}
}
return false;
}
// Formats a GUID as the lowercase, brace-free canonical string
// "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx".
std::string GuidToString(REFGUID guid)
{
std::array<char, GUIDSTR_MAX> guidStr = {};
StringPrintf(guidStr.data(), guidStr.size(), "%08x-%04hx-%04hx-%02x%02x-%02x%02x%02x%02x%02x%02x",
guid.Data1, guid.Data2, guid.Data3, guid.Data4[0], guid.Data4[1],
guid.Data4[2], guid.Data4[3], guid.Data4[4], guid.Data4[5], guid.Data4[6], guid.Data4[7]);
return guidStr.data();
}

Просмотреть файл

@ -0,0 +1,19 @@
#pragma once
// Minimal Windows-compatible GUID layout for non-Windows builds.
typedef struct
{
uint32_t Data1; // Can't use unsigned long here because it is 8bytes on linux
uint16_t Data2;
uint16_t Data3;
uint8_t Data4[8];
} GUID, IID;
using REFGUID = const GUID&;
#ifndef GUIDSTR_MAX
// Length of "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" including braces and NUL
#define GUIDSTR_MAX (1 + 8 + 1 + 4 + 1 + 4 + 1 + 4 + 1 + 12 + 1 + 1)
#endif
GUID CreateNewGuid();
// Parses a GUID string; 'guidVal' may be null to merely validate the string.
bool StringToGuid(PCSTR guidStr, GUID* guidVal = nullptr);
std::string GuidToString(REFGUID guid);

Просмотреть файл

@ -0,0 +1,45 @@
#include "do_common.h"
#include "do_json_parser.h"
#include <boost/filesystem.hpp>
#include <boost/property_tree/json_parser.hpp>
// Minimum interval between re-reads of the backing file (see _TryRefresh).
std::chrono::seconds JsonParser::RefreshInterval = std::chrono::seconds(60);
// Stores file path provided. Loads from the file later when a value is queried for.
JsonParser::JsonParser(const std::string& jsonFilePath) :
_jsonFilePath(jsonFilePath)
{
}
// Re-reads the JSON file if the refresh interval has elapsed. A missing file
// clears the cached tree; a parse failure leaves the previous tree in place.
void JsonParser::_TryRefresh()
{
    if (std::chrono::steady_clock::now() < _nextRefreshTime)
    {
        return; // cache still fresh
    }

    if (!boost::filesystem::exists(_jsonFilePath))
    {
        DoLogVerbose("json file not found at %s", _jsonFilePath.data());
        _tree.clear();
    }
    else
    {
        try
        {
            boost::property_tree::read_json(_jsonFilePath, _tree);
            DoLogInfo("Read json config file %s", _jsonFilePath.data());
        }
        catch (const std::exception& ex)
        {
            DoLogWarning("Could not read json config file %s: %s", _jsonFilePath.data(), ex.what());
        }
        catch (...)
        {
            DoLogWarning("Caught unexpected exception when reading json config file %s", _jsonFilePath.data());
        }
    }

    _nextRefreshTime = std::chrono::steady_clock::now() + RefreshInterval;
}

Просмотреть файл

@ -0,0 +1,39 @@
#pragma once
#include <chrono>
#include <string>
#include <boost/optional.hpp>
#include <boost/property_tree/ptree.hpp>
// Lazily-loading JSON config reader: the file is (re)parsed on demand from
// Get(), at most once per RefreshInterval.
class JsonParser
{
public:
    // Refresh period shared by all instances.
    static std::chrono::seconds RefreshInterval;

    JsonParser(const std::string& jsonFilePath);

    // Returns the value at 'key' (boost::property_tree path-style key), or
    // boost::none if the key is absent or not convertible to T.
    template<typename T>
    boost::optional<T> Get(const std::string& key)
    {
        _TryRefresh();
        boost::optional<T> value;
        try
        {
            value = _tree.get<T>(key);
        }
        catch (...)
        {
            // Missing key or conversion failure: log and return boost::none.
            LOG_CAUGHT_EXCEPTION();
        }
        return value;
    }

private:
    void _TryRefresh();

    const std::string _jsonFilePath;
    boost::property_tree::ptree _tree;                      // cached parsed document
    std::chrono::steady_clock::time_point _nextRefreshTime{}; // zero => refresh on first Get
};

Просмотреть файл

@ -0,0 +1,12 @@
#pragma once
// Handy base class to create non-copyable classes.
// Deriving from this deletes the copy operations of the derived class.
// Note: deleted copy operations also suppress the implicit move operations,
// so derived types are non-movable as well.
class DONonCopyable
{
public:
    DONonCopyable(const DONonCopyable&) = delete;
    DONonCopyable& operator=(const DONonCopyable&) = delete;

protected:
    // Defaulted (was a user-provided empty body) so the type remains trivially
    // default-constructible; protected so only derived classes can instantiate.
    DONonCopyable() = default;
};

Просмотреть файл

@ -0,0 +1,37 @@
#include "do_common.h"
#include "do_persistence.h"
namespace docli
{

// Each accessor below returns a lazily-initialized, process-lifetime constant
// path. The DO_*_PATH macros are presumably injected by the build — confirm.

const std::string& GetPersistenceDirectory()
{
    static std::string myDirectory(DO_PERSISTENCE_DIRECTORY_PATH);
    return myDirectory;
}

const std::string& GetRuntimeDirectory()
{
    static std::string runDirectory(DO_RUN_DIRECTORY_PATH);
    return runDirectory;
}

const std::string& GetConfigDirectory()
{
    static std::string configDirectory(DO_CONFIG_DIRECTORY_PATH);
    return configDirectory;
}

// Config written by/for the SDK side.
const std::string& GetSDKConfigFilePath()
{
    static std::string configFilePath(DO_CONFIG_DIRECTORY_PATH "/sdk-config.json");
    return configFilePath;
}

// Config written by an administrator.
const std::string& GetAdminConfigFilePath()
{
    static std::string configFilePath(DO_CONFIG_DIRECTORY_PATH "/admin-config.json");
    return configFilePath;
}

} // namespace docli

Просмотреть файл

@ -0,0 +1,10 @@
#pragma once
namespace docli
{
// Accessors for well-known filesystem locations used by the DO client.
// Each returns a reference to a function-local static (stable for process lifetime).
const std::string& GetPersistenceDirectory();
const std::string& GetRuntimeDirectory();
const std::string& GetConfigDirectory();
const std::string& GetSDKConfigFilePath();
const std::string& GetAdminConfigFilePath();
} // namespace docli

Просмотреть файл

@ -0,0 +1,265 @@
#include "do_common.h"
#include "error_macros.h"
#include <boost/system/system_error.hpp>
#include <cpprest/http_msg.h> // web::http::http_exception
#include <cpprest/json.h> // web::json::json_exception
#include <pplx/pplxtasks.h>
#include "string_ops.h"
// Process-wide failure sink; null until SetResultLoggingCallback is called.
static docli::logging_callback_type g_pfnLoggingCallback = nullptr;
namespace docli
{
// Registers the process-wide failure logging callback (single slot; last writer wins).
void SetResultLoggingCallback(logging_callback_type callback)
{
    g_pfnLoggingCallback = callback;
}

// Fills *failure with all context available at the current RESULT_DIAGNOSTICS_LEVEL
// and forwards it to the registered logging callback. Never throws.
void LogFailure(__R_FN_PARAMS_FULL, FailureType type, HRESULT hr, _In_opt_ PCSTR message, _Out_ FailureInfo* failure) DO_NOEXCEPT
{
    memset(failure, 0, sizeof(*failure));
    failure->type = type;
    failure->hr = hr;
    // An empty message is treated the same as no message.
    failure->pszMessage = ((message != nullptr) && (message[0] != '\0')) ? message : nullptr;
    // These fields are compiled in only at sufficiently high diagnostics levels.
    __R_IF_FILE(failure->pszFile = fileName);
    __R_IF_LINE(failure->uLineNumber = lineNumber);
    __R_IF_CODE(failure->pszCode = code);
    __R_IF_FUNCTION(failure->pszFunction = functionName);
#if (RESULT_INCLUDE_CALLER_RETURNADDRESS == 1)
    failure->returnAddress = returnAddress;
    failure->callerReturnAddress = callerReturnAddress;
#endif
    if (g_pfnLoggingCallback)
    {
        g_pfnLoggingCallback(*failure);
    }
    if (SUCCEEDED(failure->hr))
    {
        // TODO(shishirb) support fail-fast
        // Caller bug: Leaking a success code into a failure-only function
        //FAIL_FAST_IMMEDIATE_IF(type != FailureType::FailFast);
        failure->hr = E_UNEXPECTED;
    }
}

// Logs the failure, then performs the type-specific action:
// Exception throws DOResultException; FailFast is not implemented yet;
// Return/Log just record.
void ReportFailure(__R_FN_PARAMS_FULL, FailureType type, HRESULT hr, _In_opt_ PCSTR message)
{
    FailureInfo failure;
    LogFailure(__R_FN_CALL_FULL, type, hr, message, &failure);
    if (type == FailureType::FailFast)
    {
        // TODO(shishirb) support fail-fast
        // WilFailFast(const_cast<FailureInfo&>(failure));
    }
    else if (type == FailureType::Exception)
    {
        throw DOResultException(failure);
    }
}

// Formats the var-arg message into a fixed 1KB buffer, then reports.
// NOTE(review): longer messages are presumably truncated by PrintLoggingMessage — confirm.
void ReportFailure_Msg(__R_FN_PARAMS_FULL, FailureType type, HRESULT hr, _Printf_format_string_ PCSTR formatString, va_list argList)
{
    char message[1024];
    PrintLoggingMessage(message, ARRAYSIZE(message), formatString, argList);
    ReportFailure(__R_FN_CALL_FULL, type, hr, message);
}

// Entry point for the RETURN_HR family (no message).
void Return_Hr(__R_FN_PARAMS_FULL, HRESULT hr) DO_NOEXCEPT
{
    ReportFailure(__R_FN_CALL_FULL, FailureType::Return, hr);
}
// Entry point for RETURN_HR_MSG-style macros: formats a var-arg message and
// reports at Return severity (does not throw — FailureType::Return records only).
void Return_HrMsg(__R_FN_PARAMS_FULL, HRESULT hr, _Printf_format_string_ PCSTR formatString, ...) DO_NOEXCEPT
{
    va_list argList;
    va_start(argList, formatString);
    ReportFailure_Msg(__R_FN_CALL_FULL, FailureType::Return, hr, formatString, argList);
    va_end(argList); // fix: va_start must be matched with va_end (was missing)
}
// Renders a DOResultException into debugString (no-op when debugString is null).
// The amount of detail depends on RESULT_DIAGNOSTICS_LEVEL, mirroring which
// FailureInfo fields were captured in the first place.
inline void MaybeGetExceptionString(const DOResultException& exception, _Out_writes_opt_(debugStringChars) PSTR debugString, size_t debugStringChars)
{
    if (debugString)
    {
        const auto& failureInfo = exception.GetFailureInfo();
#if (RESULT_DIAGNOSTICS_LEVEL >= 5)
        // pszCode is available only in this case
        StringPrintf(debugString, debugStringChars, "DO failure: %s (hr:0x%X) [%s, %d], {%s}",
            failureInfo.pszMessage, failureInfo.hr, failureInfo.pszFile, failureInfo.uLineNumber, failureInfo.pszCode);
#elif (RESULT_DIAGNOSTICS_LEVEL >= 3)
        StringPrintf(debugString, debugStringChars, "DO failure: %s (hr:0x%X) [%s, %d]",
            failureInfo.pszMessage, failureInfo.hr, failureInfo.pszFile, failureInfo.uLineNumber);
#elif (RESULT_DIAGNOSTICS_LEVEL >= 2)
        // pszFile not available in this case
        StringPrintf(debugString, debugStringChars, "DO failure: %s (hr:0x%X) [%d]",
            failureInfo.pszMessage, failureInfo.hr, failureInfo.uLineNumber);
#else
        // uLineNumber also is not available in this case
        StringPrintf(debugString, debugStringChars, "DO failure: %s (hr:0x%X)",
            failureInfo.pszMessage, failureInfo.hr);
#endif
    }
}
// Renders a std::exception's what() into debugString (no-op when null).
inline void MaybeGetExceptionString(const std::exception& exception, _Out_writes_opt_(debugStringChars) PSTR debugString, size_t debugStringChars)
{
    if (debugString == nullptr)
    {
        return;
    }
    StringPrintf(debugString, debugStringChars, "std::exception: %s", exception.what());
}
// Translates the in-flight exception (rethrown via 'throw;') into an HRESULT.
// Must be called from inside a catch block. Optionally writes a human-readable
// description into debugString. Returns S_OK — deliberately a success code —
// when the exception type is not recognized, so callers can detect that case.
HRESULT ResultFromCaughtExceptionInternal(_Out_writes_opt_(debugStringChars) PSTR debugString, size_t debugStringChars) DO_NOEXCEPT
{
    if (debugString)
    {
        *debugString = '\0';
    }
    // Note: If we need to handle other exceptions via callbacks, this is where to enable it
    // if (g_pfnResultFromCaughtException)
    try
    {
        throw;
    }
    catch (const DOResultException& exception)
    {
        // Our own exception type already carries an HRESULT.
        MaybeGetExceptionString(exception, debugString, debugStringChars);
        return exception.GetErrorCode();
    }
    catch (const pplx::task_canceled&)
    {
        // Either the request failed and we already reported it
        // or the caller did not want to continue with the request.
        return E_ABORT;
    }
    catch (const web::http::http_exception& httpEx)
    {
        if (debugString)
        {
            StringPrintf(debugString, debugStringChars, "http_exception: %s", httpEx.what());
        }
        if (httpEx.error_code() == std::errc::operation_canceled)
        {
            return E_ABORT;
        }
        return HRESULT_FROM_STDCPP(httpEx.error_code());
    }
    catch (const web::json::json_exception& jsonEx)
    {
        if (debugString)
        {
            StringPrintf(debugString, debugStringChars, "json_exception: %s", jsonEx.what());
        }
        return E_INVALIDARG; // json_exception doesn't have an error code
    }
    catch (const boost::system::system_error& sysEx)
    {
        if (debugString)
        {
            StringPrintf(debugString, debugStringChars, "boost_exception: %s", sysEx.what());
        }
        return HRESULT_FROM_BOOST(sysEx.code());
    }
    catch (const std::bad_alloc& exception)
    {
        MaybeGetExceptionString(exception, debugString, debugStringChars);
        return E_OUTOFMEMORY;
    }
    catch (const std::exception& exception)
    {
        // Generic std::exception: no finer-grained mapping available.
        MaybeGetExceptionString(exception, debugString, debugStringChars);
        return HRESULT_FROM_WIN32(ERROR_UNHANDLED_EXCEPTION);
    }
    catch (...)
    {
        // Unrecognized exception type: fall through to the sentinel below.
    }
    // Tell the caller that we were unable to map the exception by succeeding...
    return S_OK;
}
// Shared tail for the *_CaughtException helpers: appends the exception
// description after any pre-populated text in debugString, resolves an HRESULT,
// and reports. Unrecognized exceptions (S_OK sentinel) are escalated to
// FailFast with ERROR_UNHANDLED_EXCEPTION.
inline HRESULT ReportFailure_CaughtExceptionCommon(__R_FN_PARAMS_FULL, FailureType type, PSTR debugString, size_t cchDebugString)
{
    const auto length = strlen(debugString);
    assert(length < cchDebugString);
    HRESULT hr = ResultFromCaughtExceptionInternal(debugString + length, cchDebugString - length);
    const bool known = (FAILED(hr));
    if (!known)
    {
        // S_OK from the internal helper means the exception type was unrecognized.
        hr = HRESULT_FROM_WIN32(ERROR_UNHANDLED_EXCEPTION);
        type = FailureType::FailFast;
    }
    ReportFailure(__R_FN_CALL_FULL, type, hr, debugString);
    return hr;
}
// Reports the in-flight exception with no caller-supplied message prefix.
HRESULT ReportFailure_CaughtException(__R_FN_PARAMS_FULL, FailureType type)
{
    // Start from an empty buffer; the exception text is filled in by the common helper.
    char message[1024] = {};
    return ReportFailure_CaughtExceptionCommon(__R_FN_CALL_FULL, type, message, ARRAYSIZE(message));
}
// Like ReportFailure_CaughtException, but prefixes the caller's formatted
// message; the exception description is appended after " -- ".
// (Takes a ready va_list, so no va_start/va_end pair is needed here.)
HRESULT ReportFailure_CaughtExceptionMsg(__R_FN_PARAMS_FULL, FailureType type, _Printf_format_string_ PCSTR formatString, va_list argList)
{
    // Pre-populate the buffer with our message, the exception message will be added to it...
    char message[1024];
    PrintLoggingMessage(message, ARRAYSIZE(message), formatString, argList);
    (void)StringConcatenate(message, ARRAYSIZE(message), " -- ");
    return ReportFailure_CaughtExceptionCommon(__R_FN_CALL_FULL, type, message, ARRAYSIZE(message));
}
// Entry point for THROW_HR_MSG: formats and throws DOResultException.
// NOTE(review): no va_end — ReportFailure_Msg with FailureType::Exception always
// throws (see ReportFailure), so a trailing va_end would be unreachable. Harmless
// on common ABIs where va_end is a no-op, but strictly unmatched per the C standard.
void Throw_HrMsg(__R_FN_PARAMS_FULL, HRESULT hr, _Printf_format_string_ PCSTR formatString, ...)
{
    va_list argList;
    va_start(argList, formatString);
    ReportFailure_Msg(__R_FN_CALL_FULL, FailureType::Exception, hr, formatString, argList);
}

// Entry point for THROW_NORMALIZED_CAUGHT_EXCEPTION_MSG (call from a catch block).
// Same va_end caveat as Throw_HrMsg above: the Exception failure type throws.
void Throw_CaughtExceptionMsg(__R_FN_PARAMS_FULL, _Printf_format_string_ PCSTR formatString, ...)
{
    va_list argList;
    va_start(argList, formatString);
    ReportFailure_CaughtExceptionMsg(__R_FN_CALL_FULL, FailureType::Exception, formatString, argList);
}
// Converts the in-flight exception to an HRESULT (call from a catch block).
HRESULT ResultFromCaughtException() DO_NOEXCEPT
{
    const HRESULT hr = ResultFromCaughtExceptionInternal(nullptr, 0);
    // A success code from the helper means the exception type was unrecognized —
    // a caller bug; map it to a generic failure for now.
    // TODO(shishirb) fail fast
    return FAILED(hr) ? hr : HRESULT_FROM_WIN32(ERROR_UNHANDLED_EXCEPTION);
}
// Pass-through logger for LOG_IF_FAILED: failures are reported, hr is returned unchanged.
HRESULT Log_IfFailed(__R_FN_PARAMS_FULL, HRESULT hr) DO_NOEXCEPT
{
    if (SUCCEEDED(hr))
    {
        return hr;
    }
    ReportFailure(__R_FN_CALL_FULL, FailureType::Log, hr, nullptr);
    return hr;
}
// Pass-through logger for LOG_IF_FAILED_MSG: on failure, formats the var-arg
// message and reports at Log severity; hr is always returned unchanged.
HRESULT Log_IfFailedMsg(__R_FN_PARAMS_FULL, HRESULT hr, _Printf_format_string_ PCSTR formatString, ...) DO_NOEXCEPT
{
    if (FAILED(hr))
    {
        va_list argList;
        va_start(argList, formatString);
        ReportFailure_Msg(__R_FN_CALL_FULL, FailureType::Log, hr, formatString, argList);
        va_end(argList); // fix: va_start must be matched with va_end (was missing)
    }
    return hr;
}
} // namespace docli

Просмотреть файл

@ -0,0 +1,461 @@
#pragma once
#include <cstdarg> // va_start, etc.
#include "basic_types.h"
#include "hresult_helpers.h"
#include "sal_undef.h"
#include "string_ops.h"
//*****************************************************************************
// This is a port of some of the result macros from WIL
// https://osgwiki.com/wiki/Windows_Internal_Libraries_(wil)
//*****************************************************************************
#if (DBG || defined(DEBUG) || defined(_DEBUG)) && !defined(NDEBUG)
#define RESULT_DEBUG
#endif
#ifndef RESULT_DIAGNOSTICS_LEVEL
#if (defined(RESULT_DEBUG) || defined(RESULT_DEBUG_INFO)) && !defined(RESULT_SUPPRESS_DEBUG_INFO)
#define RESULT_DIAGNOSTICS_LEVEL 5
#else
#define RESULT_DIAGNOSTICS_LEVEL 3
#endif
#endif
// DO customization: Return addresses inclusion is disabled until we figure out how to do this in GCC
#define RESULT_INCLUDE_CALLER_RETURNADDRESS 0
#define RESULT_INCLUDE_RETURNADDRESS 0
//*****************************************************************************
// Helpers to setup the macros and functions used below... do not directly use.
//*****************************************************************************
#define __R_COMMA ,
// The following macros assemble the varying amount of data we want to collect from the macros, treating it uniformly
#if (RESULT_DIAGNOSTICS_LEVEL >= 2) // line number
#define __R_IF_LINE(term) term
#define __R_IF_NOT_LINE(term)
#define __R_IF_COMMA ,
#define __R_LINE_VALUE static_cast<unsigned short>(__LINE__)
#else
#define __R_IF_LINE(term)
#define __R_IF_NOT_LINE(term) term
#define __R_IF_COMMA
#define __R_LINE_VALUE static_cast<unsigned short>(0)
#endif
#if (RESULT_DIAGNOSTICS_LEVEL >= 3) // line number + file name
#define __R_IF_FILE(term) term
#define __R_IF_NOT_FILE(term)
#define __R_FILE_VALUE __FILE__
#else
#define __R_IF_FILE(term)
#define __R_IF_NOT_FILE(term) term
#define __R_FILE_VALUE nullptr
#endif
#if (RESULT_DIAGNOSTICS_LEVEL >= 4) // line number + file name + function name
#define __R_IF_FUNCTION(term) term
#define __R_IF_NOT_FUNCTION(term)
#else
#define __R_IF_FUNCTION(term)
#define __R_IF_NOT_FUNCTION(term) term
#endif
#if (RESULT_DIAGNOSTICS_LEVEL >= 5) // line number + file name + function name + macro code
#define __R_IF_CODE(term) term
#define __R_IF_NOT_CODE(term)
#else
#define __R_IF_CODE(term)
#define __R_IF_NOT_CODE(term) term
#endif
#if (RESULT_INCLUDE_CALLER_RETURNADDRESS == 1)
#define __R_IF_CALLERADDRESS(term) term
#define __R_IF_NOT_CALLERADDRESS(term)
#define __R_CALLERADDRESS_VALUE _ReturnAddress()
#else
#define __R_IF_CALLERADDRESS(term)
#define __R_IF_NOT_CALLERADDRESS(term) term
#define __R_CALLERADDRESS_VALUE nullptr
#endif
#if (RESULT_INCLUDE_CALLER_RETURNADDRESS == 1) || (RESULT_DIAGNOSTICS_LEVEL >= 2)
#define __R_IF_TRAIL_COMMA ,
#else
#define __R_IF_TRAIL_COMMA
#endif
#define __R_ENABLE_IF_IS_CLASS(ptrType) typename std::enable_if_t<std::is_class<ptrType>::value, void*> = (void*)0
#define __R_ENABLE_IF_IS_NOT_CLASS(ptrType) typename std::enable_if_t<!std::is_class<ptrType>::value, void*> = (void*)0
// Assemble the varying amounts of data into a single macro
#define __R_INFO_ONLY(CODE) __R_IF_CALLERADDRESS(_ReturnAddress() __R_IF_COMMA) __R_IF_LINE(__R_LINE_VALUE) __R_IF_FILE(__R_COMMA __R_FILE_VALUE) __R_IF_FUNCTION(__R_COMMA __FUNCTION__) __R_IF_CODE(__R_COMMA CODE)
#define __R_INFO(CODE) __R_INFO_ONLY(CODE) __R_IF_TRAIL_COMMA
#define __R_FN_PARAMS_FULL __R_IF_LINE(unsigned int lineNumber __R_IF_COMMA) __R_IF_FILE(_In_opt_ PCSTR fileName) __R_IF_FUNCTION(__R_COMMA _In_opt_ PCSTR functionName) __R_IF_CODE(__R_COMMA _In_opt_ PCSTR code)
#define __R_FN_CALL_FULL __R_IF_LINE(lineNumber __R_IF_COMMA) __R_IF_FILE(fileName) __R_IF_FUNCTION(__R_COMMA functionName) __R_IF_CODE(__R_COMMA code)
// Helpers for return macros
#define __RETURN_HR_MSG(hr, str, fmt, ...) do { HRESULT __hr = (hr); if (FAILED(__hr)) { docli::Return_HrMsg(__R_INFO(str) __hr, fmt, ##__VA_ARGS__); } return __hr; } while (0)
#define __RETURN_HR_MSG_FAIL(hr, str, fmt, ...) do { HRESULT __hr = (hr); docli::Return_HrMsg(__R_INFO(str) __hr, fmt, ##__VA_ARGS__); return __hr; } while (0)
#define __RETURN_HR(hr, str) do { HRESULT __hr = (hr); if (FAILED(__hr)) { docli::Return_Hr(__R_INFO(str) __hr); } return __hr; } while (0)
#define __RETURN_HR_FAIL(hr, str) do { HRESULT __hr = (hr); docli::Return_Hr(__R_INFO(str) __hr); return __hr; } while (0)
//*****************************************************************************
// Macros for returning failures as HRESULTs
//*****************************************************************************
// '##' is required before __VA_ARGS__ to allow an empty arg list to the variadic macro.
// MSVC supports this by default but GCC requires '##'. C++2a has added VA_OPT macro
// to officially support this behavior.
// Always returns a known result (HRESULT) - always logs failures
#define RETURN_HR(hr) __RETURN_HR(docli::verify_hresult(hr), #hr)
// Always returns a known failure (HRESULT) - always logs a var-arg message on failure
#define RETURN_HR_MSG(hr, fmt, ...) __RETURN_HR_MSG(docli::verify_hresult(hr), #hr, fmt, ##__VA_ARGS__)
// Conditionally returns failures (HRESULT) - always logs failures
#define RETURN_IF_FAILED(hr) do { HRESULT __hrRet = docli::verify_hresult(hr); if (FAILED(__hrRet)) { __RETURN_HR_FAIL(__hrRet, #hr); }} while (0)
#define RETURN_HR_IF(hr, condition) do { if (docli::verify_bool(condition)) { __RETURN_HR(docli::verify_hresult(hr), #condition); }} while (0)
#define RETURN_HR_IF_NULL(hr, ptr) do { if ((ptr) == nullptr) { __RETURN_HR(docli::verify_hresult(hr), #ptr); }} while (0)
// Conditionally returns failures (HRESULT) - always logs a var-arg message on failure
#define RETURN_IF_FAILED_MSG(hr, fmt, ...) do { auto __hrRet = docli::verify_hresult(hr); if (FAILED(__hrRet)) { __RETURN_HR_MSG_FAIL(__hrRet, #hr, fmt, ##__VA_ARGS__); }} while (0)
#define RETURN_HR_IF_MSG(hr, condition, fmt, ...) do { if (docli::verify_bool(condition)) { __RETURN_HR_MSG(docli::verify_hresult(hr), #condition, fmt, ##__VA_ARGS__); }} while (0)
// Conditionally returns failures (HRESULT) - use for failures that are expected in common use - failures are not logged - macros are only for control flow pattern
#define RETURN_IF_FAILED_EXPECTED(hr) do { auto __hrRet = docli::verify_hresult(hr); if (FAILED(__hrRet)) { return __hrRet; }} while (0)
#define RETURN_HR_IF_EXPECTED(hr, condition) do { if (docli::verify_bool(condition)) { return docli::verify_hresult(hr); }} while (0)
// DEPRECATED: Use RETURN_HR_IF(hr, !condition)
#define RETURN_HR_IF_FALSE(hr, condition) RETURN_HR_IF(hr, !(docli::verify_bool(condition)))
// DEPRECATED: Use RETURN_HR_IF_EXPECTED(hr, !condition)
#define RETURN_HR_IF_FALSE_EXPECTED(hr, condition) RETURN_HR_IF_EXPECTED(hr, !(docli::verify_bool(condition)))
//*****************************************************************************
// Macros to throw exceptions on failure
//*****************************************************************************
// Always throw a known failure
#define THROW_HR(hr) docli::Throw_Hr(__R_INFO(#hr) docli::verify_hresult(hr))
#define THROW_HR_IF(hr, condition) docli::Throw_HrIf(__R_INFO(#condition) docli::verify_hresult(hr), docli::verify_bool(condition))
#define THROW_HR_IF_NULL(hr, ptr) docli::Throw_HrIfNull(__R_INFO(#ptr) docli::verify_hresult(hr), ptr)
// Conditionally throw failures - returns parameter value
#define THROW_IF_FAILED(hr) docli::Throw_IfFailed(__R_INFO(#hr) docli::verify_hresult(hr))
// Always throw a known failure - throw a var-arg message on failure
#define THROW_HR_MSG(hr, fmt, ...) docli::Throw_HrMsg(__R_INFO(#hr) docli::verify_hresult(hr), fmt, ##__VA_ARGS__)
//*****************************************************************************
// Macros to catch and convert exceptions on failure
//*****************************************************************************
// Use these macros *within* a catch (...) block to handle exceptions
#define RETURN_CAUGHT_EXCEPTION() return docli::Return_CaughtException(__R_INFO_ONLY(nullptr))
#define RETURN_CAUGHT_EXCEPTION_MSG(fmt, ...) return docli::Return_CaughtExceptionMsg(__R_INFO(nullptr) fmt, ##__VA_ARGS__)
#define LOG_CAUGHT_EXCEPTION() docli::Log_CaughtException(__R_INFO_ONLY(nullptr))
#define LOG_CAUGHT_EXCEPTION_MSG(fmt, ...) docli::Log_CaughtExceptionMsg(__R_INFO(nullptr) fmt, ##__VA_ARGS__)
#define THROW_NORMALIZED_CAUGHT_EXCEPTION_MSG(fmt, ...) docli::Throw_CaughtExceptionMsg(__R_INFO(nullptr) fmt, ##__VA_ARGS__)
// Use these macros in place of a catch block to handle exceptions
#define CATCH_RETURN() catch (...) { RETURN_CAUGHT_EXCEPTION(); }
#define CATCH_RETURN_MSG(fmt, ...) catch (...) { RETURN_CAUGHT_EXCEPTION_MSG(fmt, ##__VA_ARGS__); }
#define CATCH_LOG() catch (...) { LOG_CAUGHT_EXCEPTION(); }
#define CATCH_THROW_NORMALIZED_MSG(fmt, ...) catch (...) { THROW_NORMALIZED_CAUGHT_EXCEPTION_MSG(fmt, ##__VA_ARGS__); }
//*****************************************************************************
// Macros for logging failures (ignore or pass-through)
//*****************************************************************************
// Always logs a known failure - logs a var-arg message on failure
#define LOG_HR_MSG(hr, fmt, ...) docli::Log_HrMsg(__R_INFO(#hr) docli::verify_hresult(hr), fmt, ##__VA_ARGS__)
// Conditionally logs failures - returns parameter value
#define LOG_HR_IF(hr, condition) docli::Log_HrIf(__R_INFO(#condition) docli::verify_hresult(hr), docli::verify_bool(condition))
// Conditionally logs failures - returns parameter value
#define LOG_IF_FAILED(hr) docli::Log_IfFailed(__R_INFO(#hr) docli::verify_hresult(hr))
// Conditionally logs failures - returns parameter value - logs a var-arg message on failure
#define LOG_IF_FAILED_MSG(hr, fmt, ...) docli::Log_IfFailedMsg(__R_INFO(#hr) docli::verify_hresult(hr), fmt, ##__VA_ARGS__)
namespace docli
{
// Alias used throughout this WIL port in place of bare noexcept.
#define DO_NOEXCEPT noexcept

// Indicates the kind of message / failure type that was used to produce a given error
enum class FailureType
{
    Exception, // THROW_...
    Return, // RETURN_..._LOG or RETURN_..._MSG
    Log, // LOG_...
    FailFast, // FAIL_FAST_...
};

// Represents all context information about a given failure
// No constructors, destructors or virtual members should be contained within
struct FailureInfo
{
    FailureType type;
    HRESULT hr;
    PCSTR pszMessage; // Message is only present for _MSG logging (it's the sprintf message)
    PCSTR pszCode; // [debug only] Capture code from the macro
    PCSTR pszFunction; // [debug only] The function name
    PCSTR pszFile; // source file; populated when RESULT_DIAGNOSTICS_LEVEL >= 3
    unsigned int uLineNumber; // populated when RESULT_DIAGNOSTICS_LEVEL >= 2
};
// A RAII wrapper around the storage of a FailureInfo struct (which is normally meant to be consumed
// on the stack or from the caller). The storage of FailureInfo needs to copy the pszMessage string
// to maintain its lifetime. The other string members in FailureInfo are always read-only strings
// from the binary image so there is no need to copy them (DO customization).
class StoredFailureInfo
{
public:
    // Zero-initialized failure (no message, hr == 0).
    StoredFailureInfo() DO_NOEXCEPT
    {
        memset(&_failureInfo, 0, sizeof(_failureInfo));
    }

    // Copies the failure; the message string is duplicated into _msg so it
    // survives beyond the caller's stack frame.
    StoredFailureInfo(const FailureInfo& failureInfo) DO_NOEXCEPT
    {
        _failureInfo = failureInfo;
        try
        {
            if (failureInfo.pszMessage)
            {
                _msg = failureInfo.pszMessage;
                // Re-point the stored struct at our owned copy.
                _failureInfo.pszMessage = _msg.data();
            }
        }
        catch (std::bad_alloc&)
        {
            // ignore, can't do anything here
        }
    }

    // Read access to the stored failure (pszMessage points into _msg if set).
    const FailureInfo& Get() const DO_NOEXCEPT
    {
        return _failureInfo;
    }

private:
    FailureInfo _failureInfo;
    std::string _msg; // owned copy of pszMessage (empty when none)
};
// This is the exception class thrown from all THROW_XXX macros.
// This class stores all of the FailureInfo context that is available when the exception is thrown.
// It's also caught by exception guards for automatic conversion to HRESULT.
// Note: DO customization: what() is not overriden.
class DOResultException : public std::exception
{
public:
    // Captures (and copies, via StoredFailureInfo) the failure context.
    DOResultException(const FailureInfo& failure) DO_NOEXCEPT :
        _failureInfo(failure)
    {
    }

    // Returns the failed HRESULT that this exception represents.
    HRESULT GetErrorCode() const DO_NOEXCEPT
    {
        return _failureInfo.Get().hr;
    }

    // Get a reference to the stored FailureInfo.
    const FailureInfo& GetFailureInfo() const DO_NOEXCEPT
    {
        return _failureInfo.Get();
    }

    // Sets the stored FailureInfo (use primarily only when constructing custom exception types).
    void SetFailureInfo(const FailureInfo& failure) DO_NOEXCEPT
    {
        _failureInfo = failure;
    }

    // Relies upon auto-generated copy constructor and assignment operator

protected:
    StoredFailureInfo _failureInfo; // The failure information for this exception
};
// Observe all errors flowing through the system with this callback (set with docli::SetResultLoggingCallback); use with custom logging
using logging_callback_type = void(*)(const docli::FailureInfo& failure) DO_NOEXCEPT;
void SetResultLoggingCallback(logging_callback_type callback);
// Statically verifies that the argument is the platform's HRESULT type
// (long on Windows, int32_t elsewhere) and passes it through unchanged.
// Used by the macros to reject accidental non-HRESULT arguments at compile time.
template <typename T>
HRESULT verify_hresult(T hr)
{
    static_assert(sizeof(T) == 4, "Wrong Size: HRESULT expected to be 4 bytes");
#ifdef _WIN32
    static_assert(std::is_same<T, long>::value, "Wrong Type: HRESULT expected");
#else
    static_assert(std::is_same<T, int32_t>::value, "Wrong Type: HRESULT expected");
#endif
    return hr;
}
// Type Validation
// Helpers to validate variable types to prevent accidental, but allowed type conversions.
// These helpers are most useful when building macros that accept a particular type. Putting these functions around the types accepted
// prior to pushing that type through to a function (or using it within the macro) allows the macro to add an additional layer of type
// safety that would ordinarily be stripped away by C++ implicit conversions. This system is extensively used in the error handling helper
// macros to validate the types given to various macro parameters.
// Verify that val can be evaluated as a logical bool.
// Other types will generate an intentional compilation error. Allowed types for a logical bool are bool, BOOL,
// boolean, BOOLEAN, and classes with an explicit bool cast.
// Returns a C++ bool representing the evaluation of val.
// Class types: accepted via their (possibly explicit) conversion to bool.
template <typename T, __R_ENABLE_IF_IS_CLASS(T)>
bool verify_bool(const T& val)
{
    return static_cast<bool>(val);
}

// Any other non-class type without a specialization below: intentional compile error.
template <typename T, __R_ENABLE_IF_IS_NOT_CLASS(T)>
bool verify_bool(T val)
{
    static_assert(!std::is_same<T, T>::value, "Wrong Type: bool/BOOL/BOOLEAN/boolean expected");
    return static_cast<bool>(val);
}

// Plain bool passes through untouched.
template <>
inline bool verify_bool<bool>(bool val)
{
    return val;
}

// int covers the Windows BOOL typedef.
template <>
inline bool verify_bool<int>(int val)
{
    return (val != 0);
}

// unsigned char covers BOOLEAN/boolean.
template <>
inline bool verify_bool<unsigned char>(unsigned char val)
{
    return !!val;
}
// TODO why does WIL mark some functions as inline + __declspec(noinline) and some as just inline?
void LogFailure(__R_FN_PARAMS_FULL, FailureType type, HRESULT hr, _In_opt_ PCSTR message, _Out_ FailureInfo* failure) DO_NOEXCEPT;
void ReportFailure(__R_FN_PARAMS_FULL, FailureType type, HRESULT hr, _In_opt_ PCSTR message = nullptr);
void ReportFailure_Msg(__R_FN_PARAMS_FULL, FailureType type, HRESULT hr, _Printf_format_string_ PCSTR formatString, va_list argList);
// Formats a var-arg message into pszDest; a null format yields an empty string.
inline void PrintLoggingMessage(PSTR pszDest, size_t cchDest, _In_opt_ _Printf_format_string_ PCSTR formatString, _In_opt_ va_list argList) DO_NOEXCEPT
{
    if (formatString != nullptr)
    {
        StringPrintfV(pszDest, cchDest, formatString, argList);
    }
    else
    {
        *pszDest = '\0';
    }
}
void Return_Hr(__R_FN_PARAMS_FULL, HRESULT hr) DO_NOEXCEPT;
void Return_HrMsg(__R_FN_PARAMS_FULL, HRESULT hr, _Printf_format_string_ PCSTR formatString, ...) DO_NOEXCEPT;
// Entry point for THROW_HR: the Exception failure type makes ReportFailure
// throw DOResultException, so this never returns normally.
inline void Throw_Hr(__R_FN_PARAMS_FULL, HRESULT hr)
{
    docli::ReportFailure(__R_FN_CALL_FULL, FailureType::Exception, hr);
}
// Throws hr when the condition holds; otherwise hands the condition back.
inline bool Throw_HrIf(__R_FN_PARAMS_FULL, HRESULT hr, bool condition)
{
    if (!condition)
    {
        return false;
    }
    Throw_Hr(__R_FN_CALL_FULL, hr);
    return true; // unreachable in practice: Throw_Hr raises DOResultException
}
// Raw pointers: throws hr when null; otherwise returns the pointer for chaining.
template <typename PointerT, __R_ENABLE_IF_IS_NOT_CLASS(PointerT)>
PointerT Throw_HrIfNull(__R_FN_PARAMS_FULL, HRESULT hr, PointerT pointer)
{
    if (pointer == nullptr)
    {
        Throw_Hr(__R_FN_CALL_FULL, hr);
    }
    return pointer;
}

// Class (smart-pointer-like) types: same null check via the type's comparison
// with nullptr; returns nothing so the smart pointer is not copied.
template <typename PointerT, __R_ENABLE_IF_IS_CLASS(PointerT)>
void Throw_HrIfNull(__R_FN_PARAMS_FULL, HRESULT hr, const PointerT& pointer)
{
    if (pointer == nullptr)
    {
        Throw_Hr(__R_FN_CALL_FULL, hr);
    }
}
// Success codes pass straight through; failed HRESULTs become exceptions.
inline HRESULT Throw_IfFailed(__R_FN_PARAMS_FULL, HRESULT hr)
{
    if (SUCCEEDED(hr))
    {
        return hr;
    }
    Throw_Hr(__R_FN_CALL_FULL, hr);
    return hr; // unreachable: Throw_Hr raises DOResultException
}
HRESULT ReportFailure_CaughtException(__R_FN_PARAMS_FULL, FailureType type);
HRESULT ReportFailure_CaughtExceptionMsg(__R_FN_PARAMS_FULL, FailureType type, _Printf_format_string_ PCSTR formatString, va_list argList);
// Converts the in-flight exception to an HRESULT and logs it at Return severity.
inline HRESULT Return_CaughtException(__R_FN_PARAMS_FULL) DO_NOEXCEPT
{
    return docli::ReportFailure_CaughtException(__R_FN_CALL_FULL, FailureType::Return);
}
// Converts the in-flight exception to an HRESULT, prefixing a formatted message.
// FailureType::Return does not throw, so control returns here normally.
inline HRESULT Return_CaughtExceptionMsg(__R_FN_PARAMS_FULL, _Printf_format_string_ PCSTR formatString, ...) DO_NOEXCEPT
{
    va_list argList;
    va_start(argList, formatString);
    const HRESULT hr = docli::ReportFailure_CaughtExceptionMsg(__R_FN_CALL_FULL, FailureType::Return, formatString, argList);
    va_end(argList); // fix: va_start must be matched with va_end (was missing)
    return hr;
}
// Converts the in-flight exception to an HRESULT and logs it at Log severity.
inline HRESULT Log_CaughtException(__R_FN_PARAMS_FULL) DO_NOEXCEPT
{
    return docli::ReportFailure_CaughtException(__R_FN_CALL_FULL, FailureType::Log);
}
// Logs the in-flight exception with a formatted message prefix; returns its HRESULT.
inline HRESULT Log_CaughtExceptionMsg(__R_FN_PARAMS_FULL, _Printf_format_string_ PCSTR formatString, ...) DO_NOEXCEPT
{
    va_list argList;
    va_start(argList, formatString);
    const HRESULT hr = docli::ReportFailure_CaughtExceptionMsg(__R_FN_CALL_FULL, FailureType::Log, formatString, argList);
    va_end(argList); // fix: va_start must be matched with va_end (was missing)
    return hr;
}
// Logs hr with a formatted message; always returns hr unchanged.
inline HRESULT Log_HrMsg(__R_FN_PARAMS_FULL, HRESULT hr, _Printf_format_string_ PCSTR formatString, ...) DO_NOEXCEPT
{
    va_list argList;
    va_start(argList, formatString);
    ReportFailure_Msg(__R_FN_CALL_FULL, FailureType::Log, hr, formatString, argList);
    va_end(argList); // fix: va_start must be matched with va_end (was missing)
    return hr;
}
// Logs hr when the condition holds; always hands the condition back.
inline bool Log_HrIf(__R_FN_PARAMS_FULL, HRESULT hr, bool condition) DO_NOEXCEPT
{
    if (!condition)
    {
        return false;
    }
    ReportFailure(__R_FN_CALL_FULL, FailureType::Log, hr);
    return true;
}
void Throw_HrMsg(__R_FN_PARAMS_FULL, HRESULT hr, _Printf_format_string_ PCSTR formatString, ...);
void Throw_CaughtExceptionMsg(__R_FN_PARAMS_FULL, _Printf_format_string_ PCSTR formatString, ...);
// ResultFromCaughtException is a function that is meant to be called from within a catch(...) block. Internally
// it re-throws and catches the exception to convert it to an HRESULT. If an exception is of an unrecognized type
// the function will fail fast.
HRESULT ResultFromCaughtException() DO_NOEXCEPT;
HRESULT Log_IfFailed(__R_FN_PARAMS_FULL, HRESULT hr) DO_NOEXCEPT;
HRESULT Log_IfFailedMsg(__R_FN_PARAMS_FULL, HRESULT hr, _Printf_format_string_ PCSTR formatString, ...) DO_NOEXCEPT;
}

Просмотреть файл

@ -0,0 +1,399 @@
#include "do_common.h"
#include "http_agent.h"
#include <sstream>
#include <cpprest/http_client.h>
#include <cpprest/http_msg.h>
#include <cpprest/uri.h>
#include <gsl/gsl_util>
#include "safe_int.h"
// TBD version
#define DO_USER_AGENT_STR "Microsoft-Delivery-Optimization-Lite/10.0.0.1"
// NOTE(review): stores only a reference — the callback sink must outlive this agent.
HttpAgent::HttpAgent(IHttpAgentEvents& callback) :
    _callback(callback)
{
}

HttpAgent::~HttpAgent()
{
    // Cancels outstanding work and waits for in-flight tasks to drain.
    Close();
}
// Determine if the status code is a 4xx code
// Determine if the status code is a 4xx (client error) code.
bool HttpAgent::IsClientError(UINT httpStatusCode)
{
    return (httpStatusCode / 100) == 4;
}
// Builds the value part of an HTTP Range request covering
// [startOffset, startOffset + lengthBytes - 1], formatted as "<start>-<end>".
// UInt64Add/UInt64Sub come from safe_int.h — presumably overflow-checked; confirm.
std::array<char, DO_HTTP_RANGEREQUEST_STR_LEN> HttpAgent::MakeRange(UINT64 startOffset, UINT64 lengthBytes)
{
    std::array<char, DO_HTTP_RANGEREQUEST_STR_LEN> range;
    const auto endOffset = UInt64Sub(UInt64Add(startOffset, lengthBytes), 1);
    (void)StringPrintf(range.data(), range.size(), "%llu-%llu", startOffset, endOffset);
    return range;
}
bool HttpAgent::ValidateUrl(const std::string& url)
{
if (!web::uri::validate(url))
{
return false;
}
web::uri uri{url};
if ((StringCompareCaseInsensitive(uri.scheme().data(), "http") != 0)
&& (StringCompareCaseInsensitive(uri.scheme().data(), "https") != 0))
{
return false;
}
if (uri.host().empty())
{
return false;
}
return true;
}
// IHttpAgent
// Starts an asynchronous HTTP request; results arrive via the IHttpAgentEvents
// callback supplied at construction, tagged with callerContext.
// szPostData/szRange are forwarded to _CreateRequest (presumably POST body and
// Range header — confirm in _CreateRequest). Exceptions map to HRESULTs.
HRESULT HttpAgent::SendRequest(PCSTR szUrl, PCSTR szProxyUrl, PCSTR szPostData, PCSTR szRange, UINT64 callerContext) try
{
    RETURN_IF_FAILED(_CreateClient(szUrl, szProxyUrl));
    (void)_SubmitRequestTask(_CreateRequest(szPostData, szRange), callerContext);
    return S_OK;
} CATCH_RETURN()
// Cancels all outstanding requests and blocks until pending work has drained,
// then resets state so the agent can be used for new requests.
void HttpAgent::Close()
{
    // Cancelling the token notifies any pending/running pplx tasks.
    // Call tracker will then wait for the tasks acknowledge the cancel and/or complete.
    // Too bad the task_group concept from PPL isn't supported in the cpprestsdk's version.
    _cts.cancel();
    _callTracker.Wait();

    // Clients may now make new requests if they choose
    std::unique_lock<std::recursive_mutex> lock(_requestLock);
    _client.reset();
    _cts = pplx::cancellation_token_source();
}
// The Query* functions are supposed to be called only from within the IHttpAgentEvents callbacks
// function because the httpContext (which is the request handle) must be valid.
// httpContext is the web::http::http_response* handed out during the request
// callback; it is only valid for the duration of that callback.
HRESULT HttpAgent::QueryStatusCode(UINT64 httpContext, _Out_ UINT* pStatusCode) const
{
    auto pResponse = reinterpret_cast<web::http::http_response*>(httpContext);
    *pStatusCode = pResponse->status_code();
    return S_OK;
}

HRESULT HttpAgent::QueryContentLength(UINT64 httpContext, _Out_ UINT64* pContentLength)
{
    auto pResponse = reinterpret_cast<web::http::http_response*>(httpContext);
    // cpprest's content_length() reports 0 when the header is absent.
    *pContentLength = pResponse->headers().content_length();
    return S_OK;
}
// Extracts the total resource length from a Content-Range response header of the
// form "bytes <start>-<end>/<length>". Returns E_NOT_SET when the header or the
// '/' separator is missing; std::stoull failures surface via CATCH_RETURN.
HRESULT HttpAgent::QueryContentLengthFromRange(UINT64 httpContext, _Out_ UINT64* pContentLength) try
{
    *pContentLength = 0;

    std::string rangeHeader;
    RETURN_IF_FAILED_EXPECTED(QueryHeadersByType(httpContext, HttpAgentHeaders::Range, rangeHeader));

    // Fix: search for the narrow character '/' — the original passed the wide
    // literal L'/' to strchr, which only worked via integral conversion.
    const auto slashPos = rangeHeader.find('/');
    RETURN_HR_IF_EXPECTED(E_NOT_SET, (slashPos == std::string::npos));

    *pContentLength = std::stoull(rangeHeader.substr(slashPos + 1));
    return S_OK;
} CATCH_RETURN()
// Returns a single named header, or, when pszName is null, all headers serialized
// wire-format style ("Name:Value\r\n" ... terminated by a blank line).
HRESULT HttpAgent::QueryHeaders(UINT64 httpContext, PCSTR pszName, std::string& headers) const noexcept
{
    headers.clear();

    auto pResponse = reinterpret_cast<web::http::http_response*>(httpContext);
    const auto& responseHeaders = pResponse->headers();

    if (pszName != nullptr)
    {
        // Single-header lookup; absent header reports E_NOT_SET.
        auto it = responseHeaders.find(pszName);
        RETURN_HR_IF_EXPECTED(E_NOT_SET, (it == responseHeaders.end()));
        headers = it->second;
        return S_OK;
    }

    // No name given: accumulate every header into the output string.
    std::stringstream ss;
    if (!responseHeaders.empty())
    {
        for (const auto& header : responseHeaders)
        {
            ss << header.first << ':' << header.second << "\r\n";
        }
        ss << "\r\n";
    }
    headers = ss.str();
    return S_OK;
}
// Maps the strongly-typed header enum onto its wire name and delegates to QueryHeaders.
HRESULT HttpAgent::QueryHeadersByType(UINT64 httpContext, HttpAgentHeaders type, std::string& headers) noexcept
{
    switch (type)
    {
    case HttpAgentHeaders::Range:
        return QueryHeaders(httpContext, "Content-Range", headers);
    default:
        // Unknown enum value -- caller bug.
        return E_UNEXPECTED;
    }
}
// Lazily creates (or recreates) the http_client. The client is rebuilt whenever the
// target URL or the proxy URL differs from the one it was created with.
HRESULT HttpAgent::_CreateClient(PCSTR szUrl, PCSTR szProxyUrl) try
{
    std::unique_lock<std::recursive_mutex> lock(_requestLock);

    // We must recreate the client if either the url or the proxy url has changed.
    // Fix: these two checks are now independent 'if's. The original used 'else if',
    // which skipped the proxy comparison whenever a url was also supplied, so a
    // proxy change alongside an unchanged url never triggered a rebuild.
    if ((szUrl != nullptr) && _client)
    {
        const std::string currentUrl = _client->base_uri().to_string();
        if (StringCompareCaseInsensitive(currentUrl.data(), szUrl) != 0)
        {
            _client.reset();
        }
    }
    if ((szProxyUrl != nullptr) && _client)
    {
        const std::string currentProxy = _client->client_config().proxy().address().to_string();
        if (StringCompareCaseInsensitive(currentProxy.data(), szProxyUrl) != 0)
        {
            _client.reset();
        }
    }

    if (!_client)
    {
        // A url is mandatory when building a fresh client.
        RETURN_HR_IF(E_INVALIDARG, (szUrl == nullptr));
        std::string url(szUrl);
        RETURN_HR_IF(INET_E_INVALID_URL, !ValidateUrl(url));

        web::http::client::http_client_config clientConfig;
        _SetWebProxyFromProxyUrl(clientConfig, szProxyUrl);

        web::http::uri remoteUri(url);
        _client = std::make_unique<web::http::client::http_client>(remoteUri, clientConfig);
        DoLogVerbose("New http_client for %s", remoteUri.to_string().data());
    }
    return S_OK;
} CATCH_RETURN()
// Builds the outgoing request. Default method is GET; providing post data switches
// it to POST. An optional range string ("<start>-<end>") becomes a Range header.
web::http::http_request HttpAgent::_CreateRequest(_In_opt_ PCSTR szPostData, _In_opt_ PCSTR szRange)
{
    web::http::http_request request;
    if (szPostData != nullptr)
    {
        request.set_method(web::http::methods::POST);
        request.set_body(utf8string(szPostData));
    }

    auto& headers = request.headers();
    headers["User-Agent"] = DO_USER_AGENT_STR;
    if (szRange != nullptr)
    {
        // The header value requires the "bytes=" unit prefix in front of the range.
        headers["Range"] = std::string("bytes=") + szRange;
    }
    return request;
}
// Issues 'request' on _client and chains two continuations:
//   1) value-based: maps the status code to an HRESULT, reports OnHeadersAvailable,
//      then starts the async body-read loop;
//   2) task-based: always runs, converts any exception to an HRESULT and reports OnComplete.
pplx::task<void> HttpAgent::_SubmitRequestTask(const web::http::http_request& request, UINT64 callerContext)
{
    // Note: the tracker is moved to only the last task-based lambda because it
    // will always get executed whereas a value-based lambda can be skipped due
    // to exceptions or cancellations.
    auto cancellationToken = _cts.get_token();
    auto tracker = _callTracker.Enter();
    // Shared holder keeps the response alive across both continuations; its raw
    // pointer doubles as the httpContext handle passed to the callbacks.
    auto responseHolder = std::make_shared<web::http::http_response>();
    return _client->request(request, cancellationToken).then(
        [this, callerContext, cancellationToken, responseHolder](web::http::http_response response)
        {
            *responseHolder = std::move(response);
            HRESULT hrRequest = _ResultFromStatusCode(responseHolder->status_code());
            if (SUCCEEDED(hrRequest))
            {
                hrRequest = _callback.OnHeadersAvailable(reinterpret_cast<uint64_t>(responseHolder.get()), callerContext);
                // S_FALSE from the callback means "stop this transfer" -- treat it as an abort.
                if (hrRequest == S_FALSE)
                {
                    hrRequest = E_ABORT;
                }
            }
            THROW_IF_FAILED(hrRequest);

            // Start async loop to read incoming data corresponding to the request
            return _DoReadBodyData(responseHolder->body(), std::make_shared<ReadDataBuffer>(),
                cancellationToken, responseHolder, callerContext);
        }).then([this, callerContext, responseHolder, tracker = std::move(tracker)](pplx::task<void> previousTask)
        {
            HRESULT hr = S_OK;
            try
            {
                previousTask.get(); // check for exceptions
            }
            catch (...)
            {
                hr = LOG_CAUGHT_EXCEPTION();
                DoLogWarningHr(hr, "Url: %s, host: %s", _client->base_uri().to_string().data(), _client->base_uri().host().data());
            }

            // Report success and failure. Ignore cancellations.
            if (hr != E_ABORT)
            {
                (void)_callback.OnComplete(hr, reinterpret_cast<UINT64>(responseHolder.get()), callerContext);
            }
        });
}
// Async read loop: pulls up to one buffer's worth of body data per iteration, hands it
// to IHttpAgentEvents::OnData, and chains itself until EOF, abort, or cancellation.
pplx::task<void> HttpAgent::_DoReadBodyData(Concurrency::streams::istream bodyStream, const std::shared_ptr<ReadDataBuffer>& bodyStorage,
    pplx::cancellation_token cancellationToken, const std::shared_ptr<web::http::http_response>& response, UINT64 callerContext)
{
    if (cancellationToken.is_canceled())
    {
        pplx::cancel_current_task();
    }

    // Rewind the stream to the beginning for the next read operation
    // TODO(shishirb) check return value and throw
    (void)bodyStorage->streambuf.seekoff(0, std::ios_base::beg, std::ios_base::in|std::ios_base::out);
    return bodyStream.read(bodyStorage->streambuf, bodyStorage->storage.size()).then(
        [this, bodyStream, bodyStorage, cancellationToken, response, callerContext](size_t bytesRead) mutable
        {
            // Zero bytes read signals end of the response body -- complete the chain.
            if (bytesRead == 0)
            {
                return pplx::create_task([](){});
            }
            HRESULT hr = _callback.OnData(bodyStorage->storage.data(), gsl::narrow<UINT>(bytesRead), reinterpret_cast<UINT64>(response.get()), callerContext);
            // S_FALSE from the callback means "stop this transfer" -- surface it as an abort.
            if (hr == S_FALSE)
            {
                hr = E_ABORT;
            }
            THROW_IF_FAILED(hr);

            // Chain the next read as a continuation (async "tail recursion").
            return _DoReadBodyData(bodyStream, bodyStorage, cancellationToken, response, callerContext);
        });
}
// Translates an HTTP status code into the HRESULT reported to callers.
// Any code not explicitly listed maps to the generic HTTP_E_STATUS_UNEXPECTED.
HRESULT HttpAgent::_ResultFromStatusCode(web::http::status_code code)
{
    using status = web::http::status_codes;
    switch (code)
    {
    case status::OK:
    case status::Created:
    case status::Accepted:
    case status::NoContent:
    case status::NonAuthInfo:
    case status::PartialContent:
        return S_OK;

    case status::MultipleChoices:
    case status::InternalError:
    case status::ServiceUnavailable:
        return HTTP_E_STATUS_SERVER_ERROR;

    case status::MovedPermanently:
    case status::Found:
    case status::SeeOther:
        return HTTP_E_STATUS_UNEXPECTED_REDIRECTION;

    case status::NotFound:
    case status::Gone:
        return HTTP_E_STATUS_NOT_FOUND;

    case status::UseProxy:
    case status::BadGateway:
    case status::GatewayTimeout:
        return HTTP_E_STATUS_BAD_GATEWAY;

    case status::BadRequest:
    case status::LengthRequired:
    case status::PreconditionFailed:
    case status::RequestUriTooLarge:
    case status::UnsupportedMediaType:
    case status::MethodNotAllowed:
    case status::Conflict:
        return HTTP_E_STATUS_BAD_REQUEST;

    case status::Unauthorized:
        return HTTP_E_STATUS_DENIED;

    case status::NotAcceptable:
        return HTTP_E_STATUS_NONE_ACCEPTABLE;

    case status::Forbidden:
        return HTTP_E_STATUS_FORBIDDEN;

    case status::ProxyAuthRequired:
        return HTTP_E_STATUS_PROXY_AUTH_REQ;

    case status::RequestTimeout:
        return HTTP_E_STATUS_REQUEST_TIMEOUT;

    case status::RequestEntityTooLarge:
        return HTTP_E_STATUS_REQUEST_TOO_LARGE;

    case status::NotImplemented:
    case status::HttpVersionNotSupported:
        return HTTP_E_STATUS_NOT_SUPPORTED;

    default:
        return HTTP_E_STATUS_UNEXPECTED;
    }
}
// Applies szProxyUrl to the client config. Credentials embedded in the proxy URL
// (user:password@host) are stripped from the address and set via web_proxy::set_credentials.
void HttpAgent::_SetWebProxyFromProxyUrl(web::http::client::http_client_config& config, _In_opt_ PCSTR szProxyUrl)
{
    // No proxy requested -- leave the config untouched.
    if (szProxyUrl == nullptr)
    {
        return;
    }

    web::uri_builder proxyFullAddress(szProxyUrl);

    // User info will be of the form <user>:<password>.
    // TODO(jimson): Ensure this works even when running as the 'do' user after DropPermissions() is in play.
    web::credentials creds;
    auto credStrings = StringPartition(proxyFullAddress.user_info(), ':');
    if (credStrings.size() == 2)
    {
        creds = web::credentials{credStrings[0], credStrings[1]};
    }

    // cpprest does not make use of creds embedded in proxy address. Must set it via web_proxy::set_credentials.
    proxyFullAddress.set_user_info("");
    web::web_proxy proxy(proxyFullAddress.to_uri());
    proxy.set_credentials(std::move(creds));
    config.set_proxy(proxy);
    DoLogInfo("Using proxy %s", config.proxy().address().to_string().data()); // do not log credentials
}

Просмотреть файл

@ -0,0 +1,72 @@
#pragma once
#include <memory>
#include <cpprest/http_msg.h>
#include <cpprest/rawptrstream.h>
#include <pplx/pplxtasks.h>
#include "do_event.h"
#include "http_agent_interface.h"
#include "waitable_counter.h"
#define DO_HTTP_REQ_BUF_SIZE (128 * 1024) // default request buffer size per request
#define DO_HTTP_RANGEREQUEST_STR_LEN 48 // two 64bit numbers plus a '-' character (20 digits in UINT64)

// Forward declarations keep cpprest's heavyweight client headers out of this header.
namespace web::http::client
{
class http_client;
class http_client_config;
}

// Async HTTP client built on cpprest, implementing IHttpAgent. Results are
// delivered through the IHttpAgentEvents callback supplied at construction.
class HttpAgent : public IHttpAgent
{
public:
    HttpAgent(IHttpAgentEvents& callback);
    ~HttpAgent();

    static bool IsClientError(UINT httpStatusCode);
    // Formats a "<start>-<end>" range string suitable for SendRequest's szRange argument.
    static std::array<char, DO_HTTP_RANGEREQUEST_STR_LEN> MakeRange(UINT64 startOffset, UINT64 lengthBytes);
    // True only for syntactically valid http/https URLs with a non-empty host.
    static bool ValidateUrl(const std::string& url);

    // IHttpAgent
    HRESULT SendRequest(PCSTR szUrl = nullptr, PCSTR szProxyUrl = nullptr, PCSTR szPostData = nullptr, PCSTR szRange = nullptr,
        UINT64 callerContext = 0) override;
    void Close() override;

    // The Query* functions are supposed to be called only from within the IHttpAgentEvents callbacks
    // function because the httpContext (which is the request handle) must be valid.
    HRESULT QueryStatusCode(UINT64 httpContext, _Out_ UINT* pStatusCode) const override;
    HRESULT QueryContentLength(UINT64 httpContext, _Out_ UINT64* pContentLength) override;
    HRESULT QueryContentLengthFromRange(UINT64 httpContext, _Out_ UINT64* pContentLength) override;
    HRESULT QueryHeaders(UINT64 httpContext, _In_opt_z_ PCSTR pszName, std::string& headers) const noexcept override;
    HRESULT QueryHeadersByType(UINT64 httpContext, HttpAgentHeaders type, std::string& headers) noexcept override;

private:
    // Fixed-size read buffer plus the rawptr streambuf that wraps it; one per request.
    struct ReadDataBuffer
    {
        std::vector<BYTE> storage {};
        Concurrency::streams::rawptr_buffer<BYTE> streambuf;

        ReadDataBuffer() :
            storage(DO_HTTP_REQ_BUF_SIZE),
            streambuf(storage.data(), storage.size())
        {
        }
    };

    std::unique_ptr<web::http::client::http_client> _client;
    pplx::cancellation_token_source _cts;
    mutable std::recursive_mutex _requestLock;
    IHttpAgentEvents& _callback;
    WaitableCounter _callTracker;

private:
    HRESULT _CreateClient(PCSTR szUrl = nullptr, PCSTR szProxyUrl = nullptr);
    static web::http::http_request _CreateRequest(PCSTR szPostData, PCSTR szRange);
    pplx::task<void> _SubmitRequestTask(const web::http::http_request& request, UINT64 callerContext);
    pplx::task<void> _DoReadBodyData(Concurrency::streams::istream bodyStream, const std::shared_ptr<ReadDataBuffer>& bodyStorage,
        pplx::cancellation_token cancellationToken, const std::shared_ptr<web::http::http_response>& response, UINT64 callerContext);
    static HRESULT _ResultFromStatusCode(web::http::status_code code);
    static void _SetWebProxyFromProxyUrl(web::http::client::http_client_config& config, _In_opt_ PCSTR szProxyUrl);
};

Просмотреть файл

@ -0,0 +1,29 @@
#pragma once
// Typed identifiers for well-known response headers queryable via QueryHeadersByType.
enum class HttpAgentHeaders
{
    Range,  // maps to the Content-Range response header
};

// Minimal async HTTP client interface. Requests are fire-and-forget; results are
// delivered through IHttpAgentEvents. httpContext handles passed to the Query*
// methods are only valid for the duration of a callback.
class IHttpAgent
{
public:
    virtual ~IHttpAgent() = default;

    // Starts an async request. Non-null postData makes the request a POST;
    // range is "<start>-<end>"; callerContext is echoed back in the callbacks.
    virtual HRESULT SendRequest(PCSTR url, PCSTR proxyUrl = nullptr, PCSTR postData = nullptr, PCSTR range = nullptr,
        UINT64 callerContext = 0) = 0;
    // Cancels outstanding requests and waits for them to drain.
    virtual void Close() = 0;

    virtual HRESULT QueryStatusCode(UINT64 httpContext, _Out_ UINT *statusCode) const = 0;
    virtual HRESULT QueryContentLength(UINT64 httpContext, _Out_ UINT64 *contentLength) = 0;
    virtual HRESULT QueryContentLengthFromRange(UINT64 httpContext, _Out_ UINT64 *contentLength) = 0;
    // name == nullptr returns all headers; otherwise the single named header (E_NOT_SET if absent).
    virtual HRESULT QueryHeaders(UINT64 httpContext, _In_opt_z_ PCSTR name, std::string& headers) const noexcept = 0;
    virtual HRESULT QueryHeadersByType(UINT64 httpContext, HttpAgentHeaders type, std::string& headers) noexcept = 0;
};

// Callbacks through which IHttpAgent delivers request progress and completion.
// httpContext identifies the request and is valid only for the callback's duration.
class IHttpAgentEvents
{
public:
    virtual ~IHttpAgentEvents() = default;

    // Returning S_FALSE from OnHeadersAvailable/OnData aborts the transfer.
    virtual HRESULT OnHeadersAvailable(UINT64 httpContext, UINT64 callerContext) = 0;
    virtual HRESULT OnData(_In_reads_bytes_(cbData) BYTE* pData, UINT cbData, UINT64 httpContext, UINT64 callerContext) = 0;
    // Final result of the transfer; called for both success and failure.
    virtual HRESULT OnComplete(HRESULT hResult, UINT64 httpContext, UINT64 callerContext) = 0;
};

Просмотреть файл

@ -0,0 +1,101 @@
#pragma once
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <pwd.h>
#include <grp.h>
#include <boost/filesystem.hpp>
#include "do_common.h"
#include "do_persistence.h"
// Resolves a group name to its gid via the group database. Throws on failure.
inline gid_t GetGroupIdByName(const char *name)
{
    // getgrnam() returns a pointer into static storage -- do not free it.
    struct group *groupEntry = getgrnam(name);
    if (groupEntry == nullptr)
    {
        // NOTE(review): getgrnam may leave errno at 0 when the entry simply doesn't exist.
        THROW_HR_MSG(E_FAIL, "Failed to get gid from %s, errno: %d", name, errno);
    }
    return groupEntry->gr_gid;
}
// Resolves a user name to its uid via the passwd database. Throws on failure.
inline uid_t GetUserIdByName(const char *name)
{
    struct passwd *pwd = getpwnam(name); // don't free, see getpwnam() for details
    if (pwd == nullptr)
    {
        // Fix: the original message said "gid"; this function looks up a uid.
        // NOTE(review): getpwnam may leave errno at 0 when the entry simply doesn't exist.
        THROW_HR_MSG(E_FAIL, "Failed to get uid from %s, errno: %d", name, errno);
    }
    return pwd->pw_uid;
}
// Best-effort: transfers ownership of 'path' to the 'do' user/group and applies 'mode'.
// Failures are logged, not thrown (the id lookups themselves may still throw).
inline void SetDOPathPermissions(const std::string& path, mode_t mode)
{
    uid_t userid = GetUserIdByName("do");
    gid_t groupid = GetGroupIdByName("do");

    // Fix: chown/chmod report failure as -1 with the cause in errno; the original
    // logged the useless -1 return value instead of errno.
    if (chown(path.c_str(), userid, groupid) < 0)
    {
        DoLogWarning("Failed to set DO ownership of path %s, errno: %d", path.c_str(), errno);
    }
    if (chmod(path.c_str(), mode) < 0)
    {
        DoLogWarning("Failed to modify permissions for path %s, errno: %d", path.c_str(), errno);
    }
}
inline void InitializePath(const std::string& path, mode_t mode = 0) try
{
boost::filesystem::path dirPath(path);
if (!boost::filesystem::exists(dirPath))
{
DoLogInfo("Creating directory at %s", path.c_str());
boost::filesystem::create_directory(dirPath);
if (mode != 0)
{
SetDOPathPermissions(path, mode);
}
}
} CATCH_LOG()
// Creates the directories DO needs, with per-directory permissions.
inline void InitializeDOPaths()
{
    // Config directory may receive caller-supplied configs, therefore it needs the
    // group write bit (S_IWGRP).
    const mode_t configMode = S_IRWXU | S_IRGRP | S_IWGRP | S_IXGRP | S_IROTH | S_IXOTH;
    // No external process or linux user uses the persistence directory, so no S_IWGRP.
    const mode_t persistenceMode = S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH;

    InitializePath(docli::GetConfigDirectory(), configMode);
    InitializePath(docli::GetPersistenceDirectory(), persistenceMode);
    InitializePath(docli::GetRuntimeDirectory());
}
// Permanently drops root privileges to the 'do' user/group. Throws if any step
// fails or if the process is not running as root.
inline void DropPermissions()
{
    const uid_t userid = GetUserIdByName("do");
    const gid_t groupid = GetGroupIdByName("do");

    if (getuid() != 0)
    {
        THROW_HR_MSG(E_FAIL, "Attempting to drop permissions while not Root, uid: %u", getuid());
    }

    // Order matters: supplementary groups and gid must be dropped while still root;
    // setuid goes last because it removes the privilege needed for the other two.
    if (initgroups("do", groupid) != 0)
    {
        THROW_HR_MSG(E_FAIL, "initgroups: Unable to initialize supplementary group access list: errno: %d", errno);
    }
    if (setgid(groupid) != 0)
    {
        THROW_HR_MSG(E_FAIL, "setgid: Unable to drop group privileges: %u, errno: %d", groupid, errno);
    }
    if (setuid(userid) != 0)
    {
        THROW_HR_MSG(E_FAIL, "setuid: Unable to drop user privileges: %u, errno: %d", userid, errno);
    }
}

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше