From 20c802afd4a40865071aad12052c3e836c7fba4c Mon Sep 17 00:00:00 2001 From: Scott McKay Date: Fri, 6 Sep 2024 08:28:14 +1000 Subject: [PATCH] Add better native nuget package readme (#21889) ### Description Request from Nuget team to add a better readme to the nuget package so it is displayed nicely on nuget.org. Previously we were using the ORT repo readme.md but that a) doesn't display correctly due to limited markdown support on nuget.org, and b) has a lot of irrelevant info like build pipeline status. - Created a generic readme.md that includes the ORT description from the main readme, includes the ORT logo via an acceptable link, and lists the native nuget packages so the file can be included in any of them as-is. - Updated the nuget packaging script to add the `readme` tag and use this file. ### Motivation and Context Request from MS Nuget team to MS package owners to add. --- .../nuget/generate_nuspec_for_native_nuget.py | 9 +++- tools/nuget/nupkg.README.md | 52 +++++++++++++++++++ 2 files changed, 60 insertions(+), 1 deletion(-) create mode 100644 tools/nuget/nupkg.README.md diff --git a/tools/nuget/generate_nuspec_for_native_nuget.py b/tools/nuget/generate_nuspec_for_native_nuget.py index be477bb2932..56e739f5ff3 100644 --- a/tools/nuget/generate_nuspec_for_native_nuget.py +++ b/tools/nuget/generate_nuspec_for_native_nuget.py @@ -213,6 +213,10 @@ def generate_repo_url(line_list, repo_url, commit_id): line_list.append('<repository type="git" url="' + repo_url + '" commit="' + commit_id + '" />') +def generate_readme(line_list): + line_list.append("<readme>README.md</readme>") + + def add_common_dependencies(xml_text, package_name, version): xml_text.append('') if package_name == "Microsoft.ML.OnnxRuntime.Gpu": @@ -327,6 +331,7 @@ def generate_metadata(line_list, args): generate_license(metadata_list) generate_project_url(metadata_list, "https://github.com/Microsoft/onnxruntime") generate_repo_url(metadata_list, "https://github.com/Microsoft/onnxruntime.git", args.commit_id) + generate_readme(metadata_list) generate_dependencies(metadata_list, 
args.package_name, args.package_version) generate_release_notes(metadata_list, args.sdk_info) metadata_list.append("") @@ -1045,7 +1050,9 @@ def generate_files(line_list, args): ) # README - files_list.append("') + files_list.append( + "' + ) # Process License, ThirdPartyNotices, Privacy files_list.append("') diff --git a/tools/nuget/nupkg.README.md b/tools/nuget/nupkg.README.md new file mode 100644 index 00000000000..e48480787c7 --- /dev/null +++ b/tools/nuget/nupkg.README.md @@ -0,0 +1,52 @@ +## About + +![ONNX Runtime Logo](https://raw.githubusercontent.com/microsoft/onnxruntime/main/docs/images/ONNX_Runtime_logo_dark.png) + +**ONNX Runtime is a cross-platform machine-learning inferencing accelerator**. + +**ONNX Runtime** can enable faster customer experiences and lower costs, supporting models from deep learning frameworks such as PyTorch and TensorFlow/Keras as well as classical machine learning libraries such as scikit-learn, LightGBM, XGBoost, etc. +ONNX Runtime is compatible with different hardware, drivers, and operating systems, and provides optimal performance by leveraging hardware accelerators where applicable alongside graph optimizations and transforms. 
+ +Learn more → [here](https://www.onnxruntime.ai/docs) + +## NuGet Packages + +### ONNX Runtime Native packages + +#### Microsoft.ML.OnnxRuntime + - Native libraries for all supported platforms + - CPU Execution Provider + - CoreML Execution Provider on macOS/iOS + - https://onnxruntime.ai/docs/execution-providers/CoreML-ExecutionProvider.html + - XNNPACK Execution Provider on Android/iOS + - https://onnxruntime.ai/docs/execution-providers/Xnnpack-ExecutionProvider.html + +#### Microsoft.ML.OnnxRuntime.Gpu + - Windows and Linux + - TensorRT Execution Provider + - https://onnxruntime.ai/docs/execution-providers/TensorRT-ExecutionProvider.html + - CUDA Execution Provider + - https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html + - CPU Execution Provider + +#### Microsoft.ML.OnnxRuntime.DirectML + - Windows + - DirectML Execution Provider + - https://onnxruntime.ai/docs/execution-providers/DirectML-ExecutionProvider.html + - CPU Execution Provider + +#### Microsoft.ML.OnnxRuntime.QNN + - 64-bit Windows + - QNN Execution Provider + - https://onnxruntime.ai/docs/execution-providers/QNN-ExecutionProvider.html + - CPU Execution Provider + + +### Other packages + +#### Microsoft.ML.OnnxRuntime.Managed + - C# language bindings + +#### Microsoft.ML.OnnxRuntime.Extensions + - Custom operators for pre/post processing on all supported platforms. + - https://github.com/microsoft/onnxruntime-extensions