Ubuntu 2024-09-17 10:41:06 +00:00
Parents 80697264ee de2f9fdc8a
Commit ef20d51d2c
233 changed files: 10215 additions and 24352 deletions

2
.github/CODEOWNERS Vendored

@@ -1 +1 @@
* @vibhansa-msft @souravgupta-msft @ashruti-msft
* @vibhansa-msft @souravgupta-msft @ashruti-msft @syeleti-msft @jainakanksha-msft

63
.github/template/generate_page/action.yml Vendored Normal file

@@ -0,0 +1,63 @@
name: generate_page
description: "Generate github page for performance benchmark"
inputs:
TEST:
required: true
description: "Test to run"
TYPE:
required: true
description: "Type of storage account"
TOKEN:
required: true
description: "Token for checkin"
runs:
using: "composite"
steps:
# Pre-run cleanup
- name: "Cleanup before test"
shell: bash
run: |
rm -rf /mnt/blob_mnt/*
rm -rf /mnt/tempcache/*
# Run the benchmark script
- name: "Run Benchmark Script : ${{ inputs.TEST }}"
shell: bash
run: |
./perf_testing/scripts/fio_bench.sh /mnt/blob_mnt ${{ inputs.TEST }}
# Push the bandwidth results and publish the graphs
- name: "Update Bandwidth Results : ${{ inputs.TEST }}"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: ${{ inputs.TEST }}/bandwidth_results.json
tool: 'customBiggerIsBetter'
alert-threshold: "160%"
max-items-in-chart: 100
github-token: ${{ inputs.TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ inputs.TYPE }}/bandwidth/${{ inputs.TEST }}
# Push the latency results and publish the graphs
- name: "Update Latency Results : ${{ inputs.TEST }}"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: ${{ inputs.TEST }}/latency_results.json
tool: 'customSmallerIsBetter'
alert-threshold: "160%"
max-items-in-chart: 100
github-token: ${{ inputs.TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ inputs.TYPE }}/latency/${{ inputs.TEST }}

252
.github/workflows/benchmark.yml Vendored Normal file

@@ -0,0 +1,252 @@
name: Benchmark
on:
schedule:
- cron: '0 4 * * SUN'
push:
branches:
- main
jobs:
PerfTesting:
strategy:
max-parallel: 1
matrix:
TestType: ["premium", "standard"]
# TestType: ["premium", "standard", "premium_hns", "standard_hns"]
runs-on: [self-hosted, 1ES.Pool=blobfuse2-benchmark]
timeout-minutes: 360
permissions:
id-token: write
contents: write
pages: write
steps:
# Print the host info
- name: 'Host info'
run: hostnamectl
# Install Fuse3
- name: "Install Fuse3"
run: |
sudo apt-get update
sudo apt-get install fuse3 libfuse3-dev gcc -y
# Install Tools
- name: "Install Tools"
run: |
sudo apt-get install fio jq python3 -y
# Checkout main branch
- name: 'Checkout Blobfuse2'
uses: actions/checkout@v4.1.1
# with:
# ref: vibhansa/perftestrunner
# Install GoLang
- name: "Install Go"
run: |
./go_installer.sh ../
go version
# Build Blobfuse2
- name: "Build Blobfuse2"
run: |
./build.sh
# Run binary and validate the version
- name: "Validate Version"
run: |
sudo cp ./blobfuse2 /usr/bin/
which blobfuse2
blobfuse2 --version
- name: "Create Env variables for account name and key"
run: |
if [ "${{ matrix.TestType }}" == "standard" ]; then
echo "Create standard account env"
echo "AZURE_STORAGE_ACCOUNT=${{ secrets.STANDARD_ACCOUNT }}" >> $GITHUB_ENV
echo "AZURE_STORAGE_ACCESS_KEY=${{ secrets.STANDARD_KEY }}" >> $GITHUB_ENV
elif [ "${{ matrix.TestType }}" == "premium" ]; then
echo "Create premium account env"
echo "AZURE_STORAGE_ACCOUNT=${{ secrets.PREMIUM_ACCOUNT }}" >> $GITHUB_ENV
echo "AZURE_STORAGE_ACCESS_KEY=${{ secrets.PREMIUM_KEY }}" >> $GITHUB_ENV
elif [ "${{ matrix.TestType }}" == "standard_hns" ]; then
echo "Create standard hns account env"
echo "AZURE_STORAGE_ACCOUNT=${{ secrets.STANDARD_HNS_ACCOUNT }}" >> $GITHUB_ENV
echo "AZURE_STORAGE_ACCESS_KEY=${{ secrets.STANDARD_HNS_KEY }}" >> $GITHUB_ENV
elif [ "${{ matrix.TestType }}" == "premium_hns" ]; then
echo "Create premium hns account env"
echo "AZURE_STORAGE_ACCOUNT=${{ secrets.PREMIUM_HNS_ACCOUNT }}" >> $GITHUB_ENV
echo "AZURE_STORAGE_ACCESS_KEY=${{ secrets.PREMIUM_HNS_KEY }}" >> $GITHUB_ENV
fi
# Create the config file for testing
- name: "Create config file for account type: ${{ matrix.TestType }}"
run: |
blobfuse2 gen-test-config --config-file=azure_block_bench.yaml --container-name=${{ secrets.BENCH_CONTAINER }} --output-file=./config.yaml
cat ./config.yaml
# Create the mount directories
- name: "Create mount path"
run: |
sudo mkdir -p /mnt/blob_mnt
sudo mkdir -p /mnt/tempcache
sudo chmod 777 /mnt/blob_mnt
sudo chmod 777 /mnt/tempcache
# ---------------------------------------------------------------------------------------------------------------------------------------------------
# Run the basic tests using FIO
# Run the Read tests
- name: "Read Test"
uses: "./.github/template/generate_page"
with:
TEST: "read"
TYPE: ${{ matrix.TestType }}
TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Run the Write tests with high number of threads
- name: "High threads Test"
uses: "./.github/template/generate_page"
with:
TEST: "highlyparallel"
TYPE: ${{ matrix.TestType }}
TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Run the Write tests
- name: "Write Test"
uses: "./.github/template/generate_page"
with:
TEST: "write"
TYPE: ${{ matrix.TestType }}
TOKEN: ${{ secrets.GITHUB_TOKEN }}
# Run the Create tests
- name: "Create File Test"
uses: "./.github/template/generate_page"
with:
TEST: "create"
TYPE: ${{ matrix.TestType }}
TOKEN: ${{ secrets.GITHUB_TOKEN }}
# ---------------------------------------------------------------------------------------
# The tests below need to run separately as their output is different
# ---------------------------------------------------------------------------------------------------
# Run the List tests
# This must always run after the create tests
- name: "List File Test"
shell: bash
run: |
rm -rf /mnt/blob_mnt/*
rm -rf /mnt/tempcache/*
./perf_testing/scripts/fio_bench.sh /mnt/blob_mnt list
- name: "Update Benchmark Results : List"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: list/list_results.json
tool: 'customSmallerIsBetter'
alert-threshold: "500%"
max-items-in-chart: 100
github-token: ${{ secrets.GITHUB_TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ matrix.TestType }}/time/list
# ---------------------------------------------------------------------------------------
# Run app-based tests
# These need to run separately as their output is different
- name: "App based Test"
shell: bash
run: |
rm -rf /mnt/blob_mnt/*
rm -rf /mnt/tempcache/*
./perf_testing/scripts/fio_bench.sh /mnt/blob_mnt app
- name: "Update Bandwidth Results : App"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: app/app_bandwidth.json
tool: 'customBiggerIsBetter'
alert-threshold: "160%"
max-items-in-chart: 100
github-token: ${{ secrets.GITHUB_TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ matrix.TestType }}/bandwidth/app
- name: "Update Latency Results : App"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: app/app_time.json
tool: 'customSmallerIsBetter'
alert-threshold: "160%"
max-items-in-chart: 100
github-token: ${{ secrets.GITHUB_TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ matrix.TestType }}/time/app
- name: "Update Bandwidth Results : High Speed App"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: app/highapp_bandwidth.json
tool: 'customBiggerIsBetter'
alert-threshold: "160%"
max-items-in-chart: 100
github-token: ${{ secrets.GITHUB_TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ matrix.TestType }}/bandwidth/highapp
- name: "Update Latency Results : High Speed App"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: app/highapp_time.json
tool: 'customSmallerIsBetter'
alert-threshold: "160%"
max-items-in-chart: 100
github-token: ${{ secrets.GITHUB_TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ matrix.TestType }}/time/highapp
# ---------------------------------------------------------------------------------------
# Run Rename tests
# This needs to run separately as its output is different
- name: "Rename Test"
shell: bash
run: |
rm -rf /mnt/blob_mnt/*
rm -rf /mnt/tempcache/*
./perf_testing/scripts/fio_bench.sh /mnt/blob_mnt rename
- name: "Update Latency Results : Rename"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: rename/rename_time.json
tool: 'customSmallerIsBetter'
alert-threshold: "160%"
max-items-in-chart: 100
github-token: ${{ secrets.GITHUB_TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ matrix.TestType }}/time/rename
# ---------------------------------------------------------------------------------------

9
.github/workflows/codeql-analysis.yml Vendored

@@ -35,13 +35,14 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v2
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
@@ -50,7 +51,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
uses: github/codeql-action/autobuild@v3
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
@@ -64,4 +65,4 @@
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
uses: github/codeql-action/analyze@v3

CHANGELOG.md

@@ -1,3 +1,78 @@
## 2.3.3 (Unreleased)
**Bug Fixes**
## 2.3.2 (2024-09-03)
**Bug Fixes**
- Fixed the case where file creation using SAS on HNS accounts was returning the wrong error code.
- [#1402](https://github.com/Azure/azure-storage-fuse/issues/1402) Fixed proxy URL parsing.
- In flush operation, the blocks will be committed only if the handle is dirty.
- Fixed an issue in File-Cache that caused upload to fail due to insufficient permissions.
**Data Integrity Fixes**
- Fixed block-cache read of small files in direct-io mode, where the file size is not a multiple of the kernel buffer size.
- Fixed a race condition in the block-cache random write flow where a block is uploaded and written to in parallel.
- Fixed an issue in the block-cache random read/write flow where an uncommitted block, which was deleted from the local cache, is reused.
- Fixed sparse file data integrity issues.
**Other Changes**
- LFU policy in file cache has been removed.
- If not assigned in the config, default values for the following block-cache parameters are calculated as follows (illustrated below):
- Memory preallocated for Block-Cache is 80% of free memory
- Disk Cache Size is 80% of free disk space
- Prefetch is 2 times the number of CPU cores
- Parallelism is 3 times the number of CPU cores
- Default value of Disk Cache Size in File Cache is 80% of free disk space
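As a rough illustration of how these rules land on a given machine, the sketch below computes the same values with `free` and `nproc`; it mirrors the stated formulas and is not the code Blobfuse2 itself runs.

```bash
# Sketch only: compute the documented block-cache defaults for this host.
free_bytes=$(free -b | awk 'NR==2 {print $4}')          # free memory, in bytes
disk_avail=$(df --output=avail -B1 /mnt | tail -n 1)    # free disk space, in bytes
cores=$(nproc)
echo "pool size   : $((free_bytes * 80 / 100)) bytes (80% of free memory)"
echo "disk cache  : $((disk_avail * 80 / 100)) bytes (80% of free disk)"
echo "prefetch    : $((cores * 2)) blocks (2x CPU cores)"
echo "parallelism : $((cores * 3)) threads (3x CPU cores)"
```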
## 2.3.0 (2024-05-16)
**Bug Fixes**
- For the fuse minor version check, rely on the fusermount3 command output rather than the one exposed from fuse_common.
- Fixed a crash during disk eviction in block-cache caused by a large number of threads from TLRU.
- Fixed an issue where get attributes was failing for directories in blob accounts when the CPK flag was enabled.
**Features**
- Added support for authentication using Azure CLI.
**Other Changes**
- Added support for:
- Ubuntu 24.04 (x86_64 and ARM64)
- Rocky Linux 8 and 9
- Alma Linux 8 and 9
- Added support for FIPS based Linux systems.
- Updated dependencies to address security vulnerabilities.
## 2.3.0~preview.1 (2024-04-04)
**Bug Fixes**
- [#1057](https://github.com/Azure/azure-storage-fuse/issues/1057) Fixed the issue where user-assigned identity is not used to authenticate when system-assigned identity is enabled.
- Listing blobs is now supported for blob names that contain characters that aren't valid in XML (U+FFFE or U+FFFF).
- [#1359](https://github.com/Azure/azure-storage-fuse/issues/1359), [#1368](https://github.com/Azure/azure-storage-fuse/issues/1368) Fixed RHEL 8.6 mount failure
**Features**
- Migrated to the latest [azblob SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob).
- Migrated to the latest [azdatalake SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake).
- Migrated from deprecated ADAL to MSAL through the latest [azidentity SDK](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity).
- Added support for uploading blobs in cold and premium tier.
- Support CPK for adls storage accounts.
- Lazy-write support for async flush and close file calls. The actual upload will be scheduled in the background when this feature is enabled.
## 2.2.1 (2024-02-28)
**Bug Fixes**
- Fixed panic while truncating a file to a very large size.
- Fixed block-cache panic on flush of a file which has no active changeset
- Fixed block-cache panic on renaming a file and then flushing older handle
- Fixed block-cache flush resulting in invalid-block-list error
## 2.2.0 (2024-01-24)
**Bug Fixes**
- Invalidate attribute cache entry on `PathAlreadyExists` error in create directory operation.
- When `$HOME` environment variable is not present, use the current directory.
- Fixed mount failure on nonempty mount path for fuse3.
**Features**
- Support CPK for block storage accounts.
- Added support to write files using block-cache
- Optimized for sequential writing
- Editing/Appending existing files works only if files were originally created using block-cache with the same block size
## 2.1.2 (2023-11-17)
**Bug Fixes**
- [#1243](https://github.com/Azure/azure-storage-fuse/issues/1243) Fixed issue where symlink was not working for ADLS accounts.
@@ -24,7 +99,7 @@
**Features**
- Sync in stream mode will force upload the file to storage container.
- Fail `Open` and `Write` operations with file-cache if the file size exceeds the high threshold set with local cache limits.
- Fail `Open` and `Write` operations with file-cache if the file size exceeds the high threshold set with local cache limits.
## 2.1.0 (2023-08-31)
**Features**


@@ -8,7 +8,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

19661
NOTICE

File diff suppressed because it is too large

README.md

@@ -1,12 +1,34 @@
# Blobfuse2 - A Microsoft supported Azure Storage FUSE driver
## About
Blobfuse2 is an open source project developed to provide a virtual filesystem backed by Azure Storage. It uses the libfuse open source library (fuse3) to communicate with the Linux FUSE kernel module, and implements the filesystem operations using the Azure Storage REST APIs.
This is the next generation [blobfuse](https://github.com/Azure/azure-storage-fuse)
This is the next generation [blobfuse](https://github.com/Azure/azure-storage-fuse).
Blobfuse2 is stable, and is ***supported by Microsoft*** provided that it is used within its limits documented here. Blobfuse2 supports both reads and writes however, it does not guarantee continuous sync of data written to storage using other APIs or other mounts of Blobfuse2. For data integrity it is recommended that multiple sources do not modify the same blob/file. Please submit an issue [here](https://github.com/azure/azure-storage-fuse/issues) for any issues/feature requests/questions.
## About Data Consistency and Concurrency
Blobfuse2 is stable and ***supported by Microsoft*** when used within its [documented limits](#un-supported-file-system-operations). Blobfuse2 supports high-performance reads and writes with strong consistency; however, it is recommended that multiple clients do not modify the same blob/file simultaneously to ensure data integrity. Blobfuse2 does not guarantee continuous synchronization of data written to the same blob/file using multiple clients or across multiple mounts of Blobfuse2 concurrently. If you modify an existing blob/file with another client while also reading that object, Blobfuse2 will not return the most up-to-date data. To ensure your reads see the newest blob/file data, disable all forms of caching at the kernel level (using `direct-io`) as well as at the Blobfuse2 level, and then re-open the blob/file.
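For example, a mount that must always observe the newest data can disable caching end to end; the sketch below uses flags documented later in this README, with illustrative paths:

```bash
# Sketch: kernel cache off (direct_io) and attribute caching off, so every
# read goes to storage; re-open the file after it changes remotely.
blobfuse2 mount /mnt/blob_mnt \
    --config-file=./config.yaml \
    -o direct_io \
    --attr-cache-timeout=0 \
    --attr-timeout=0
```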
Please submit an issue [here](https://github.com/azure/azure-storage-fuse/issues) for any issues/feature requests/questions.
[This](#config-guide) section will help you choose the correct config for Blobfuse2.
## NOTICE
- We have seen some customer issues around files getting corrupted when `streaming` is used in write mode. Kindly avoid using this feature for writes while we investigate and resolve it.
- Due to known data consistency issues when using Blobfuse2 in `block-cache` mode, it is strongly recommended that all Blobfuse2 installations be upgraded to version 2.3.2. For more information, see [this](https://github.com/Azure/azure-storage-fuse/wiki/Blobfuse2-Known-issues).
- As of version 2.3.0, blobfuse has updated its authentication methods. For Managed Identity, Object-ID based OAuth is solely accessible via CLI-based login, requiring Azure CLI on the system. For a dependency-free option, users may utilize Application/Client-ID or Resource ID based authentication.
- `streaming` mode is being deprecated.
## Limitations in Block Cache
- Concurrent write operations on the same file using multiple handles are not checked for data consistency and may lead to incorrect data being written.
- A read operation on a file that is being written to simultaneously by another process or handle will not return the most up-to-date data.
- When copying files with trailing null bytes using `cp` utility to a Blobfuse2 mounted path, use `--sparse=never` parameter to avoid data being trimmed. For example, `cp --sparse=never src dest`.
- In write operations, data written is persisted (or committed) to the Azure Storage container only when a close, sync, or flush operation is called by the user application.
- Files cannot be modified if they were originally created with a block size different from the one configured.
## Recommendations in Block Cache
- User applications must check the return code (success/failure) of filesystem calls like read, write, close, flush, etc. If an error is returned, the application must abort the respective operation (see the sketch after this list).
- User applications must ensure that there is only one writer at a time for a given file.
- When dealing with very large files (in TiB), the block-size must be configured accordingly. Azure Storage supports only [50,000 blocks](https://learn.microsoft.com/en-us/rest/api/storageservices/put-block-list?tabs=microsoft-entra-id#remarks) per blob.
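As a concrete instance of the first recommendation, a shell workload can abort on any failed call against the mount (a sketch; paths are illustrative):

```bash
# Sketch: stop on the first failing filesystem operation.
set -e
cp --sparse=never ./data.bin /mnt/blob_mnt/data.bin   # avoid trimming trailing null bytes
sync /mnt/blob_mnt/data.bin                           # data is committed on close/sync/flush
```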
## Blobfuse2 Benchmarks
[This](https://azure.github.io/azure-storage-fuse/) page lists various benchmarking results for HNS and FNS storage accounts.
## Supported Platforms
Visit [this](https://github.com/Azure/azure-storage-fuse/wiki/Blobfuse2-Supported-Platforms) page to see the list of supported Linux distros.
@@ -16,7 +38,7 @@ Visit [this](https://github.com/Azure/azure-storage-fuse/wiki/Blobfuse2-Supporte
- Basic file system operations such as mkdir, opendir, readdir, rmdir, open,
read, create, write, close, unlink, truncate, stat, rename
- Local caching to improve subsequent access times
- Streaming to support reading AND writing large files
- Streaming/Block-Cache to support reading AND writing large files
- Parallel downloads and uploads to improve access time for large files
- Multiple mounts to the same container for read-only workloads
@@ -43,7 +65,7 @@ One of the biggest BlobFuse2 features is our brand new health monitor. It allows
- CLI to check or update a parameter in the encrypted config
- Set MD5 sum of a blob while uploading
- Validate MD5 sum on download and fail file open on mismatch
- Large file writing through write streaming
- Large file writing through write streaming/Block-Cache
## Blobfuse2 performance compared to blobfuse(v1.x.x)
- 'git clone' operation is 25% faster (tested with vscode repo cloning)
@@ -112,6 +134,8 @@ To learn about a specific command, just include the name of the command (For exa
* `--secure-config=true` : Config file is encrypted using the `blobfuse2 secure` command.
* `--passphrase=<STRING>` : Passphrase used to encrypt/decrypt config file.
* `--wait-for-mount=<TIMEOUT IN SECONDS>` : Let the parent process wait for the given timeout before exiting, to ensure the child has started.
* `--block-cache` : Enable block-cache instead of file-cache. This works only when mounted without a config file.
* `--lazy-write` : Enable asynchronous close of file handles and schedule the upload in the background.
- Attribute cache options
* `--attr-cache-timeout=<TIMEOUT IN SECONDS>`: The timeout for the attribute cache entries.
* `--no-symlinks=true`: Disable symlink support to improve performance.
@@ -122,10 +146,11 @@ To learn about a specific command, just include the name of the command (For exa
* `--subdirectory=<path>` : Subdirectory to mount instead of entire container.
* `--disable-compression:false` : Disable content encoding negotiation with server. If blobs have 'content-encoding' set to 'gzip' then turn on this flag.
* `--use-adls=false` : Specify whether the configured storage account is HNS enabled. This must be turned on when an HNS-enabled account is mounted.
* `--cpk-enabled=true`: Allows mounting containers with CPK. Use the config file or environment variables to set the CPK encryption key and the CPK encryption key SHA.
- File cache options
* `--file-cache-timeout=<TIMEOUT IN SECONDS>`: Timeout for which file is cached on local system.
* `--tmp-path=<PATH>`: The path to the file cache.
* `--cache-size-mb=<SIZE IN MB>`: Amount of disk cache that can be used by blobfuse.
* `--cache-size-mb=<SIZE IN MB>`: Amount of disk cache that can be used by blobfuse. Default - 80% of free disk space.
* `--high-disk-threshold=<PERCENTAGE>`: If local cache usage exceeds this, start early eviction of files from cache.
* `--low-disk-threshold=<PERCENTAGE>`: If local cache usage drops below this threshold, stop early eviction.
* `--sync-to-flush=false` : Sync call will force upload a file to storage container if this is set to true, otherwise it just evicts file from local cache.
@@ -133,10 +158,12 @@ To learn about a specific command, just include the name of the command (For exa
* `--block-size-mb=<SIZE IN MB>`: Size of a block to be downloaded during streaming.
- Block-Cache options
* `--block-cache-block-size=<SIZE IN MB>`: Size of a block to be downloaded as a unit.
* `--block-cache-pool-size=<SIZE IN MB>`: Size of pool to be used for caching. This limits total memory used by block-cache.
* `--block-cache-pool-size=<SIZE IN MB>`: Size of pool to be used for caching. This limits total memory used by block-cache. Default - 80% of free memory available.
* `--block-cache-path=<PATH>`: Path where downloaded blocks will be persisted. Not providing this parameter will disable the disk caching.
* `--block-cache-disk-size=<SIZE IN MB>`: Disk space to be used for caching.
* `--block-cache-prefetch=<Number of blocks>`: Number of blocks to prefetch at max when sequential reads are in progress.
* `--block-cache-disk-size=<SIZE IN MB>`: Disk space to be used for caching. Default - 80% of free disk space.
* `--block-cache-disk-timeout=<seconds>`: Timeout for which disk cache is valid.
* `--block-cache-prefetch=<Number of blocks>`: Number of blocks to prefetch at max when sequential reads are in progress. Default - 2 times number of CPU cores.
* `--block-cache-parallelism=<count>`: Number of parallel threads doing upload/download operation. Default - 3 times number of CPU cores.
* `--block-cache-prefetch-on-open=true`: Start prefetching on the open system call instead of waiting for the first read. Improves performance if the file is read sequentially from offset 0. A combined mount example appears below.
- Fuse options
* `--attr-timeout=<TIMEOUT IN SECONDS>`: Time the kernel can cache inode attributes.
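Putting several of these options together, a hand-tuned block-cache mount might look like the sketch below (values are illustrative, not recommendations):

```bash
# Sketch: override the computed block-cache defaults at mount time.
blobfuse2 mount /mnt/blob_mnt \
    --config-file=./config.yaml \
    --block-cache-block-size=16 \
    --block-cache-path=/mnt/tempcache \
    --block-cache-disk-size=81920 \
    --block-cache-prefetch=16 \
    --block-cache-parallelism=24
```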
@@ -173,20 +200,39 @@ To learn about a specific command, just include the name of the command (For exa
- Proxy Server:
* `http_proxy`: The proxy server address. Example: `10.1.22.4:8080`.
* `https_proxy`: The proxy server address when https is turned off forcing http. Example: `10.1.22.4:8080`.
- CPK options:
* `AZURE_STORAGE_CPK_ENCRYPTION_KEY`: Customer provided base64-encoded AES-256 encryption key value.
* `AZURE_STORAGE_CPK_ENCRYPTION_KEY_SHA256`: Base64-encoded SHA256 of the CPK encryption key.
## Config file
- See [this](./sampleFileCacheConfig.yaml) sample config file.
- See [this](./setup/baseConfig.yaml) config file for a list and description of all possible configurable options in blobfuse2.
***Please note: do not use quotations `""` for any of the config parameters***
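For orientation, a minimal config might look like the sketch below. The layout is assumed from the sample configs linked above, and every value is a placeholder; note that no quotation marks are used:

```bash
# Sketch of a minimal block-cache config; see the sample configs above for
# the authoritative layout. All values are placeholders.
cat > config.yaml <<'EOF'
logging:
  level: log_err
components:
  - libfuse
  - block_cache
  - attr_cache
  - azstorage
block_cache:
  block-size-mb: 16
  path: /mnt/tempcache
azstorage:
  type: block
  account-name: myaccount
  account-key: mykey
  container: mycontainer
  mode: key
EOF
```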
## Choosing Between File Cache and Stream Modes
Please refer to this diagram to decide on whether to use the file cache or streaming. Sample config file URLs are below the diagram.
![alt text](./config_decision_tree.png?raw=true "File Cache vs. Streaming")
## Config Guide
The diagrams below guide you in choosing the right configuration for your workloads.
- Choose the right auth mode
<br/><br/>
![alt text](./guide/AuthModeHelper.png?raw=true "Auth Mode Selection Guide")
<br/><br/>
- Choose the right caching for Read-Only workloads
<br/><br/>
![alt text](./guide/CacheModeForReadOnlyWorkloads.png?raw=true "Cache Mode Selection Guide For Read-Only Workloads")
<br/><br/>
- Choose the right caching for Read-Write workloads
<br/><br/>
![alt text](./guide/CacheModeForReadWriteWorkloads.png?raw=true "Cache Mode Selection Guide For Read-Write Workloads")
<br/><br/>
- Choose the right block-cache configuration
<br/><br/>
![alt text](./guide/BlockCacheConfig.png?raw=true "Block-Cache Configuration")
<br/><br/>
- Choose the right file-cache configuration
<br/><br/>
![alt text](./guide/FileCacheConfig.png?raw=true "File-Cache Configuration")
<br/><br/>
- [Sample File Cache Config](./sampleFileCacheConfig.yaml)
- [Sample Block-Cache Config](./sampleBlockCacheConfig.yaml)
- [Sample Stream Config](./sampleStreamingConfig.yaml)
- [All Config options](./setup/baseConfig.yaml)
## Frequently Asked Questions
- How do I generate a SAS with permissions for rename?
@@ -221,7 +267,10 @@ If your use-case involves updating/uploading file(s) through other means and you
- When Blobfuse2 is mounted on a container, SYS_ADMIN privileges are required for it to interact with the fuse driver. If the container is created without this privilege, the mount will fail. A sample command to spawn a docker container is
`docker run -it --rm --cap-add=SYS_ADMIN --device=/dev/fuse --security-opt apparmor:unconfined <environment variables> <docker image>`
- In the case of `mount all`, the system may limit the number of containers you can mount in parallel (when you go above 100 containers). To increase this system limit, use the command below:
`echo 256 | sudo tee /proc/sys/fs/inotify/max_user_instances`
- Refer to [this](#limitations-in-block-cache) section for block-cache limitations.
### Syslog security warning
By default, Blobfuse2 will log to syslog. The default settings will, in some cases, log relevant file paths to syslog.
If this is sensitive information, turn off logging or set log-level to LOG_ERR.
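For instance, the mount can raise the log level so paths are not recorded (a sketch; the `--log-level` flag name is assumed here):

```bash
# Sketch: log errors only, so relevant file paths stay out of syslog.
blobfuse2 mount /mnt/blob_mnt --config-file=./config.yaml --log-level=LOG_ERR
```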

25
TSG.md

@@ -3,13 +3,6 @@
Please ensure logging is turned on in DEBUG mode when trying to reproduce an issue.
This can help in many instances to understand what the underlying issue is.
A useful setting in your configuration file to utilize when debugging is `sdk-trace: true` under the azstorage component. This will log all outgoing REST calls.
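A quick way to stage such a reproduction is sketched below; the key placement is assumed from the settings named above, and the fragment should be merged into your existing config rather than duplicating keys:

```bash
# Sketch: the two debug settings named above, written to a snippet file to
# merge into your config (level under logging, sdk-trace under azstorage).
cat > debug-snippet.yaml <<'EOF'
logging:
  level: log_debug      # DEBUG mode logging
azstorage:
  sdk-trace: true       # log all outgoing REST calls
EOF
```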
# BlobFuse2 Health Monitor
One of the biggest BlobFuse2 features is our brand new health monitor. It allows customers gain more insight into how their BlobFuse2 instance is behaving with the rest of their machine. Visit [here](https://github.com/Azure/azure-storage-fuse/blob/main/tools/health-monitor/README.md) to set it up.
# Common Mount Problems
**1. Error: fusermount: failed to open /etc/fuse.conf: Permission denied**
@@ -65,7 +58,7 @@ The Blobfuse2 config file should specify the accountName as the original Storage
If the config file is correct, please verify name resolution
`dig +short myblobstorageaccount.blob.core.windows.net` should return a private IP, for example 10.0.0.5.
If for some reason the translation/name resolution fails please confirm the VNet settings to ensure that it is forwarding DNS translation requests to Azure Provided DNS 168.63.129.16. In case the Blobfuse2 hosting VM is set up to forward to a Custom DNS Server, the Custom DNS settings should be verified, it should forward DNS requests to the Azure Provided DNS 168.63.129.16.
If for some reason the translation/name resolution fails, please confirm the VNet settings to ensure that it is forwarding DNS translation requests to the Azure-provided DNS 168.63.129.16. In case the Blobfuse2 hosting VM is set up to forward to a custom DNS server, the custom DNS settings should be verified; they should forward DNS requests to the Azure-provided DNS 168.63.129.16.
Here are a few steps to resolve DNS issues when integrating private endpoint with Azure Private DNS:
@@ -107,6 +100,22 @@ For HNS account, always add `type: adls` under `azstorage` section in your confi
To create a private-endpoint for DFS in Azure portal: Go to your storage account -> Networking -> Private Endpoint connections. Click `+ Private endpoint`, fill in Subscription, Resource Group, Name, Network Interface Name and Region. Click next and under Target sub-resource select `dfs`. Click Virtual network and select virtual network and Subnet. Click DNS. Select Yes for Integrate with private DNS. Select the Subscription and Resource Group for your private link DNS. Select Next, Next and select Create.
**11. Failed to initialize new pipeline [config error in azstorage [account name not provided]]**
Make sure your configuration file has an `azstorage` section.
The [Blobfuse2 base configuration file](https://github.com/Azure/azure-storage-fuse/blob/main/setup/baseConfig.yaml) contains a list of all settings and a brief explanation of each setting. Use the [sample file cache configuration file](https://github.com/Azure/azure-storage-fuse/blob/main/sampleFileCacheConfig.yaml) or the [sample block cache configuration file](https://github.com/Azure/azure-storage-fuse/blob/main/sampleBlockCacheConfig.yaml) to get started quickly by using some basic settings for each of those scenarios.
**12. Failed to mount in proxy setup [proxyconnect tcp: dial tcp: lookup : no such host]**
Make sure to set the proxy URL in the environment variable `https_proxy` or `http_proxy`, and that it is accessible to the Blobfuse2 process. If using a private endpoint, make sure that:
- It is pointing to the `endpoint` in the `azstorage` section of the config.
- Or, have a DNS resolution where `account.blob.core.windows.net` can be resolved back to the private endpoint. In the case of an HNS account, make sure the private endpoint is configured for both blob and dfs.
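As a sketch, the proxy variable can be exported so it is visible to the Blobfuse2 process before mounting (the address is the README's own example value):

```bash
# Sketch: expose the proxy to Blobfuse2, then mount.
export https_proxy=10.1.22.4:8080
blobfuse2 mount /mnt/blob_mnt --config-file=./config.yaml
```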
# Blobfuse2 Health Monitor
One of the Blobfuse2 features is the health monitor. It allows customers to gain more insight into how their Blobfuse2 instance is behaving with the rest of their machine. Visit [here](https://github.com/Azure/azure-storage-fuse/blob/main/tools/health-monitor/README.md) to set it up. Please note that this feature is currently in preview.
# Common Problems after a Successful Mount
**1. Errno 24: Failed to open file /mnt/tmp/root/filex in file cache. errno = 24 OR Too many files Open error**
Errno 24 in Linux corresponds to the 'Too many open files' error, which can occur when an application opens more files than it is allowed on the system. Blobfuse2 typically allows 20 files fewer than the ulimit value set in Linux. Usually the Linux limit is 1024 per process (so Blobfuse2 in this case will allow 1004 open file descriptors at a time). The recommended approach is to edit /etc/security/limits.conf in Ubuntu and add these two lines:
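A typical pair that raises the open-file limit looks like the hedged example below (values are illustrative, not the TSG's exact lines):

```bash
# Hedged example: raise the per-user open-file limits; the values shown here
# are only illustrative.
cat <<'EOF' | sudo tee -a /etc/security/limits.conf
* soft nofile 16384
* hard nofile 16384
EOF
```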


@@ -38,6 +38,17 @@ steps:
sudo apt-get install ${{ parameters.fuselib }} gcc -y
displayName: 'Install libfuse'
# Install azcli
- script: |
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
az --version
displayName: 'Install Azure CLI'
# azcli login
- script: |
az login --identity --username $(ACLI_BLOBFUSE_MSI_APP_ID)
displayName: 'Azure CLI login'
- task: Go@0
inputs:
command: 'build'
@@ -63,10 +74,8 @@ steps:
echo "\"msi-appid\"": "\"$(AZTEST_APP_ID)\"", >> $cnfFile
echo "\"msi-resid\"": "\"$(AZTEST_RES_ID)\"", >> $cnfFile
echo "\"msi-objid\"": "\"$(AZTEST_OBJ_ID)\"", >> $cnfFile
echo "\"spn-client\"": "\"$(AZTEST_CLIENT)\"", >> $cnfFile
echo "\"spn-tenant\"": "\"$(AZTEST_TENANT)\"", >> $cnfFile
echo "\"spn-secret\"": "\"$(AZTEST_SECRET)\"", >> $cnfFile
echo "\"skip-msi\"": "${{ parameters.skip_msi }}", >> $cnfFile
echo "\"skip-azcli\"": "false", >> $cnfFile
echo "\"proxy-address\"": "\"${{ parameters.proxy_address }}\"" >> $cnfFile
echo "}" >> $cnfFile
cat $cnfFile
@@ -78,7 +87,7 @@
- script: |
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin
$(go env GOPATH)/bin/golangci-lint --version
$(go env GOPATH)/bin/golangci-lint run --tests=false --build-tags ${{ parameters.tags }} --skip-dirs test,common/stats_collector,common/stats_monitor --max-issues-per-linter=0 --skip-files component/libfuse/libfuse2_handler_test_wrapper.go,component/libfuse/libfuse_handler_test_wrapper.go > lint.log
$(go env GOPATH)/bin/golangci-lint run --tests=false --build-tags ${{ parameters.tags }} --exclude-dirs test,common/stats_collector,common/stats_monitor --max-issues-per-linter=0 --exclude-files component/libfuse/libfuse2_handler_test_wrapper.go,component/libfuse/libfuse_handler_test_wrapper.go > lint.log
result=$(cat lint.log | wc -l)
if [ $result -ne 0 ]; then
echo "-----------------------------------"


@@ -0,0 +1,67 @@
parameters:
- name: conf_template
type: string
- name: config_file
type: string
- name: container
type: string
- name: temp_dir
type: string
- name: mount_dir
type: string
- name: idstring
type: string
- name: adls
type: boolean
- name: account_name
type: string
- name: account_key
type: string
- name: account_type
type: string
- name: account_endpoint
- name: distro_name
type: string
- name: quick_test
type: boolean
default: true
- name: verbose_log
type: boolean
default: false
- name: clone
type: boolean
default: false
- name: kversion
type: string
steps:
- template: 'mount.yml'
parameters:
working_dir: $(WORK_DIR)
mount_dir: ${{ parameters.mount_dir }}
temp_dir: ${{ parameters.temp_dir }}
prefix: ${{ parameters.idstring }}
mountStep:
script: |
$(WORK_DIR)/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.config_file }} --default-working-dir=$(WORK_DIR)
- script: |
cd ${{ parameters.mount_dir }}
wget https://cdn.kernel.org/pub/linux/kernel/v6.x/linux-${{ parameters.kversion }}.tar.xz
displayName: 'Get kernel tarfile'
- script: |
tar -xvf ${{ parameters.mount_dir }}/linux-${{ parameters.kversion }}.tar.xz
displayName: 'Untar kernel'
- script: |
cd ${{ parameters.mount_dir }}/linux-${{ parameters.kversion }}
make defconfig
make
displayName: 'Run MAKE on the kernel'
- template: 'cleanup.yml'
parameters:
working_dir: $(WORK_DIR)
mount_dir: ${{ parameters.mount_dir }}
temp_dir: ${{ parameters.temp_dir }}


@@ -80,10 +80,8 @@ steps:
echo "\"msi-appid\"": "\"$(AZTEST_APP_ID)\"", >> $cnfFile
echo "\"msi-resid\"": "\"$(AZTEST_RES_ID)\"", >> $cnfFile
echo "\"msi-objid\"": "\"$(AZTEST_OBJ_ID)\"", >> $cnfFile
echo "\"spn-client\"": "\"$(AZTEST_CLIENT)\"", >> $cnfFile
echo "\"spn-tenant\"": "\"$(AZTEST_TENANT)\"", >> $cnfFile
echo "\"spn-secret\"": "\"$(AZTEST_SECRET)\"", >> $cnfFile
echo "\"skip-msi\"": "true", >> $cnfFile
echo "\"skip-azcli\"": "true", >> $cnfFile
echo "\"proxy-address\"": "\"\"" >> $cnfFile
echo "}" >> $cnfFile


@@ -17,6 +17,9 @@ parameters:
- name: skip_msi
type: string
default: "true"
- name: skip_azcli
type: string
default: "true"
- name: proxy_address
type: string
default: ""
@@ -35,6 +38,7 @@ steps:
- script: |
sudo apt-get update --fix-missing
sudo apt-get install ${{ parameters.fuselib }} -y
fusermount -V
displayName: 'Libfuse Setup'
condition: eq('${{ parameters.hostedAgent }}', true)
@@ -120,10 +124,8 @@ steps:
echo "\"msi-appid\"": "\"$(AZTEST_APP_ID)\"", >> $cnfFile
echo "\"msi-resid\"": "\"$(AZTEST_RES_ID)\"", >> $cnfFile
echo "\"msi-objid\"": "\"$(AZTEST_OBJ_ID)\"", >> $cnfFile
echo "\"spn-client\"": "\"$(AZTEST_CLIENT)\"", >> $cnfFile
echo "\"spn-tenant\"": "\"$(AZTEST_TENANT)\"", >> $cnfFile
echo "\"spn-secret\"": "\"$(AZTEST_SECRET)\"", >> $cnfFile
echo "\"skip-msi\"": "${{ parameters.skip_msi }}", >> $cnfFile
echo "\"skip-azcli\"": "${{ parameters.skip_azcli }}", >> $cnfFile
echo "\"proxy-address\"": "\"${{ parameters.proxy_address }}\"" >> $cnfFile
echo "}" >> $cnfFile
@@ -132,6 +134,19 @@
continueOnError: false
workingDirectory: ${{ parameters.working_directory }}
# Install azcli
- script: |
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
az --version
displayName: 'Install Azure CLI'
condition: eq('${{ parameters.skip_azcli }}', 'false')
# azcli login
- script: |
az login --identity --username $(ACLI_BLOBFUSE_MSI_APP_ID)
displayName: 'Azure CLI login'
condition: eq('${{ parameters.skip_azcli }}', 'false')
# Running unit tests
- task: Go@0
condition: eq('${{ parameters.skip_ut }}', 'false')


@@ -7,6 +7,11 @@ parameters:
type: string
steps:
- script: |
ps -ef | grep blobfuse2
df -h
displayName: 'Check process info'
- script: |
sudo fusermount -u ${mount_dir}
sudo fusermount3 -u ${mount_dir}


@@ -42,8 +42,6 @@ parameters:
default: false
steps:
- checkout: none
# Get the host details on which these test are running
- script: |
echo $(Description)
@@ -52,6 +50,9 @@
# Create directory structure and prepare to mount
- ${{ parameters.installStep }}
- checkout: none
- script: |
sudo rm -rf $(ROOT_DIR)
sudo mkdir -p $(ROOT_DIR)
@@ -101,7 +102,6 @@ steps:
quick_test: ${{ parameters.quick_test }}
verbose_log: ${{ parameters.verbose_log }}
clone: ${{ parameters.clone }}
stream_direct_test: false
# TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a global variable
temp_dir: $(TEMP_DIR)
mount_dir: $(MOUNT_DIR)
@@ -122,7 +122,6 @@ steps:
quick_test: ${{ parameters.quick_test }}
verbose_log: ${{ parameters.verbose_log }}
clone: ${{ parameters.clone }}
stream_direct_test: false
# TODO: These can be removed one day and replace all instances of ${{ parameters.temp_dir }} with $(TEMP_DIR) since it is a global variable
temp_dir: $(TEMP_DIR)
mount_dir: $(MOUNT_DIR)


@@ -0,0 +1,212 @@
parameters:
- name: conf_template
type: string
- name: config_file
type: string
- name: container
type: string
- name: temp_dir
type: string
- name: mount_dir
type: string
- name: idstring
type: string
- name: adls
type: boolean
- name: account_name
type: string
- name: account_key
type: string
- name: account_type
type: string
- name: account_endpoint
- name: distro_name
type: string
- name: quick_test
type: boolean
default: true
- name: verbose_log
type: boolean
default: false
- name: clone
type: boolean
default: false
- name: block_size_mb
type: string
default: "8"
steps:
- script: |
$(WORK_DIR)/blobfuse2 gen-test-config --config-file=$(WORK_DIR)/testdata/config/azure_key.yaml --container-name=${{ parameters.container }} --temp-path=${{ parameters.temp_dir }} --output-file=${{ parameters.config_file }}
displayName: 'Create Config File for RW mount'
env:
NIGHTLY_STO_ACC_NAME: ${{ parameters.account_name }}
NIGHTLY_STO_ACC_KEY: ${{ parameters.account_key }}
ACCOUNT_TYPE: ${{ parameters.account_type }}
ACCOUNT_ENDPOINT: ${{ parameters.account_endpoint }}
VERBOSE_LOG: ${{ parameters.verbose_log }}
continueOnError: false
- script: |
cat ${{ parameters.config_file }}
displayName: 'Print config file'
- template: 'mount.yml'
parameters:
working_dir: $(WORK_DIR)
mount_dir: ${{ parameters.mount_dir }}
temp_dir: ${{ parameters.temp_dir }}
prefix: ${{ parameters.idstring }}
mountStep:
script: |
$(WORK_DIR)/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.config_file }} --default-working-dir=$(WORK_DIR) --file-cache-timeout=3200
- script: |
for i in $(seq 1 10); do echo $(shuf -i 0-4294967296 -n 1); done | parallel --will-cite -j 5 'head -c {} < /dev/urandom > ${{ parameters.mount_dir }}/datafiles_{}'
for i in {1,2,3,4,5,6,7,8,9,10,20,30,50,100,200}; do echo $i; done | parallel --will-cite -j 5 'head -c {}M < /dev/urandom > ${{ parameters.mount_dir }}/mixedfiles_{}.txt'
for i in {1,2,3,4,5,6,7,8,9,10,20,30,50,100,200}; do echo $i; done | parallel --will-cite -j 5 'head -c {}M < /dev/urandom > ${{ parameters.mount_dir }}/mixedfiles_{}.png'
cd ${{ parameters.mount_dir }}
python3 $(WORK_DIR)/testdata/scripts/generate-parquet-files.py
ls -l ${{ parameters.mount_dir }}/mixedfiles_*
ls -l ${{ parameters.mount_dir }}/datafiles_*
displayName: 'Generate data with File-Cache'
- script: |
md5sum ${{ parameters.mount_dir }}/datafiles_* > $(WORK_DIR)/md5sum_original_files.txt
md5sum ${{ parameters.mount_dir }}/mixedfiles_* >> $(WORK_DIR)/md5sum_original_files.txt
displayName: 'Generate md5Sum with File-Cache'
- script: |
echo "----------------------------------------------"
ls -l ${{ parameters.mount_dir }}
displayName: 'Print contents of File-Cache'
- script: |
$(WORK_DIR)/blobfuse2 unmount all
displayName: 'Unmount RW mount'
- script: |
cd $(WORK_DIR)
$(WORK_DIR)/blobfuse2 gen-test-config --config-file=$(WORK_DIR)/testdata/config/azure_key_bc.yaml --container-name=${{ parameters.container }} --temp-path=${{ parameters.temp_dir }} --output-file=${{ parameters.config_file }}
displayName: 'Create Config File for RO mount'
env:
NIGHTLY_STO_ACC_NAME: ${{ parameters.account_name }}
NIGHTLY_STO_ACC_KEY: ${{ parameters.account_key }}
ACCOUNT_TYPE: ${{ parameters.account_type }}
ACCOUNT_ENDPOINT: ${{ parameters.account_endpoint }}
VERBOSE_LOG: ${{ parameters.verbose_log }}
continueOnError: false
- template: 'mount.yml'
parameters:
working_dir: $(WORK_DIR)
mount_dir: ${{ parameters.mount_dir }}
temp_dir: ${{ parameters.temp_dir }}
prefix: ${{ parameters.idstring }}
ro_mount: true
mountStep:
script: |
$(WORK_DIR)/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.config_file }} --default-working-dir=$(WORK_DIR) -o ro --block-cache-block-size ${{ parameters.block_size_mb }}
- script: |
echo "----------------------------------------------"
ls -l ${{ parameters.mount_dir }}/datafiles*
ls -l ${{ parameters.mount_dir }}/mixedfiles*
displayName: 'Print contents of Block-Cache'
- script: |
md5sum ${{ parameters.mount_dir }}/datafiles_* > $(WORK_DIR)/md5sum_block_cache.txt
md5sum ${{ parameters.mount_dir }}/mixedfiles_* >> $(WORK_DIR)/md5sum_block_cache.txt
displayName: 'Generate md5Sum with Block-Cache'
- script: |
$(WORK_DIR)/blobfuse2 unmount all
displayName: 'Unmount RW mount'
- script: |
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_original_files.txt
cat $(WORK_DIR)/md5sum_original_files.txt | cut -d " " -f1 > $(WORK_DIR)/temp.txt && mv $(WORK_DIR)/temp.txt $(WORK_DIR)/md5sum_original_files.txt
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_block_cache.txt
cat $(WORK_DIR)/md5sum_block_cache.txt | cut -d " " -f1 > $(WORK_DIR)/temp.txt && mv $(WORK_DIR)/temp.txt $(WORK_DIR)/md5sum_block_cache.txt
echo "----------------------------------------------"
diff $(WORK_DIR)/md5sum_original_files.txt $(WORK_DIR)/md5sum_block_cache.txt
if [ $? -ne 0 ]; then
exit 1
fi
displayName: 'Compare md5Sum'
- script: |
cd $(WORK_DIR)
$(WORK_DIR)/blobfuse2 gen-test-config --config-file=$(WORK_DIR)/testdata/config/azure_key_bc.yaml --container-name=${{ parameters.container }} --temp-path=${{ parameters.temp_dir }} --output-file=${{ parameters.config_file }}
displayName: 'Create Config File for RO mount with direct-io and disk-cache enabled'
env:
NIGHTLY_STO_ACC_NAME: ${{ parameters.account_name }}
NIGHTLY_STO_ACC_KEY: ${{ parameters.account_key }}
ACCOUNT_TYPE: ${{ parameters.account_type }}
ACCOUNT_ENDPOINT: ${{ parameters.account_endpoint }}
VERBOSE_LOG: ${{ parameters.verbose_log }}
continueOnError: false
- template: 'mount.yml'
parameters:
working_dir: $(WORK_DIR)
mount_dir: ${{ parameters.mount_dir }}
temp_dir: ${{ parameters.temp_dir }}
prefix: ${{ parameters.idstring }}
ro_mount: true
mountStep:
script: |
$(WORK_DIR)/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.config_file }} --default-working-dir=$(WORK_DIR) -o ro -o direct_io --block-cache-path block_cache --block-cache-block-size ${{ parameters.block_size_mb }}
- script: |
echo "----------------------------------------------"
ls -l ${{ parameters.mount_dir }}
displayName: 'Print contents of Block-Cache'
- script: |
md5sum ${{ parameters.mount_dir }}/datafiles_* > $(WORK_DIR)/md5sum_block_cache_direct_io.txt
md5sum ${{ parameters.mount_dir }}/mixedfiles_* >> $(WORK_DIR)/md5sum_block_cache_direct_io.txt
displayName: 'Generate md5Sum with Block-Cache Direct-IO'
- script: |
md5sum ${{ parameters.mount_dir }}/datafiles_* > $(WORK_DIR)/md5sum_block_cache_disk_cache.txt
md5sum ${{ parameters.mount_dir }}/mixedfiles_* >> $(WORK_DIR)/md5sum_block_cache_disk_cache.txt
displayName: 'Generate md5Sum with Block-Cache Disk-Cache'
- script: |
$(WORK_DIR)/blobfuse2 unmount all
displayName: 'Unmount RW mount'
- script: |
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_original_files.txt
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_block_cache_direct_io.txt | cut -d " " -f1 > $(WORK_DIR)/temp.txt && mv $(WORK_DIR)/temp.txt $(WORK_DIR)/md5sum_block_cache_direct_io.txt
cat $(WORK_DIR)/md5sum_block_cache_direct_io.txt
echo "----------------------------------------------"
diff $(WORK_DIR)/md5sum_original_files.txt $(WORK_DIR)/md5sum_block_cache_direct_io.txt
if [ $? -ne 0 ]; then
exit 1
fi
displayName: 'Compare md5Sum with Block-Cache Direct-IO'
- script: |
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_original_files.txt
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_block_cache_disk_cache.txt | cut -d " " -f1 > $(WORK_DIR)/temp.txt && mv $(WORK_DIR)/temp.txt $(WORK_DIR)/md5sum_block_cache_disk_cache.txt
cat $(WORK_DIR)/md5sum_block_cache_disk_cache.txt
echo "----------------------------------------------"
diff $(WORK_DIR)/md5sum_original_files.txt $(WORK_DIR)/md5sum_block_cache_disk_cache.txt
if [ $? -ne 0 ]; then
exit 1
fi
displayName: 'Compare md5Sum with Block-Cache Disk-Cache'
- template: 'cleanup.yml'
parameters:
working_dir: $(WORK_DIR)
mount_dir: ${{ parameters.mount_dir }}
temp_dir: ${{ parameters.temp_dir }}


@@ -25,15 +25,15 @@ parameters:
- name: quick_test
type: boolean
default: true
- name: mnt_flags
type: string
default: ""
- name: verbose_log
type: boolean
default: false
- name: clone
type: boolean
default: false
- name: stream_direct_test
type: boolean
default: false
steps:
- script: |
@@ -51,6 +51,7 @@
cat ${{ parameters.config_file }}
displayName: 'Print config file'
# run below step only if direct_io is false
- template: 'mount.yml'
parameters:
working_dir: $(WORK_DIR)
@@ -59,11 +60,11 @@
prefix: ${{ parameters.idstring }}
mountStep:
script: |
$(WORK_DIR)/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.config_file }} --default-working-dir=$(WORK_DIR) --file-cache-timeout=3200
$(WORK_DIR)/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.config_file }} --default-working-dir=$(WORK_DIR) --file-cache-timeout=3200 ${{ parameters.mnt_flags }}
- script: |
for i in {1,2,3,4,5,6,7,8,9,10,20,30,50,100,200,1024,2048,4096}; do echo $i; done | parallel --will-cite -j 5 'head -c {}M < /dev/urandom > ${{ parameters.mount_dir }}/myfile_{}'
ls -l ${{ parameters.mount_dir }}/myfile_*
ls -lh ${{ parameters.mount_dir }}/myfile_*
displayName: 'Generate data'
- script: |
@@ -85,6 +86,11 @@ steps:
VERBOSE_LOG: ${{ parameters.verbose_log }}
continueOnError: false
- script:
cat ${{ parameters.config_file }}
displayName: 'Print block cache config file'
- template: 'mount.yml'
parameters:
working_dir: $(WORK_DIR)
@@ -94,7 +100,7 @@
ro_mount: true
mountStep:
script: |
$(WORK_DIR)/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.config_file }} --default-working-dir=$(WORK_DIR) -o ro
$(WORK_DIR)/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.config_file }} --default-working-dir=$(WORK_DIR) -o ro ${{ parameters.mnt_flags }}
- script: |
md5sum ${{ parameters.mount_dir }}/myfile_* > $(WORK_DIR)/md5sum_block_cache.txt
@@ -105,6 +111,11 @@
displayName: 'Unmount RO mount'
- script: |
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_block_cache.txt
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_file_cache.txt
echo "----------------------------------------------"
diff $(WORK_DIR)/md5sum_block_cache.txt $(WORK_DIR)/md5sum_file_cache.txt
if [ $? -ne 0 ]; then
exit 1
@@ -120,30 +131,135 @@
ro_mount: true
mountStep:
script: |
$(WORK_DIR)/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.config_file }} --default-working-dir=$(WORK_DIR)
$(WORK_DIR)/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.config_file }} --default-working-dir=$(WORK_DIR) ${{ parameters.mnt_flags }}
- script: |
for i in {1,2,3,4,5,6,7,8,9,10,20,30,50,100,200,1024,2048,4096}; do echo $i; done | parallel --will-cite -j 5 'cp ${{ parameters.mount_dir }}/myfile_{} ${{ parameters.mount_dir }}/myfileCopy_{}'
md5sum ${{ parameters.mount_dir }}/myfileCopy_* > $(WORK_DIR)/md5sum_block_cache_write.txt
ls -lh ${{ parameters.mount_dir }}/myfile*
displayName: 'Copy files using block-cache'
- script: |
rm -rf cp ${{ parameters.mount_dir }}/myfile*
displayName: 'Copy files using block-cache'
rm -rf ${{ parameters.mount_dir }}/myfile*
displayName: 'Clear files using block-cache'
- script: |
$(WORK_DIR)/blobfuse2 unmount all
displayName: 'Unmount RW mount'
- script: |
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_block_cache_write.txt
cat $(WORK_DIR)/md5sum_block_cache_write.txt | cut -d " " -f1 > $(WORK_DIR)/md5sum_block_cache_write.txt1
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_file_cache.txt
cat $(WORK_DIR)/md5sum_file_cache.txt | cut -d " " -f1 > $(WORK_DIR)/md5sum_file_cache.txt1
echo "----------------------------------------------"
diff $(WORK_DIR)/md5sum_block_cache_write.txt1 $(WORK_DIR)/md5sum_file_cache.txt1
if [ $? -ne 0 ]; then
exit 1
fi
displayName: 'Compare md5Sum'
- template: 'mount.yml'
parameters:
working_dir: $(WORK_DIR)
mount_dir: ${{ parameters.mount_dir }}
temp_dir: ${{ parameters.temp_dir }}
prefix: ${{ parameters.idstring }}
ro_mount: true
mountStep:
script: |
$(WORK_DIR)/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.config_file }} --default-working-dir=$(WORK_DIR) ${{ parameters.mnt_flags }}
- script: |
rm -rf $(WORK_DIR)/localfile*
for i in {1,2,3,4,5,6,7,8,9,10,20,30,50,100,200,1024,2048,4096}; do echo $i; done | parallel --will-cite -j 5 'head -c {}M < /dev/urandom > $(WORK_DIR)/localfile{}'
displayName: 'Generate local files'
- script: |
rm -rf ${{ parameters.mount_dir }}/remotefile*
for i in {1,2,3,4,5,6,7,8,9,10,20,30,50,100,200,1024,2048,4096}; do echo $i; done | parallel --will-cite -j 5 'cp $(WORK_DIR)/localfile{} ${{ parameters.mount_dir }}/remotefile{}'
displayName: 'Upload local files'
- script: |
md5sum $(WORK_DIR)/localfile* > $(WORK_DIR)/md5sum_local_modified.txt
md5sum ${{ parameters.mount_dir }}/remotefile* > $(WORK_DIR)/md5sum_remote_modified.txt
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_local_modified.txt
cat $(WORK_DIR)/md5sum_local_modified.txt | cut -d " " -f1 > $(WORK_DIR)/md5sum_local_modified.txt1
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_remote_modified.txt
cat $(WORK_DIR)/md5sum_remote_modified.txt | cut -d " " -f1 > $(WORK_DIR)/md5sum_remote_modified.txt1
echo "----------------------------------------------"
diff $(WORK_DIR)/md5sum_local_modified.txt1 $(WORK_DIR)/md5sum_remote_modified.txt1
if [ $? -ne 0 ]; then
exit 1
fi
head -c 13M < /dev/urandom > $(WORK_DIR)/additionaldata.data
displayName: 'Compare MD5 before modification'
- script: |
for i in {1,2,3,4,5,6,7,8,9,10,20,30,50,100,200,1024,2048,4096}; do echo $i; done | parallel --will-cite -j 5 'cat $(WORK_DIR)/additionaldata.data >> $(WORK_DIR)/localfile{}'
ls -lh $(WORK_DIR)/localfile*
displayName: 'Modify local files'
- script: |
for i in {1,2,3,4,5,6,7,8,9,10,20,30,50,100,200,1024,2048,4096}; do echo $i; done | parallel --will-cite -j 5 'cat $(WORK_DIR)/additionaldata.data >> ${{ parameters.mount_dir }}/remotefile{}'
ls -lh ${{ parameters.mount_dir }}/remotefile*
displayName: 'Modify remote files'
- script: |
$(WORK_DIR)/blobfuse2 unmount all
displayName: 'Unmount RW mount'
- template: 'mount.yml'
parameters:
working_dir: $(WORK_DIR)
mount_dir: ${{ parameters.mount_dir }}
temp_dir: ${{ parameters.temp_dir }}
prefix: ${{ parameters.idstring }}
ro_mount: true
mountStep:
script: |
$(WORK_DIR)/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.config_file }} --default-working-dir=$(WORK_DIR) ${{ parameters.mnt_flags }}
- script: |
md5sum $(WORK_DIR)/localfile* > $(WORK_DIR)/md5sum_local_modified.txt
md5sum ${{ parameters.mount_dir }}/remotefile* > $(WORK_DIR)/md5sum_remote_modified.txt
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_local_modified.txt
cat $(WORK_DIR)/md5sum_local_modified.txt | cut -d " " -f1 > $(WORK_DIR)/md5sum_local_modified.txt1
echo "----------------------------------------------"
cat $(WORK_DIR)/md5sum_remote_modified.txt
cat $(WORK_DIR)/md5sum_remote_modified.txt | cut -d " " -f1 > $(WORK_DIR)/md5sum_remote_modified.txt1
echo "----------------------------------------------"
diff $(WORK_DIR)/md5sum_local_modified.txt1 $(WORK_DIR)/md5sum_remote_modified.txt1
if [ $? -ne 0 ]; then
exit 1
fi
displayName: 'Compare MD5 of modified files'
- script: |
rm -rf $(WORK_DIR)/localfile*
rm -rf ${{ parameters.mount_dir }}/myfile*
displayName: 'Cleanup local and remote test files'
- script: |
$(WORK_DIR)/blobfuse2 unmount all
displayName: 'Unmount RW mount'
- task: PublishBuildArtifacts@1
inputs:
pathToPublish: blobfuse2-logs.txt
artifactName: 'blobfuse_block_cache.txt'
condition: failed()
- script: |
tail -n 200 blobfuse2-logs.txt
displayName: 'View Logs'
condition: failed()
- template: 'cleanup.yml'
parameters:
working_dir: $(WORK_DIR)


@@ -31,9 +31,6 @@ parameters:
- name: clone
type: boolean
default: false
- name: stream_direct_test
type: boolean
default: false
- name: enable_symlink_adls
type: boolean
default: false
@@ -67,7 +64,6 @@ steps:
artifact_name: '${{ parameters.distro_name }}_${{ parameters.idstring }}.txt'
verbose_log: ${{ parameters.verbose_log }}
clone: ${{ parameters.clone }}
stream_direct_test: ${{ parameters.stream_direct_test }}
enable_symlink_adls: ${{ parameters.enable_symlink_adls }}
mountStep:
script: |


@@ -19,9 +19,6 @@ parameters:
- name: quick_test
type: boolean
default: true
- name: stream_direct_test
type: boolean
default: false
- name: enable_symlink_adls
type: boolean
default: false
@@ -69,7 +66,7 @@
- task: Go@0
inputs:
command: 'test'
arguments: '-v -timeout=2h ./... -args -mnt-path=${{ parameters.mount_dir }} -adls=${{parameters.adls}} -clone=${{parameters.clone}} -tmp-path=${{parameters.temp_dir}} -quick-test=${{parameters.quick_test}} -stream-direct-test=${{parameters.stream_direct_test}} -enable-symlink-adls=${{parameters.enable_symlink_adls}} -distro-name="${{parameters.distro_name}}"'
arguments: '-v -timeout=2h ./... -args -mnt-path=${{ parameters.mount_dir }} -adls=${{parameters.adls}} -clone=${{parameters.clone}} -tmp-path=${{parameters.temp_dir}} -quick-test=${{parameters.quick_test}} -enable-symlink-adls=${{parameters.enable_symlink_adls}} -distro-name="${{parameters.distro_name}}"'
workingDirectory: ${{ parameters.working_dir }}/test/e2e_tests
displayName: 'E2E Test: ${{ parameters.idstring }}'
timeoutInMinutes: 120

View file

@ -12,7 +12,7 @@ parameters:
default: 'Test'
- name: ro_mount
type: boolean
default: false
default: false
steps:

View file

@ -6,6 +6,8 @@ parameters:
default: "null"
steps:
# Create directory structure and prepare to mount
- ${{ parameters.installStep }}
- checkout: none
# Get the host details on which these test are running
@ -14,8 +16,6 @@ steps:
hostnamectl
displayName: 'Print Agent Info'
# Create directory structure and prepare to mount
- ${{ parameters.installStep }}
- script: |
sudo rm -rf $(ROOT_DIR)
sudo mkdir -p $(ROOT_DIR)

View file

@ -5,14 +5,10 @@ parameters:
type: string
- name: account_endpoint
type: string
- name: spn_account_endpoint
type: string
- name: adls
type: boolean
- name: account_name
type: string
- name: spn_account_name
type: string
- name: account_key
type: string
- name: account_sas
@ -43,20 +39,14 @@ parameters:
type: boolean
- name: test_sas_credential
type: boolean
- name: test_spn_credential
- name: test_azcli_credential
type: boolean
- name: test_stream
type: boolean
- name: stream_config
type: string
- name: stream_filename_config
type: string
- name: test_azurite
type: boolean
default: false
- name: sas_credential_config
type: string
- name: spn_credential_config
- name: azcli_credential_config
type: string
- name: azurite_config
type: string
@ -92,42 +82,6 @@ steps:
displayName: Print config file
condition: ${{ parameters.test_key_credential }}
# Stream e2e
- script: |
cd ${{ parameters.working_dir }}
${{ parameters.working_dir }}/blobfuse2 gen-test-config --config-file=azure_stream.yaml --container-name=${{ parameters.container }} --output-file=${{ parameters.stream_config }}
displayName: Create Stream Config File
env:
ACCOUNT_TYPE: ${{ parameters.account_type }}
NIGHTLY_STO_ACC_NAME: ${{ parameters.account_name }}
NIGHTLY_STO_ACC_KEY: ${{ parameters.account_key }}
ACCOUNT_ENDPOINT: ${{ parameters.account_endpoint }}
VERBOSE_LOG: ${{ parameters.verbose_log }}
condition: ${{ parameters.test_stream }}
continueOnError: false
- script: cat ${{ parameters.stream_config }}
displayName: Print Stream config file with Handle Level Caching
condition: ${{ parameters.test_stream }}
# Stream e2e filename level caching
- script: |
cd ${{ parameters.working_dir }}
${{ parameters.working_dir }}/blobfuse2 gen-test-config --config-file=azure_stream_filename.yaml --container-name=${{ parameters.container }} --output-file=${{ parameters.stream_filename_config }}
displayName: Create Stream Config File
env:
ACCOUNT_TYPE: ${{ parameters.account_type }}
NIGHTLY_STO_ACC_NAME: ${{ parameters.account_name }}
NIGHTLY_STO_ACC_KEY: ${{ parameters.account_key }}
ACCOUNT_ENDPOINT: ${{ parameters.account_endpoint }}
VERBOSE_LOG: ${{ parameters.verbose_log }}
condition: ${{ parameters.test_stream }}
continueOnError: false
- script: cat ${{ parameters.stream_filename_config }}
displayName: Print Stream config file with Filename Caching
condition: ${{ parameters.test_stream }}
# Create sas credential config file if we need to test it
- script: |
cd ${{ parameters.working_dir }}
@ -146,25 +100,22 @@ steps:
displayName: Print SAS config file
condition: ${{ parameters.test_sas_credential }}
# Create spn credential config file if we need to test it
# Create azcli credential config file if we need to test it
- script: |
cd ${{ parameters.working_dir }}
${{ parameters.working_dir }}/blobfuse2 gen-test-config --config-file=azure_spn.yaml --container-name=${{ parameters.container }} --temp-path=${{ parameters.temp_dir }} --output-file=${{ parameters.spn_credential_config }}
displayName: Create SPN Config File
${{ parameters.working_dir }}/blobfuse2 gen-test-config --config-file=azure_cli.yaml --container-name=${{ parameters.container }} --temp-path=${{ parameters.temp_dir }} --output-file=${{ parameters.azcli_credential_config }}
displayName: Create Azure CLI Config File
env:
NIGHTLY_SPN_ACC_NAME: ${{ parameters.spn_account_name }}
NIGHTLY_SPN_CLIENT_ID: ${{ parameters.client_id }}
NIGHTLY_SPN_TENANT_ID: ${{ parameters.tenant_id }}
NIGHTLY_SPN_CLIENT_SECRET: ${{ parameters.client_secret }}
NIGHTLY_STO_BLOB_ACC_NAME: ${{ parameters.account_name }}
ACCOUNT_TYPE: ${{ parameters.account_type }}
ACCOUNT_ENDPOINT: ${{ parameters.spn_account_endpoint }}
ACCOUNT_ENDPOINT: ${{ parameters.account_endpoint }}
VERBOSE_LOG: ${{ parameters.verbose_log }}
condition: ${{ parameters.test_spn_credential }}
condition: ${{ parameters.test_azcli_credential }}
continueOnError: false
- script: cat ${{ parameters.spn_credential_config }}
displayName: Print SPN config file
condition: ${{ parameters.test_spn_credential }}
- script: cat ${{ parameters.azcli_credential_config }}
displayName: Print Azure CLI config file
condition: ${{ parameters.test_azcli_credential }}
# Create azurite config file if we need to test it
- script: |
@ -208,45 +159,25 @@ steps:
timeoutInMinutes: 3
continueOnError: false
- ${{ if eq(parameters.test_stream, true) }}:
- template: e2e-tests.yml
parameters:
working_dir: ${{ parameters.working_dir }}
temp_dir: ${{ parameters.temp_dir }}
mount_dir: ${{ parameters.mount_dir }}
adls: ${{ parameters.adls }}
idstring: ${{ parameters.service }} with Streaming
distro_name: ${{ parameters.distro_name }}
quick_test: ${{ parameters.quick_test }}
artifact_name: '${{ parameters.distro_name }}_${{ parameters.service }}_stream.txt'
verbose_log: ${{ parameters.verbose_log }}
mountStep:
script: >
${{ parameters.working_dir }}/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.stream_config }}
--default-working-dir=${{ parameters.working_dir }}
displayName: 'E2E Test: Mount with Stream Configuration'
timeoutInMinutes: 3
continueOnError: false
- ${{ if eq(parameters.test_stream, true) }}:
- template: e2e-tests.yml
parameters:
working_dir: ${{ parameters.working_dir }}
temp_dir: ${{ parameters.temp_dir }}
mount_dir: ${{ parameters.mount_dir }}
adls: ${{ parameters.adls }}
idstring: ${{ parameters.service }} with Streaming with filename
distro_name: ${{ parameters.distro_name }}
quick_test: ${{ parameters.quick_test }}
artifact_name: '${{ parameters.distro_name }}_${{ parameters.service }}_stream_with_filename.txt'
verbose_log: ${{ parameters.verbose_log }}
mountStep:
script: >
${{ parameters.working_dir }}/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.stream_filename_config }}
--default-working-dir=${{ parameters.working_dir }}
displayName: 'E2E Test: Mount with Filename Stream Configuration'
timeoutInMinutes: 3
continueOnError: false
#--------------------------------------- Tests: End to end tests with Block Cache configurations ------------------------------------------
- template: e2e-tests-spcl.yml
parameters:
conf_template: azure_block_perf.yaml
config_file: ${{ parameters.config }}
container: ${{ parameters.container }}
temp_dir: ${{ parameters.temp_dir }}
mount_dir: ${{ parameters.mount_dir }}
adls: ${{ parameters.adls }}
account_name: ${{ parameters.account_name }}
account_key: ${{ parameters.account_key }}
account_type: ${{ parameters.account_type }}
account_endpoint: ${{ parameters.account_endpoint }}
idstring: "${{ parameters.service }} with Block-cache"
distro_name: ${{ parameters.distro_name }}
quick_test: false
verbose_log: ${{ parameters.verbose_log }}
clone: false
- ${{ if eq(parameters.test_sas_credential, true) }}:
- template: e2e-tests.yml
@ -267,29 +198,29 @@ steps:
timeoutInMinutes: 3
continueOnError: false
- ${{ if eq(parameters.test_spn_credential, true) }}:
- ${{ if eq(parameters.test_azcli_credential, true) }}:
- template: e2e-tests.yml
parameters:
working_dir: ${{ parameters.working_dir }}
mount_dir: ${{ parameters.mount_dir }}
temp_dir: ${{ parameters.temp_dir }}
adls: ${{ parameters.adls }}
idstring: ${{ parameters.service }} with SPN Credentials
idstring: ${{ parameters.service }} with Azure CLI Credentials
distro_name: ${{ parameters.distro_name }}
artifact_name: '${{ parameters.distro_name }}_${{ parameters.service }}_spn.txt'
artifact_name: '${{ parameters.distro_name }}_${{ parameters.service }}_azcli.txt'
verbose_log: ${{ parameters.verbose_log }}
mountStep:
script: >
${{ parameters.working_dir }}/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.spn_credential_config }}
${{ parameters.working_dir }}/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.azcli_credential_config }}
--default-working-dir=${{ parameters.working_dir }}
displayName: 'E2E Test: Mount with SPN Credential Configuration'
displayName: 'E2E Test: Mount with Azure CLI Credential Configuration'
timeoutInMinutes: 3
continueOnError: false
- ${{ if eq(parameters.test_azurite, true) }}:
- bash: |
sudo apt-get install azure-cli -y
sudo npm install -g azurite
sudo apt-get install azure-cli npm -y
sudo npm install -g azurite@3.29.0
sudo mkdir azurite
sudo azurite --silent --location azurite --debug azurite/debug.log &
az storage container create -n ${{ parameters.container }} --connection-string "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;QueueEndpoint=http://127.0.0.1:10001/devstoreaccount1;"
@ -316,22 +247,6 @@ steps:
#--------------------------------------- Tests: End to end tests with different File Cache configurations ------------------------------------------
- template: e2e-tests-spcl.yml
parameters:
conf_template: azure_key_lfu.yaml
config_file: ${{ parameters.config }}
container: ${{ parameters.container }}
temp_dir: ${{ parameters.temp_dir }}
mount_dir: ${{ parameters.mount_dir }}
adls: ${{ parameters.adls }}
account_name: ${{ parameters.account_name }}
account_key: ${{ parameters.account_key }}
account_type: ${{ parameters.account_type }}
account_endpoint: ${{ parameters.account_endpoint }}
idstring: "${{ parameters.service }} LFU policy"
distro_name: ${{ parameters.distro_name }}
verbose_log: ${{ parameters.verbose_log }}
- template: e2e-tests-spcl.yml
parameters:
conf_template: azure_key_lru_purge.yaml
@ -402,7 +317,6 @@ steps:
verbose_log: ${{ parameters.verbose_log }}
enable_symlink_adls: true
#--------------------------------------- Setup: End to end tests with different File Cache configurations ------------------------------------------
- script: |
cd ${{ parameters.working_dir }}
@ -471,4 +385,4 @@ steps:
${{ parameters.working_dir }}/blobfuse2 mount ${{ parameters.mount_dir }} --config-file=${{ parameters.config }}
--default-working-dir=${{ parameters.working_dir }}
displayName: 'HugeList: Mount'
continueOnError: false
continueOnError: false

View file

@ -13,23 +13,26 @@ jobs:
strategy:
matrix:
Ubuntu-20:
imageName: 'ubuntu-20.04'
AgentName: 'blobfuse-ubuntu20'
containerName: 'test-cnt-ubn-20'
fuselib: 'libfuse-dev'
tags: 'fuse2'
adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_20)
Ubuntu-22:
imageName: 'ubuntu-22.04'
AgentName: 'blobfuse-ubuntu22'
containerName: 'test-cnt-ubn-22'
fuselib: 'libfuse3-dev'
tags: 'fuse3'
adlsSas: $(AZTEST_ADLS_CONT_SAS_UBN_22)
pool:
vmImage: $(imageName)
name: "blobfuse-ubuntu-pool"
demands:
- ImageOverride -equals $(AgentName)
variables:
- group: NightlyBlobFuse
steps:
# ----------------------------------------------------------------
- template: 'azure-pipeline-templates/blobfuse2-ci-template.yml'
@ -44,7 +47,7 @@ jobs:
strategy:
matrix:
Ubuntu-22-ARM64:
imageName: 'blobfuse-ubn22-arm64'
AgentName: 'blobfuse-ubn22-arm64'
containerName: 'test-cnt-ubn-22'
fuselib: 'libfuse3-dev'
tags: 'fuse3'
@ -53,7 +56,7 @@ jobs:
pool:
name: "blobfuse-ubn-arm64-pool"
demands:
- ImageOverride -equals $(imageName)
- ImageOverride -equals $(AgentName)
variables:
- group: NightlyBlobFuse

View file

@ -26,17 +26,15 @@ stages:
timeoutInMinutes: 300
strategy:
matrix:
Ubuntu-18:
AgentName: 'blobfuse-ubuntu18'
imageName: 'ubuntu-18.04'
containerName: 'test-cnt-ubn-18'
Ubuntu-20:
AgentName: 'blobfuse-ubuntu20'
containerName: 'test-cnt-ubn-20'
fuselib: 'libfuse-dev'
fuselib2: 'fuse'
tags: 'fuse2'
Ubuntu-20:
AgentName: 'blobfuse-ubuntu20'
imageName: 'ubuntu-20.04'
containerName: 'test-cnt-ubn-20'
Ubuntu-22:
AgentName: 'blobfuse-ubuntu22'
containerName: 'test-cnt-ubn-22'
fuselib: 'libfuse3-dev'
fuselib2: 'fuse3'
tags: 'fuse3'
@ -54,8 +52,6 @@ stages:
value: '$(Pipeline.Workspace)/blobfuse2_tmp'
- name: BLOBFUSE2_CFG
value: '$(Pipeline.Workspace)/blobfuse2.yaml'
- name: BLOBFUSE2_STREAM_CFG
value: '$(Pipeline.Workspace)/blobfuse2_stream.yaml'
- name: BLOBFUSE2_ADLS_CFG
value: '$(Pipeline.Workspace)/blobfuse2.adls.yaml'
- name: GOPATH
@ -106,6 +102,7 @@ stages:
tags: $(tags)
fuselib: $(fuselib)
skip_msi: "false"
skip_azcli: "false"
# -------------------------------------------------------
# UT based code coverage test
@ -235,84 +232,6 @@ stages:
workingDirectory: $(WORK_DIR)
displayName: "ADLS Coverage with profilers"
# -------------------------------------------------------
# Config Generation (Block Blob - LFU policy)
- script: |
cd $(WORK_DIR)
./blobfuse2.test -test.v -test.coverprofile=$(WORK_DIR)/blobfuse2_gentest2.cov gen-test-config --config-file=azure_key_lfu.yaml --container-name=$(containerName) --temp-path=$(TEMP_DIR) --output-file=$(BLOBFUSE2_CFG)
env:
NIGHTLY_STO_ACC_NAME: $(NIGHTLY_STO_BLOB_ACC_NAME)
NIGHTLY_STO_ACC_KEY: $(NIGHTLY_STO_BLOB_ACC_KEY)
ACCOUNT_TYPE: 'block'
ACCOUNT_ENDPOINT: 'https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net'
VERBOSE_LOG: false
displayName: 'Create Config File - LFU'
continueOnError: false
workingDirectory: $(WORK_DIR)
# Code Coverage with e2e-tests for block blob with lfu policy
- script: |
rm -rf $(MOUNT_DIR)/*
rm -rf $(TEMP_DIR)/*
./blobfuse2.test -test.v -test.coverprofile=$(WORK_DIR)/blobfuse2_block_lfu.cov mount $(MOUNT_DIR) --config-file=$(BLOBFUSE2_CFG) --foreground=true &
sleep 10
ps -aux | grep blobfuse2
rm -rf $(MOUNT_DIR)/*
cd test/e2e_tests
go test -v -timeout=7200s ./... -args -mnt-path=$(MOUNT_DIR) -tmp-path=$(TEMP_DIR)
cd -
./blobfuse2 unmount $(MOUNT_DIR)
sleep 5
workingDirectory: $(WORK_DIR)
displayName: "Block Blob LFU Coverage"
# -------------------------------------------------------
# Config Generation (Block Blob - Stream)
- script: |
cd $(WORK_DIR)
./blobfuse2.test -test.v -test.coverprofile=$(WORK_DIR)/blobfuse2_gentest3.cov gen-test-config --config-file=azure_stream.yaml --container-name=$(containerName) --temp-path=$(TEMP_DIR) --output-file=$(BLOBFUSE2_STREAM_CFG)
displayName: 'Create Config File - Stream'
env:
NIGHTLY_STO_ACC_NAME: $(NIGHTLY_STO_BLOB_ACC_NAME)
NIGHTLY_STO_ACC_KEY: $(NIGHTLY_STO_BLOB_ACC_KEY)
ACCOUNT_TYPE: 'block'
ACCOUNT_ENDPOINT: 'https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net'
VERBOSE_LOG: false
continueOnError: false
workingDirectory: $(WORK_DIR)
# Streaming test preparation
- script: |
rm -rf $(MOUNT_DIR)/*
rm -rf $(TEMP_DIR)/*
./blobfuse2.test -test.v -test.coverprofile=$(WORK_DIR)/blobfuse2_stream_prep.cov mount $(MOUNT_DIR) --config-file=$(BLOBFUSE2_CFG) --foreground=true &
sleep 10
ps -aux | grep blobfuse2
for i in {10,50,100,200,500,1024}; do echo $i; done | parallel --will-cite -j 5 'head -c {}M < /dev/urandom > $(WORK_DIR)/myfile_{}'
for i in {10,50,100,200,500,1024}; do echo $i; done | parallel --will-cite -j 5 'cp $(WORK_DIR)/myfile_{} $(MOUNT_DIR)/'
./blobfuse2 unmount "$(MOUNT_DIR)*"
sudo fusermount -u $(MOUNT_DIR)
sleep 5
workingDirectory: $(WORK_DIR)
displayName: "Block Blob Stream Preparation"
# Code Coverage with e2e-tests for block blob with streaming on
- script: |
rm -rf $(MOUNT_DIR)/*
rm -rf $(TEMP_DIR)/*
./blobfuse2.test -test.v -test.coverprofile=$(WORK_DIR)/blobfuse2_stream.cov mount $(MOUNT_DIR) --config-file=$(BLOBFUSE2_STREAM_CFG) --foreground=true &
sleep 10
ps -aux | grep blobfuse2
./blobfuse2 mount list
for i in {10,50,100,200,500,1024}; do echo $i; done | parallel --will-cite -j 5 'diff $(WORK_DIR)/myfile_{} $(MOUNT_DIR)/myfile_{}'
sudo fusermount -u $(MOUNT_DIR)
sleep 5
workingDirectory: $(WORK_DIR)
displayName: "Block Blob Stream Coverage"
# -------------------------------------------------------
# Config Generation (Block Blob) for cli options
- script: |
@ -595,7 +514,7 @@ stages:
sudo apt-get install python3-setuptools -y
sudo apt install python3-pip -y
sudo pip3 install mitmproxy
mitmdump -w proxy_logs.txt &
mitmdump -q -w proxy_logs.txt &
displayName: 'Install & Start Proxy'
# Configure Proxy cert & env
@ -673,7 +592,7 @@ stages:
- script: |
echo 'mode: count' > ./blobfuse2_coverage_raw.rpt
tail -q -n +2 ./*.cov >> ./blobfuse2_coverage_raw.rpt
cat ./blobfuse2_coverage_raw.rpt | grep -v mock_component | grep -v base_component | grep -v loopback | grep -v tools | grep -v "common/log" | grep -v "common/exectime" | grep -v "internal/stats_manager" | grep -v "main.go" | grep -v "component/azstorage/azauthmsi.go" > ./blobfuse2_coverage.rpt
cat ./blobfuse2_coverage_raw.rpt | grep -v mock_component | grep -v base_component | grep -v loopback | grep -v tools | grep -v "common/log" | grep -v "common/exectime" | grep -v "common/types.go" | grep -v "internal/stats_manager" | grep -v "main.go" | grep -v "component/azstorage/azauthmsi.go" | grep -v "component/azstorage/azauthspn.go" | grep -v "component/stream" | grep -v "component/azstorage/azauthcli.go" > ./blobfuse2_coverage.rpt
go tool cover -func blobfuse2_coverage.rpt > ./blobfuse2_func_cover.rpt
go tool cover -html=./blobfuse2_coverage.rpt -o ./blobfuse2_coverage.html
go tool cover -html=./blobfuse2_ut.cov -o ./blobfuse2_ut.html
@ -716,7 +635,6 @@ stages:
matrix:
Ubuntu-20:
AgentName: 'blobfuse-ubuntu20'
imageName: 'ubuntu-20.04'
containerName: 'test-cnt-ubn-20'
fuselib: 'libfuse3-dev'
fuselib2: 'fuse3'
@ -735,8 +653,6 @@ stages:
value: '$(Pipeline.Workspace)/blobfuse2_tmp'
- name: BLOBFUSE2_CFG
value: '$(Pipeline.Workspace)/blobfuse2.yaml'
- name: BLOBFUSE2_STREAM_CFG
value: '$(Pipeline.Workspace)/blobfuse2_stream.yaml'
- name: BLOBFUSE2_ADLS_CFG
value: '$(Pipeline.Workspace)/blobfuse2.adls.yaml'
- name: GOPATH

View file

@ -0,0 +1,74 @@
stages:
- stage: KernelBuild
jobs:
# Ubuntu Tests
- job: Set_1
timeoutInMinutes: 360
strategy:
matrix:
Ubuntu-22:
AgentName: 'blobfuse-benchmark-ubn22'
containerName: 'test-cnt-ubn-22'
pool:
name: "blobfuse-perf-pool"
demands:
- ImageOverride -equals $(AgentName)
variables:
- group: NightlyBlobFuse
- name: ROOT_DIR
value: "/usr/pipeline/workv2"
- name: WORK_DIR
value: "/usr/pipeline/workv2/go/src/azure-storage-fuse"
- name: MOUNT_DIR
value: "/usr/pipeline/workv2/blob_mnt"
- name: TEMP_DIR
value: "/usr/pipeline/workv2/temp"
- name: BLOBFUSE2_CFG
value: "/usr/pipeline/workv2/blobfuse2.yaml"
- name: GOPATH
value: "/usr/pipeline/workv2/go"
steps:
- template: 'azure-pipeline-templates/setup.yml'
parameters:
tags: $(tags)
installStep:
script: |
sudo apt-get update --fix-missing
sudo apt update
sudo apt-get install cmake gcc libfuse3-dev git parallel -y
sudo apt-get install fuse3 -y
displayName: 'Install fuse'
- script: |
sudo apt-get install git fakeroot build-essential ncurses-dev xz-utils libssl-dev bc flex libelf-dev bison -y
displayName: 'Install kernel build dependencies'
- script: |
cd $(WORK_DIR)
$(WORK_DIR)/blobfuse2 gen-test-config --config-file=azure_block_perf.yaml --container-name=$(containerName) --output-file=$(BLOBFUSE2_CFG)
displayName: "Create Config File"
env:
NIGHTLY_STO_ACC_NAME: $(NIGHTLY_STO_BLOB_ACC_NAME)
NIGHTLY_STO_ACC_KEY: $(NIGHTLY_STO_BLOB_ACC_KEY)
ACCOUNT_TYPE: 'block'
ACCOUNT_ENDPOINT: 'https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net'
VERBOSE_LOG: ${{ parameters.verbose_log }}
continueOnError: false
- script: |
cat $(BLOBFUSE2_CFG)
displayName: 'Print config file'
- template: 'azure-pipeline-templates/blobfuse2-data-validation.yml'
parameters:
working_dir: $(WORK_DIR)
mount_dir: $(MOUNT_DIR)
temp_dir: $(TEMP_DIR)
prefix: 'ubn-22'
kversion: "6.10.2"

File diff is not shown because it is too large. Load diff

View file

@ -90,6 +90,7 @@ stages:
- script: |
sudo apt-get update --fix-missing
sudo apt-get install ruby-dev build-essential pkg-config cmake gcc g++ rpm $(fuselib) -y
sudo gem install dotenv -v 2.8.1
sudo gem install fpm -V
displayName: "Installing Dependencies"
@ -135,7 +136,7 @@ stages:
displayName: 'Make deb Package'
- script: |
fpm -s dir -t rpm -n blobfuse2 -C pkgDir/ -v `./pkgDir/usr/bin/blobfuse2 --version | cut -d " " -f 3` -d $(depends) \
fpm -s dir -t rpm -n blobfuse2 --rpm-digest sha256 -C pkgDir/ -v `./pkgDir/usr/bin/blobfuse2 --version | cut -d " " -f 3` -d $(depends) \
--maintainer "Blobfuse v-Team <blobfusevteam@microsoft.com>" --url "https://github.com/Azure/azure-storage-fuse" \
--description "A user-space filesystem for interacting with Azure Storage"
mv ./blobfuse2*.rpm ./blobfuse2-`./pkgDir/usr/bin/blobfuse2 --version | cut -d " " -f 3`-$(tags).x86_64.rpm
@ -235,7 +236,7 @@ stages:
displayName: 'Make deb Package'
- script: |
fpm -s dir -t rpm -n blobfuse2 -C pkgDir/ -v `./pkgDir/usr/bin/blobfuse2 --version | cut -d " " -f 3` -d $(depends) \
fpm -s dir -t rpm -n blobfuse2 --rpm-digest sha256 -C pkgDir/ -v `./pkgDir/usr/bin/blobfuse2 --version | cut -d " " -f 3` -d $(depends) \
--maintainer "Blobfuse v-Team <blobfusevteam@microsoft.com>" --url "https://github.com/Azure/azure-storage-fuse" \
--description "A user-space filesystem for interacting with Azure Storage"
mv ./blobfuse2*.rpm ./blobfuse2-`./pkgDir/usr/bin/blobfuse2 --version | cut -d " " -f 3`-$(tags).aarch64.rpm
@ -288,6 +289,14 @@ stages:
md5sum $(Build.ArtifactStagingDirectory)/blobfuse2-temp/*.rpm
displayName: 'List Artifacts'
- script: |
mkdir mariner && chmod 755 mariner
cp blobfuse2-temp/*-fuse3*.rpm mariner
sudo ls -lRt mariner
md5sum mariner/*
displayName: 'Copy artifacts for Mariner'
workingDirectory: $(Build.ArtifactStagingDirectory)
- script: |
sudo apt-get update
wget https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb
@ -317,11 +326,32 @@ stages:
}
]
- task: SFP.build-tasks.custom-build-task-1.EsrpCodeSigning@1
displayName: 'ESRP CodeSigning blobfuse2 mariner'
inputs:
ConnectedServiceName: 'PMC ESRP Blobfuse2 Signing'
FolderPath: '$(Build.ArtifactStagingDirectory)/mariner'
Pattern: '*.rpm'
signConfigType: inlineSignParams
VerboseLogin: true
inlineOperation: |
[
{
"KeyCode" : "$(ESRP_BLOBFUSE_MARINER_KEY_CODE)",
"OperationCode" : "LinuxSign",
"Parameters" : {},
"ToolName" : "sign",
"ToolVersion" : "1.0"
}
]
# Validate signed images have md5sum changed
- script: |
chmod 755 $(Build.ArtifactStagingDirectory)/blobfuse2-temp/*.rpm
chmod 755 $(Build.ArtifactStagingDirectory)/blobfuse2-temp/*.deb
chmod 755 $(Build.ArtifactStagingDirectory)/mariner/*.rpm
rm -rf $(Build.ArtifactStagingDirectory)/blobfuse2-temp/*.md
rm -rf $(Build.ArtifactStagingDirectory)/mariner/*.md
mv $(Build.ArtifactStagingDirectory)/blobfuse2-temp/* $(Build.ArtifactStagingDirectory)/
rm -rf $(Build.ArtifactStagingDirectory)/blobfuse2-temp/
displayName: 'Make Artifacts executable'
@ -330,6 +360,7 @@ stages:
sudo ls -lRt $(Build.ArtifactStagingDirectory)
md5sum $(Build.ArtifactStagingDirectory)/*.deb
md5sum $(Build.ArtifactStagingDirectory)/*.rpm
md5sum $(Build.ArtifactStagingDirectory)/mariner/*
displayName: 'List Signed Artifacts'
# Push signed images to artifact directory
@ -776,7 +807,7 @@ stages:
artifactName: 'blobfuse2'
displayName: 'Publish Artifacts'
- job: Set_4
- job: Set_4_1
timeoutInMinutes: 120
strategy:
matrix:
@ -787,6 +818,72 @@ stages:
fuse-version: 'fuse3'
tags: 'fuse3'
container: 'test-cnt-rhel-75'
pool:
name: "blobfuse-rhel-pool"
demands:
- ImageOverride -equals $(agentName)
variables:
- group: NightlyBlobFuse
- name: root_dir
value: '$(System.DefaultWorkingDirectory)'
- name: work_dir
value: '$(System.DefaultWorkingDirectory)/azure-storage-fuse'
- name: mount_dir
value: '$(System.DefaultWorkingDirectory)/fusetmp'
- name: temp_dir
value: '$(System.DefaultWorkingDirectory)/fusetmpcache'
steps:
- checkout: none
- script: |
sudo yum update -y
sudo yum install git -y
sudo yum groupinstall "Development Tools" -y
displayName: 'Install Git'
- task: DownloadBuildArtifacts@0
displayName: 'Download Build Artifacts'
inputs:
artifactName: 'blobfuse2-signed'
downloadPath: $(root_dir)
itemPattern: blobfuse2-signed/blobfuse2*$(tags)*x86_64.rpm
- script: |
ls -l
result=$(ls -1 | wc -l)
if [ $result -ne 1 ]; then
exit 1
fi
displayName: 'List Downloaded Package'
workingDirectory: $(root_dir)/blobfuse2-signed
- script: |
for f in ./blobfuse2*$(tags)*.rpm; do mv -v "$f" "${f/-$(tags)./-$(vmImage).}"; done;
cp ./blobfuse2*$(vmImage)*.rpm $(Build.ArtifactStagingDirectory)
f=`ls ./blobfuse2*$(vmImage)*.rpm`
cp "$f" $(sed 's:RHEL-7.5:RHEL-7.8:' <<< "$f")
cp "$f" $(sed 's:RHEL-7.5:RHEL-8.1:' <<< "$f")
cp "$f" $(sed 's:RHEL-7.5:RHEL-8.2:' <<< "$f")
cp ./blobfuse2*RHEL-7.8*.rpm $(Build.ArtifactStagingDirectory)
cp ./blobfuse2*RHEL-8*.rpm $(Build.ArtifactStagingDirectory)
rm -rf ./blobfuse2*RHEL-7.8*.rpm
rm -rf ./blobfuse2*RHEL-8*.rpm
displayName: 'Rename Package'
workingDirectory: $(root_dir)/blobfuse2-signed
# publishing the artifacts generated
- task: PublishBuildArtifacts@1
inputs:
artifactName: 'blobfuse2'
displayName: 'Publish Artifacts'
- job: Set_4_2
timeoutInMinutes: 120
strategy:
matrix:
RHEL-8.6:
agentName: "blobfuse-rhel8_6"
vmImage: 'RHEL-8.6'
@ -821,12 +918,6 @@ stages:
steps:
- checkout: none
- script: |
sudo touch /etc/yum.repos.d/centos.repo
sudo sh -c 'echo -e "[centos-extras]\nname=Centos extras - $basearch\nbaseurl=http://mirror.centos.org/centos/7/extras/x86_64\nenabled=1\ngpgcheck=1\ngpgkey=http://centos.org/keys/RPM-GPG-KEY-CentOS-7" > /etc/yum.repos.d/centos.repo'
condition: or(eq(variables['AgentName'], 'blobfuse-rhel7_5'),eq(variables['AgentName'], 'blobfuse-rhel7_8'))
displayName: "Update OS mirrors"
- script: |
sudo yum update -y
sudo yum install git -y
@ -873,16 +964,6 @@ stages:
- script: |
for f in ./blobfuse2*$(tags)*.rpm; do mv -v "$f" "${f/-$(tags)./-$(vmImage).}"; done;
cp ./blobfuse2*$(vmImage)*.rpm $(Build.ArtifactStagingDirectory)
if [ $(agentName) == "blobfuse-rhel7_5" ]; then
f=`ls ./blobfuse2*$(vmImage)*.rpm`
cp "$f" $(sed 's:RHEL-7.5:RHEL-7.8:' <<< "$f")
cp "$f" $(sed 's:RHEL-7.5:RHEL-8.1:' <<< "$f")
cp "$f" $(sed 's:RHEL-7.5:RHEL-8.2:' <<< "$f")
cp ./blobfuse2*RHEL-7.8*.rpm $(Build.ArtifactStagingDirectory)
cp ./blobfuse2*RHEL-8*.rpm $(Build.ArtifactStagingDirectory)
rm -rf ./blobfuse2*RHEL-7.8*.rpm
rm -rf ./blobfuse2*RHEL-8*.rpm
fi
displayName: 'Rename Package'
workingDirectory: $(root_dir)/blobfuse2-signed
@ -890,11 +971,7 @@ stages:
sudo sed -i '/^failovermethod=/d' /etc/yum.repos.d/*.repo
sudo rpm -qip blobfuse2*$(vmImage)*.rpm
sudo yum groupinstall "Development Tools" -y
if [[ $(agentName) == "blobfuse-rhel7_5" || $(agentName) == "blobfuse-rhel7_8" ]]; then
sudo yum install fuse fuse3-libs fuse3-devel fuse3 -y
else
sudo yum install fuse fuse3-libs fuse3-devel fuse3 -y --nobest --allowerasing
fi
sudo yum install fuse fuse3-libs fuse3-devel fuse3 -y --nobest --allowerasing
sudo rpm -i blobfuse2*$(vmImage)*.rpm
displayName: 'Install Package'
workingDirectory: $(Build.ArtifactStagingDirectory)
@ -1239,17 +1316,19 @@ stages:
timeoutInMinutes: 120
strategy:
matrix:
Mariner:
agentName: "blobfuse-mariner"
vmImage: 'Mariner'
fuse-version: 'fuse2'
tags: 'fuse2'
container: "test-cnt-mari-1"
Mariner2:
agentName: "blobfuse-mariner2"
DistroVer: "Mariner2"
Description: "CBL-Mariner2 Linux"
fuselib: 'libfuse3-dev'
fuse-version: 'fuse3'
tags: 'fuse3'
container: "test-cnt-mari-2"
pool:
name: "blobfuse-mariner-pool"
demands:
- ImageOverride -equals $(agentName)
- ImageOverride -equals $(AgentName)
variables:
- group: NightlyBlobFuse
@ -1266,7 +1345,8 @@ stages:
- checkout: none
- script: |
sudo tdnf install build-essential git fuse fuse-devel python36 -y
sudo tdnf update -y
sudo tdnf install git -y
displayName: 'Install Git'
- script: |
@ -1285,7 +1365,103 @@ stages:
scriptPath: "$(work_dir)/go_installer.sh"
args: "$(root_dir)/"
displayName: "GoTool Custom Setup"
# get glibc version with which build is done
- script: |
ldd --version
displayName: "GLIBC Version"
- task: DownloadBuildArtifacts@0
displayName: 'Download Build Artifacts'
inputs:
artifactName: 'blobfuse2-signed'
downloadPath: $(root_dir)
itemPattern: blobfuse2-signed/mariner/blobfuse2*$(tags)*x86_64.rpm
- script: |
ls -l
result=$(ls -1 | wc -l)
if [ $result -ne 1 ]; then
exit 1
fi
displayName: 'List Downloaded Package'
workingDirectory: $(root_dir)/blobfuse2-signed/mariner
- script: |
sudo rpm -qip blobfuse2*.rpm
sudo tdnf install gcc build-essential fuse3 fuse3-devel -y
sudo rpm -i blobfuse2*.rpm
displayName: 'Install Package'
workingDirectory: $(root_dir)/blobfuse2-signed/mariner
- template: 'azure-pipeline-templates/release-distro-tests.yml'
parameters:
root_dir: $(root_dir)
work_dir: $(work_dir)
mount_dir: $(mount_dir)
temp_dir: $(temp_dir)
container: $(container)
- job: Set_9
timeoutInMinutes: 120
strategy:
matrix:
Rocky-8.0:
agentName: "blobfuse-rocky8"
vmImage: 'Rocky-8.0'
fuselib: 'fuse3-devel'
fuse-version: 'fuse3'
tags: 'fuse3'
container: 'test-cnt-rocky-8'
Rocky-9.0:
agentName: "blobfuse-rocky9"
vmImage: 'Rocky-9.0'
fuselib: 'fuse3-devel'
fuse-version: 'fuse3'
tags: 'fuse3'
container: 'test-cnt-rocky-9'
pool:
name: "blobfuse2-rocky-pool"
demands:
- ImageOverride -equals $(agentName)
variables:
- group: NightlyBlobFuse
- name: root_dir
value: '$(System.DefaultWorkingDirectory)'
- name: work_dir
value: '$(System.DefaultWorkingDirectory)/azure-storage-fuse'
- name: mount_dir
value: '$(System.DefaultWorkingDirectory)/fusetmp'
- name: temp_dir
value: '$(System.DefaultWorkingDirectory)/fusetmpcache'
steps:
- checkout: none
- script: |
sudo yum update -y
sudo yum install wget git -y
displayName: 'Install Git'
- script: |
git clone https://github.com/Azure/azure-storage-fuse
displayName: 'Checkout Code'
workingDirectory: $(root_dir)
- script: |
git checkout `echo $(Build.SourceBranch) | cut -d "/" -f 1,2 --complement`
displayName: 'Checkout Branch'
workingDirectory: $(root_dir)/azure-storage-fuse
# Custom script to install Go-lang
- task: ShellScript@2
inputs:
scriptPath: "$(work_dir)/go_installer.sh"
args: "$(root_dir)/"
displayName: "GoTool Custom Setup"
# get glibc version with which build is done
- script: |
ldd --version
@ -1305,7 +1481,7 @@ stages:
exit 1
fi
displayName: 'List Downloaded Package'
workingDirectory: $(root_dir)/blobfuse2-signed
workingDirectory: $(root_dir)/blobfuse2-signed
- script: |
for f in ./blobfuse2*$(tags)*.rpm; do mv -v "$f" "${f/-$(tags)./-$(vmImage).}"; done;
@ -1314,9 +1490,11 @@ stages:
workingDirectory: $(root_dir)/blobfuse2-signed
- script: |
sudo rpm -qip blobfuse2*.rpm
sudo tdnf install build-essential fuse fuse-devel -y
sudo rpm -i blobfuse2*.rpm
sudo sed -i '/^failovermethod=/d' /etc/yum.repos.d/*.repo
sudo rpm -qip blobfuse2*$(vmImage)*.rpm
sudo yum groupinstall "Development Tools" -y
sudo yum install fuse fuse3-libs fuse3-devel fuse3 -y --nobest --allowerasing
sudo rpm -i blobfuse2*$(vmImage)*.rpm
displayName: 'Install Package'
workingDirectory: $(Build.ArtifactStagingDirectory)
@ -1327,15 +1505,14 @@ stages:
mount_dir: $(mount_dir)
temp_dir: $(temp_dir)
container: $(container)
# extras: "--foreground=true"
# publishing the artifacts generated
- task: PublishBuildArtifacts@1
inputs:
artifactName: 'blobfuse2'
displayName: 'Publish Artifacts'
# TestArtifacts ends here
# TestArtifacts ends here
- stage: ReleaseArtifacts
dependsOn: TestArtifacts
condition: succeeded('TestArtifacts')
@ -1407,7 +1584,7 @@ stages:
assetUploadMode: replace
- ${{ if eq(parameters.publish_artifacts, true) }}:
- stage: PublishArtifcats
- stage: PublishArtifacts
dependsOn: ReleaseArtifacts
condition: succeeded('ReleaseArtifacts')
jobs:
@ -1459,6 +1636,16 @@ stages:
artifactName: 'blobfuse2-signed'
downloadPath: $(Build.ArtifactStagingDirectory)
- script: |
cd mariner
for f in ./blobfuse2*fuse3*.rpm; do mv -v "$f" "${f/-fuse3./-cm2.}"; done
ls -lRt
mv blobfuse2*.rpm ..
cd ..
rm -r mariner/
displayName: 'Rename Mariner binaries'
workingDirectory: $(Build.ArtifactStagingDirectory)/blobfuse2-signed/
- script: |
sudo ls -lRt $(Build.ArtifactStagingDirectory)
displayName: 'List Artifacts'
@ -1492,8 +1679,33 @@ stages:
fuse2AmdRpm=`pmc --msal-cert-path $(pmcCertificate.secureFilePath) --config $(settings.secureFilePath) --id-only package upload blobfuse2*fuse2.x86_64.rpm`
echo "Fuse2 AMD RPM ID: $fuse2AmdRpm"
marinerAmdRpmFile=$(ls blobfuse2* | grep 'cm2\.x86_64\.rpm')
marinerFuse3AmdRpm=`pmc --msal-cert-path $(pmcCertificate.secureFilePath) --config $(settings.secureFilePath) --id-only package upload blobfuse2*cm2.x86_64.rpm`
echo "Mariner fuse3 AMD RPM ID: $marinerFuse3AmdRpm"
echo "Mariner fuse3 AMD RPM $marinerAmdRpmFile"
marinerAarchRpmFile=$(ls blobfuse2* | grep 'cm2\.aarch64\.rpm')
marinerFuse3AarchRpm=`pmc --msal-cert-path $(pmcCertificate.secureFilePath) --config $(settings.secureFilePath) --id-only package upload blobfuse2*cm2.aarch64.rpm`
echo "Mariner fuse3 ARM RPM ID: $marinerFuse3AarchRpm"
echo "Mariner fuse3 ARM RPM: $marinerAarchRpmFile"
is_preview="false"
echo "##vso[task.setvariable variable=is_preview]$is_preview"
if [[ $marinerAmdRpmFile == *"preview"* ]]; then
is_preview="true"
echo "##vso[task.setvariable variable=is_preview]$is_preview"
fi
while IFS=, read -r distro fuseArchType repoName releaseName; do
# If the package is preview, publish to mariner preview package
if [[ $distro == *"Mariner-"* ]]; then
if [ $is_preview = "true" ]; then
repoName=$(echo $repoName | sed 's/prod/preview/')
fi
fi
echo "Uploading packages for $distro"
pmc --msal-cert-path $(pmcCertificate.secureFilePath) --config $(settings.secureFilePath) repo package update --add-packages ${!fuseArchType} $repoName $releaseName
done < <(tail -n +3 ../packages.csv)
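
Note: the upload loop above reads packages.csv with tail -n +3 (skipping two header lines), resolves the uploaded package ID through bash indirect expansion (${!fuseArchType}), and rewrites prod repositories to preview for Mariner preview builds. A hedged Go rendering of that control flow, with the column layout inferred from the read statement and the pmc call stubbed out:

package main

import (
	"encoding/csv"
	"fmt"
	"os"
	"strings"
)

func main() {
	isPreview := true // derived earlier from the rpm filename containing "preview"

	f, err := os.Open("packages.csv") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	r := csv.NewReader(f)
	r.FieldsPerRecord = -1 // header lines may have a different shape
	rows, err := r.ReadAll()
	if err != nil {
		panic(err)
	}
	if len(rows) < 3 {
		return // nothing after the two header lines
	}
	for _, row := range rows[2:] { // tail -n +3: skip two header lines
		if len(row) < 4 {
			continue
		}
		distro, repoName, releaseName := row[0], row[2], row[3]
		if strings.Contains(distro, "Mariner-") && isPreview {
			repoName = strings.Replace(repoName, "prod", "preview", 1)
		}
		// Stub for: pmc ... repo package update --add-packages <pkg-id> repoName releaseName
		fmt.Printf("upload for %s -> repo=%s release=%s\n", distro, repoName, releaseName)
	}
}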
@ -1506,6 +1718,13 @@ stages:
then
echo "Skipping for ARM type on $distro"
else
if [[ $distro == *"Mariner-"* ]]; then
if [ "$(is_preview)" = "true" ]; then
repoName=$(echo $repoName | sed 's/prod/preview/')
fi
fi
echo "Repository Name: $repoName"
echo "Publishing for $distro"
pmc --msal-cert-path $(pmcCertificate.secureFilePath) --config $(settings.secureFilePath) repo publish $repoName
fi

View file

@ -1,5 +1,5 @@
#!/bin/bash
echo "Using Go - $(go version)"
if [ "$1" == "fuse2" ]
then
# Build blobfuse2 with fuse2

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -132,7 +132,7 @@ func validateHMonOptions() error {
}
if len(errMsg) != 0 {
return fmt.Errorf(errMsg)
return fmt.Errorf("%s", errMsg)
}
return nil
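
Note: this change (and the identical one in Destroy further down) stops passing a runtime string as the format argument. If errMsg ever contains a % sign, fmt.Errorf(errMsg) mangles the output, and modern go vet rejects the non-constant format string. A quick illustration:

package main

import "fmt"

func main() {
	errMsg := "invalid value: 100%ile not supported"

	bad := fmt.Errorf(errMsg)        // vet: non-constant format string
	good := fmt.Errorf("%s", errMsg) // message is data, not a format

	fmt.Println(bad)  // invalid value: 100%!i(MISSING)le not supported
	fmt.Println(good) // invalid value: 100%ile not supported
}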

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -87,6 +87,7 @@ type mountOptions struct {
ProfilerIP string `config:"profiler-ip"`
MonitorOpt monitorOptions `config:"health_monitor"`
WaitForMount time.Duration `config:"wait-for-mount"`
LazyWrite bool `config:"lazy-write"`
// v1 support
Streaming bool `config:"streaming"`
@ -97,7 +98,7 @@ type mountOptions struct {
var options mountOptions
func (opt *mountOptions) validate(skipEmptyMount bool) error {
func (opt *mountOptions) validate(skipNonEmptyMount bool) error {
if opt.MountPath == "" {
return fmt.Errorf("mount path not provided")
}
@ -105,8 +106,33 @@ func (opt *mountOptions) validate(skipEmptyMount bool) error {
if _, err := os.Stat(opt.MountPath); os.IsNotExist(err) {
return fmt.Errorf("mount directory does not exists")
} else if common.IsDirectoryMounted(opt.MountPath) {
return fmt.Errorf("directory is already mounted")
} else if !skipEmptyMount && !common.IsDirectoryEmpty(opt.MountPath) {
// Try to cleanup the stale mount
log.Info("Mount::validate : Mount directory is already mounted, trying to cleanup")
active, err := common.IsMountActive(opt.MountPath)
if active || err != nil {
// Previous mount is still active so we need to fail this mount
return fmt.Errorf("directory is already mounted")
} else {
// Previous mount is in a stale state, so let's clean up the state
log.Info("Mount::validate : Cleaning up stale mount")
if err = unmountBlobfuse2(opt.MountPath); err != nil {
return fmt.Errorf("directory is already mounted, unmount manually before remount [%v]", err.Error())
}
// Clean up the file-cache temp directory if any
var tempCachePath string
_ = config.UnmarshalKey("file_cache.path", &tempCachePath)
var cleanupOnStart bool
_ = config.UnmarshalKey("file_cache.cleanup-on-start", &cleanupOnStart)
if tempCachePath != "" && !cleanupOnStart {
if err = common.TempCacheCleanup(tempCachePath); err != nil {
return fmt.Errorf("failed to cleanup file cache [%s]", err.Error())
}
}
}
} else if !skipNonEmptyMount && !common.IsDirectoryEmpty(opt.MountPath) {
return fmt.Errorf("mount directory is not empty")
}
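
Note: the validate() rework above no longer fails outright on an already-mounted directory; it first asks whether the previous mount is still alive and only cleans up when it is stale. A hypothetical sketch of such a liveness probe, assuming it scans /proc/self/mountinfo and treats ENOTCONN from stat(2) as a dead FUSE endpoint (illustrative only, not the actual common.IsMountActive):

package main

import (
	"bufio"
	"errors"
	"fmt"
	"os"
	"strings"
	"syscall"
)

// mountState reports whether path is listed as a mount point and, if so,
// whether the endpoint still answers stat(2). ENOTCONN indicates a stale
// FUSE mount whose daemon has died.
func mountState(path string) (listed bool, stale bool, err error) {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return false, false, err
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		if len(fields) > 4 && fields[4] == path { // field 5 is the mount point
			listed = true
			break
		}
	}
	if err := sc.Err(); err != nil {
		return listed, false, err
	}
	if !listed {
		return false, false, nil
	}
	if _, err := os.Stat(path); err != nil && errors.Is(err, syscall.ENOTCONN) {
		return true, true, nil // transport endpoint is not connected
	}
	return true, false, nil
}

func main() {
	listed, stale, err := mountState("/mnt/blob_mnt") // placeholder path
	fmt.Println("listed:", listed, "stale:", stale, "err:", err)
}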
@ -237,6 +263,8 @@ var mountCmd = &cobra.Command{
FlagErrorHandling: cobra.ExitOnError,
RunE: func(_ *cobra.Command, args []string) error {
options.MountPath = common.ExpandPath(args[0])
common.MountPath = options.MountPath
configFileExists := true
if options.ConfigFile == "" {
@ -285,8 +313,6 @@ var mountCmd = &cobra.Command{
options.Components = pipeline
}
skipNonEmpty := false
if config.IsSet("libfuse-options") {
for _, v := range options.LibfuseOptions {
parameter := strings.Split(v, "=")
@ -309,8 +335,11 @@ var mountCmd = &cobra.Command{
config.Set("read-only", "true")
} else if v == "allow_root" || v == "allow_root=true" {
config.Set("allow-root", "true")
} else if v == "nonempty" {
skipNonEmpty = true
} else if v == "nonempty" || v == "nonempty=true" {
// For fuse3, the -o nonempty mount option has been removed and
// mounting over non-empty directories is now always allowed.
// For fuse2, this option is still supported.
options.NonEmpty = true
config.Set("nonempty", "true")
} else if strings.HasPrefix(v, "umask=") {
umask, err := strconv.ParseUint(parameter[1], 10, 32)
@ -346,7 +375,7 @@ var mountCmd = &cobra.Command{
options.Logging.LogLevel = "LOG_WARNING"
}
err = options.validate(options.NonEmpty || skipNonEmpty)
err = options.validate(options.NonEmpty)
if err != nil {
return err
}
@ -401,11 +430,18 @@ var mountCmd = &cobra.Command{
var pipeline *internal.Pipeline
log.Crit("Starting Blobfuse2 Mount : %s on [%s]", common.Blobfuse2Version, common.GetCurrentDistro())
log.Info("Mount Command: %s", os.Args)
log.Crit("Logging level set to : %s", logLevel.String())
log.Debug("Mount allowed on nonempty path : %v", options.NonEmpty)
pipeline, err = internal.NewPipeline(options.Components, !daemon.WasReborn())
if err != nil {
log.Err("mount : failed to initialize new pipeline [%v]", err)
return Destroy(fmt.Sprintf("failed to initialize new pipeline [%s]", err.Error()))
if err.Error() == "Azure CLI not found on path" {
log.Err("mount : failed to initialize new pipeline :: To authenticate using MSI with object-ID, ensure Azure CLI is installed. Alternatively, use app/client ID or resource ID for authentication. [%v]", err)
return Destroy(fmt.Sprintf("failed to initialize new pipeline :: To authenticate using MSI with object-ID, ensure Azure CLI is installed. Alternatively, use app/client ID or resource ID for authentication. [%s]", err.Error()))
} else {
log.Err("mount : failed to initialize new pipeline [%v]", err)
return Destroy(fmt.Sprintf("failed to initialize new pipeline [%s]", err.Error()))
}
}
common.ForegroundMount = options.Foreground
@ -673,6 +709,9 @@ func init() {
mountCmd.PersistentFlags().Bool("read-only", false, "Mount the system in read only mode. Default value false.")
config.BindPFlag("read-only", mountCmd.PersistentFlags().Lookup("read-only"))
mountCmd.PersistentFlags().Bool("lazy-write", false, "Async write to storage container after file handle is closed.")
config.BindPFlag("lazy-write", mountCmd.PersistentFlags().Lookup("lazy-write"))
mountCmd.PersistentFlags().String("default-working-dir", "", "Default working directory for storing log files and other blobfuse2 information")
mountCmd.PersistentFlags().Lookup("default-working-dir").Hidden = true
config.BindPFlag("default-working-dir", mountCmd.PersistentFlags().Lookup("default-working-dir"))
@ -715,5 +754,5 @@ func init() {
func Destroy(message string) error {
_ = log.Destroy()
return fmt.Errorf(message)
return fmt.Errorf("%s", message)
}

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -62,6 +62,7 @@ azstorage:
mode: key
endpoint: myEndpoint
container: myContainer
max-retries: 1
components:
- libfuse
- file_cache
@ -73,20 +74,6 @@ health_monitor:
- blobfuse_stats
`
var configMountLoopback string = `
logging:
type: syslog
default-working-dir: /tmp/blobfuse2
components:
- libfuse
- loopbackfs
libfuse:
attribute-expiration-sec: 120
entry-expiration-sec: 60
loopbackfs:
path: /tmp/bfuseloopback
`
var configPriorityTest string = `
logging:
type: syslog
@ -164,8 +151,9 @@ func (suite *mountTestSuite) TestMountDirNotEmpty() {
suite.assert.NotNil(err)
suite.assert.Contains(op, "mount directory is not empty")
op, err = executeCommandC(rootCmd, "mount", mntDir, fmt.Sprintf("--config-file=%s", confFileMntTest), "-o", "nonempty", "--foreground")
op, err = executeCommandC(rootCmd, "mount", mntDir, fmt.Sprintf("--config-file=%s", confFileMntTest), "-o", "nonempty")
suite.assert.NotNil(err)
suite.assert.Contains(op, "failed to initialize new pipeline")
}
// mount failure test where the mount path is not provided

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -151,6 +151,29 @@ func beginDetectNewVersion() chan interface{} {
return
}
warningsUrl := common.Blobfuse2ListContainerURL + "/securitywarnings/" + common.Blobfuse2Version
hasWarnings := checkVersionExists(warningsUrl)
if hasWarnings {
// This version has known issues associated with it
// Check whether the version has been blocked by the dev team or not.
blockedVersions := common.Blobfuse2ListContainerURL + "/blockedversions/" + common.Blobfuse2Version
isBlocked := checkVersionExists(blockedVersions)
if isBlocked {
// This version is blocked and the customer shall not be allowed to use it.
blockedPage := common.BlobFuse2BlockingURL + "#" + strings.ReplaceAll(strings.ReplaceAll(common.Blobfuse2Version, ".", ""), "~", "")
fmt.Fprintf(stderr, "PANIC: Visit %s to see the list of known issues blocking your current version [%s]\n", blockedPage, common.Blobfuse2Version)
log.Warn("PANIC: Visit %s to see the list of known issues blocking your current version [%s]\n", blockedPage, common.Blobfuse2Version)
os.Exit(1)
} else {
// This version is not blocked but has a known-issues list which the customer should review.
warningsPage := common.BlobFuse2WarningsURL + "#" + strings.ReplaceAll(strings.ReplaceAll(common.Blobfuse2Version, ".", ""), "~", "")
fmt.Fprintf(stderr, "WARNING: Visit %s to see the list of known issues associated with your current version [%s]\n", warningsPage, common.Blobfuse2Version)
log.Warn("WARNING: Visit %s to see the list of known issues associated with your current version [%s]\n", warningsPage, common.Blobfuse2Version)
}
}
if local.OlderThan(*remote) {
executablePathSegments := strings.Split(strings.Replace(os.Args[0], "\\", "/", -1), "/")
executableName := executablePathSegments[len(executablePathSegments)-1]
@ -158,14 +181,6 @@ func beginDetectNewVersion() chan interface{} {
fmt.Fprintf(stderr, "*** "+executableName+": A new version [%s] is available. Consider upgrading to latest version for bug-fixes & new features. ***\n", remoteVersion)
log.Info("*** "+executableName+": A new version [%s] is available. Consider upgrading to latest version for bug-fixes & new features. ***\n", remoteVersion)
warningsUrl := common.Blobfuse2ListContainerURL + "/securitywarnings/" + common.Blobfuse2Version
hasWarnings := checkVersionExists(warningsUrl)
if hasWarnings {
warningsPage := common.BlobFuse2WarningsURL + "#" + strings.ReplaceAll(common.Blobfuse2Version, ".", "")
fmt.Fprintf(stderr, "Visit %s to see the list of vulnerabilities associated with your current version [%s]\n", warningsPage, common.Blobfuse2Version)
log.Warn("Visit %s to see the list of vulnerabilities associated with your current version [%s]\n", warningsPage, common.Blobfuse2Version)
}
completed <- "A new version of Blobfuse2 is available"
}
}()
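
Note: the version check above now escalates in two steps: a marker under securitywarnings/<version> prints a warning, while a marker under blockedversions/<version> aborts the process. A hedged sketch of the existence probe, assuming checkVersionExists simply issues an HTTP GET and maps a 200 response to true (the real helper may differ):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// markerExists reports whether a marker blob answers HTTP 200.
func markerExists(url string) bool {
	client := http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return false // treat network failure as "no marker"
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK
}

func main() {
	base := "https://example.blob.core.windows.net/release" // placeholder container URL
	fmt.Println("blocked:", markerExists(base+"/blockedversions/2.3.0"))
	fmt.Println("warned: ", markerExists(base+"/securitywarnings/2.3.0"))
}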

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -104,6 +104,27 @@ func (suite *rootCmdSuite) TestNoSecurityWarnings() {
suite.assert.False(found)
}
func (suite *rootCmdSuite) TestSecurityWarnings() {
defer suite.cleanupTest()
warningsUrl := common.Blobfuse2ListContainerURL + "/securitywarnings/" + "1.1.1"
found := checkVersionExists(warningsUrl)
suite.assert.True(found)
}
func (suite *rootCmdSuite) TestBlockedVersion() {
defer suite.cleanupTest()
warningsUrl := common.Blobfuse2ListContainerURL + "/blockedversions/" + "1.1.1"
isBlocked := checkVersionExists(warningsUrl)
suite.assert.True(isBlocked)
}
func (suite *rootCmdSuite) TestNonBlockedVersion() {
defer suite.cleanupTest()
warningsUrl := common.Blobfuse2ListContainerURL + "/blockedversions/" + common.Blobfuse2Version
found := checkVersionExists(warningsUrl)
suite.assert.False(found)
}
func (suite *rootCmdSuite) TestGetRemoteVersionInvalidURL() {
defer suite.cleanupTest()
out, err := getRemoteVersion("abcd")

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -73,7 +73,7 @@ func (suite *keysTreeTestSuite) TestParseValue() {
{val: "65535", toType: reflect.Uint16, result: 65535},
{val: "4294967295", toType: reflect.Uint32, result: 4294967295},
{val: "18446744073709551615", toType: reflect.Uint64, result: uint64(18446744073709551615)},
{val: "6.24321908234", toType: reflect.Float32, result: 6.24321908234},
{val: "6.24321908234", toType: reflect.Float32, result: (float32)(6.24321908234)},
{val: "31247921747687123.123871293791263", toType: reflect.Float64, result: 31247921747687123.123871293791263},
{val: "6-8i", toType: reflect.Complex64, result: 6 - 8i},
{val: "2341241-910284i", toType: reflect.Complex128, result: 2341241 - 910284i},

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -219,10 +219,11 @@ func (l *BaseLogger) logEvent(lvl string, format string, args ...interface{}) {
// Only log if the log level matches the log request
_, fn, ln, _ := runtime.Caller(3)
msg := fmt.Sprintf(format, args...)
msg = fmt.Sprintf("%s : %s[%d] : %s [%s (%d)]: %s",
msg = fmt.Sprintf("%s : %s[%d] : [%s] %s [%s (%d)]: %s",
time.Now().Format(time.UnixDate),
l.fileConfig.LogTag,
l.procPID,
common.MountPath,
lvl,
filepath.Base(fn), ln,
msg)
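With common.MountPath added to the format string, every log line now identifies which mount produced it, which is useful when several blobfuse2 instances share one log. An illustrative before/after (timestamp, PID, path, and level tag are made up for the example):

before: Mon Sep 16 10:04:05 UTC 2024 : blobfuse2[4217] : LOG_DEBUG [libfuse.go (812)]: open file.txt
after : Mon Sep 16 10:04:05 UTC 2024 : blobfuse2[4217] : [/mnt/blob_mnt] LOG_DEBUG [libfuse.go (812)]: open file.txt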

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -120,7 +120,7 @@ func getSyslogLevel(lvl common.LogLevel) syslog.Priority {
func (l *SysLogger) write(lvl string, format string, args ...interface{}) {
_, fn, ln, _ := runtime.Caller(3)
msg := fmt.Sprintf(format, args...)
l.logger.Print(lvl, " [", filepath.Base(fn), " (", ln, ")]: ", msg)
l.logger.Print("[", common.MountPath, "] ", lvl, " [", filepath.Base(fn), " (", ln, ")]: ", msg)
}
func (l *SysLogger) Debug(format string, args ...interface{}) {

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -47,7 +47,7 @@ import (
// Standard config default values
const (
blobfuse2Version_ = "2.2.0-preview.2"
blobfuse2Version_ = "2.3.3"
DefaultMaxLogFileSize = 512
DefaultLogFileCount = 10
@ -64,9 +64,14 @@ const (
DefaultAllowOtherPermissionBits os.FileMode = 0777
MbToBytes = 1024 * 1024
GbToBytes = 1024 * MbToBytes
BfuseStats = "blobfuse_stats"
FuseAllowedFlags = "invalid FUSE options. Allowed FUSE configurations are: `-o attr_timeout=TIMEOUT`, `-o negative_timeout=TIMEOUT`, `-o entry_timeout=TIMEOUT` `-o allow_other`, `-o allow_root`, `-o umask=PERMISSIONS -o default_permissions`, `-o ro`"
UserAgentHeader = "User-Agent"
BlockCacheRWErrMsg = "Notice: The random write flow using block cache is temporarily blocked due to potential data integrity issues. This is a precautionary measure. \nIf you see this message, contact blobfusedev@microsoft.com or create a GitHub issue. We're working on a fix. More details: https://aka.ms/blobfuse2warnings."
)
func FuseIgnoredFlags() []string {
@ -88,6 +93,8 @@ var BfsDisabled = false
var TransferPipe = "/tmp/transferPipe"
var PollingPipe = "/tmp/pollPipe"
var MountPath string
// LogLevel enum
type LogLevel int
@ -305,3 +312,13 @@ func GetIdLength(id string) int64 {
existingBlockId, _ := base64.StdEncoding.DecodeString(id)
return int64(len(existingBlockId))
}
func init() {
val, present := os.LookupEnv("HOME")
if !present {
val = "./"
}
DefaultWorkDir = filepath.Join(val, ".blobfuse2")
DefaultLogFilePath = filepath.Join(DefaultWorkDir, "blobfuse2.log")
StatsConfigFilePath = filepath.Join(DefaultWorkDir, "stats_monitor.cfg")
}
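With this init() in place the effective defaults resolve as follows (assuming HOME=/home/user; when HOME is unset the paths are rooted at "./" instead):

DefaultWorkDir      = /home/user/.blobfuse2
DefaultLogFilePath  = /home/user/.blobfuse2/blobfuse2.log
StatsConfigFilePath = /home/user/.blobfuse2/stats_monitor.cfg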

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -34,6 +34,8 @@
package common
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
@ -95,3 +97,11 @@ func (suite *typesTestSuite) TestFindBlocksToModify() {
suite.assert.Equal(largerThanFile, true)
suite.assert.Equal(appendOnly, true)
}
func (suite *typesTestSuite) TestDefaultWorkDir() {
val, err := os.UserHomeDir()
suite.assert.Nil(err)
suite.assert.Equal(DefaultWorkDir, filepath.Join(val, ".blobfuse2"))
suite.assert.Equal(DefaultLogFilePath, filepath.Join(val, ".blobfuse2/blobfuse2.log"))
suite.assert.Equal(StatsConfigFilePath, filepath.Join(val, ".blobfuse2/stats_monitor.cfg"))
}

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -85,21 +85,80 @@ func IsDirectoryMounted(path string) bool {
return false
}
func IsMountActive(path string) (bool, error) {
// Get the pids of all running blobfuse2 processes using pidof
var out bytes.Buffer
cmd := exec.Command("pidof", "blobfuse2")
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
if err.Error() == "exit status 1" {
return false, nil
} else {
return true, fmt.Errorf("failed to get pid of blobfuse2 [%v]", err.Error())
}
}
// out contains the list of pids of the processes that are running
pidString := strings.Replace(out.String(), "\n", " ", -1)
pids := strings.Split(pidString, " ")
for _, pid := range pids {
// Get the mount path for this pid
// For this we need to check the command line arguments given to that process
// If the path matches, the mount is active and we return true
if pid == "" {
continue
}
cmd = exec.Command("ps", "-o", "args=", "-p", pid)
out.Reset()
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return true, fmt.Errorf("failed to get command line arguments for pid %s [%v]", pid, err.Error())
}
if strings.Contains(out.String(), path) {
return true, nil
}
}
return false, nil
}
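Two properties of this check are worth keeping in mind: it shells out to pidof and ps, so it only sees processes named blobfuse2, and the final match is a plain strings.Contains, so a path that happens to be a substring of another instance's arguments will also report active. A minimal caller sketch (the path is illustrative; assumes fmt and errors are imported and an enclosing function returning error):

active, err := IsMountActive("/mnt/blob_mnt")
if err != nil {
	return fmt.Errorf("unable to inspect running blobfuse2 instances: %w", err)
}
if active {
	return errors.New("/mnt/blob_mnt is already mounted by another blobfuse2 instance")
}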
// IsDirectoryEmpty is a utility function that returns true if the directory at that path is empty
func IsDirectoryEmpty(path string) bool {
if !DirectoryExists(path) {
// Directory does not exist, so it is safe to assume it is empty
return true
}
f, _ := os.Open(path)
defer f.Close()
_, err := f.Readdirnames(1)
if err == io.EOF {
return true
// If there is nothing in the directory then it is empty
return err == io.EOF
}
func TempCacheCleanup(path string) error {
if !IsDirectoryEmpty(path) {
// List the first level children of the directory
dirents, err := os.ReadDir(path)
if err != nil {
// Failed to list, return back error
return fmt.Errorf("failed to list directory contents : %s", err.Error())
}
// Delete all first level children with their hierarchy
for _, entry := range dirents {
os.RemoveAll(filepath.Join(path, entry.Name()))
}
}
if err != nil && err.Error() == "invalid argument" {
fmt.Println("Broken Mount : First Unmount ", path)
}
return false
return nil
}
// DirectoryExists is a utility function that returns true if the directory at that path exists and returns false if it does not exist.
@ -383,3 +442,36 @@ func GetDiskUsageFromStatfs(path string) (float64, float64, error) {
usedSpace := float64(totalSpace - availableSpace)
return usedSpace, float64(usedSpace) / float64(totalSpace) * 100, nil
}
func GetFuseMinorVersion() int {
var out bytes.Buffer
cmd := exec.Command("fusermount3", "--version")
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return 0
}
output := strings.Split(out.String(), ":")
if len(output) < 2 {
return 0
}
version := strings.Trim(output[1], " ")
if version == "" {
return 0
}
output = strings.Split(version, ".")
if len(output) < 2 {
return 0
}
val, err := strconv.Atoi(output[1])
if err != nil {
return 0
}
return val
}
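A worked example of the parsing above, assuming fusermount3 --version prints "fusermount3 version: 3.10.5" (the exact text varies by distribution; any deviation from this shape simply yields 0):

out.String()                      // "fusermount3 version: 3.10.5"
strings.Split(out.String(), ":")  // ["fusermount3 version", " 3.10.5"]
strings.Trim(output[1], " ")      // "3.10.5"
strings.Split(version, ".")       // ["3", "10", "5"]
strconv.Atoi(output[1])           // 10, the returned minor version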

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -34,9 +34,11 @@
package common
import (
"bytes"
"fmt"
"math/rand"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
@ -67,6 +69,74 @@ func TestUtil(t *testing.T) {
suite.Run(t, new(utilTestSuite))
}
func (suite *typesTestSuite) TestIsMountActiveNoMount() {
var out bytes.Buffer
cmd := exec.Command("pidof", "blobfuse2")
cmd.Stdout = &out
err := cmd.Run()
suite.assert.Equal("exit status 1", err.Error())
res, err := IsMountActive("/mnt/blobfuse")
suite.assert.Nil(err)
suite.assert.False(res)
}
func (suite *typesTestSuite) TestIsMountActiveTwoMounts() {
var out bytes.Buffer
// Define the file name and the content you want to write
fileName := "config.yaml"
lbpath := filepath.Join(home_dir, "lbpath")
os.MkdirAll(lbpath, 0777)
defer os.RemoveAll(lbpath)
content := "components:\n" +
" - libfuse\n" +
" - loopbackfs\n\n" +
"loopbackfs:\n" +
" path: " + lbpath + "\n\n"
mntdir := filepath.Join(home_dir, "mountdir")
os.MkdirAll(mntdir, 0777)
defer os.RemoveAll(mntdir)
dir, err := os.Getwd()
suite.assert.Nil(err)
configFile := filepath.Join(dir, "config.yaml")
// Create or open the file. If it doesn't exist, it will be created.
file, err := os.Create(fileName)
suite.assert.Nil(err)
defer file.Close() // Ensure the file is closed after we're done
// Write the content to the file
_, err = file.WriteString(content)
suite.assert.Nil(err)
err = os.Chdir("..")
suite.assert.Nil(err)
dir, err = os.Getwd()
suite.assert.Nil(err)
binary := filepath.Join(dir, "blobfuse2")
cmd := exec.Command(binary, mntdir, "--config-file", configFile)
cmd.Stdout = &out
err = cmd.Run()
suite.assert.Nil(err)
res, err := IsMountActive(mntdir)
suite.assert.Nil(err)
suite.assert.True(res)
res, err = IsMountActive("/mnt/blobfuse")
suite.assert.Nil(err)
suite.assert.False(res)
cmd = exec.Command(binary, "unmount", mntdir)
cmd.Stdout = &out
err = cmd.Run()
suite.assert.Nil(err)
}
func (suite *typesTestSuite) TestDirectoryExists() {
rand := randomString(8)
dir := filepath.Join(home_dir, "dir"+rand)
@ -229,3 +299,44 @@ func (suite *utilTestSuite) TestGetDiskUsage() {
suite.assert.NotEqual(usagePercent, 100)
_ = os.RemoveAll(filepath.Join(pwd, "util_test"))
}
func (suite *utilTestSuite) TestDirectoryCleanup() {
dirName := "./TestDirectoryCleanup"
// Directory does not exist
exists := DirectoryExists(dirName)
suite.assert.False(exists)
err := TempCacheCleanup(dirName)
suite.assert.Nil(err)
// Directory exists but is empty
_ = os.MkdirAll(dirName, 0777)
exists = DirectoryExists(dirName)
suite.assert.True(exists)
empty := IsDirectoryEmpty(dirName)
suite.assert.True(empty)
err = TempCacheCleanup(dirName)
suite.assert.Nil(err)
// Directory exists and is not empty
_ = os.MkdirAll(dirName+"/A", 0777)
exists = DirectoryExists(dirName)
suite.assert.True(exists)
empty = IsDirectoryEmpty(dirName)
suite.assert.False(empty)
err = TempCacheCleanup(dirName)
suite.assert.Nil(err)
_ = os.RemoveAll(dirName)
}
func (suite *utilTestSuite) TestGetFuseMinorVersion() {
i := GetFuseMinorVersion()
suite.assert.GreaterOrEqual(i, 0)
}

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -41,6 +41,7 @@ import (
const Blobfuse2ListContainerURL = "https://blobfuse2.blob.core.windows.net/release"
const BlobFuse2WarningsURL = "https://aka.ms/blobfuse2warnings"
const BlobFuse2BlockingURL = "https://aka.ms/blobfuse2blockers"
type Version struct {
segments []int64
@ -49,26 +50,26 @@ type Version struct {
}
// To keep the code simple, we assume we only use a simple subset of semantic versions.
// Namely, the version is either a normal stable version, or a pre-release version with '-preview' attached.
// Examples: 10.1.0, 11.2.0-preview.1
// Namely, the version is either a normal stable version, or a pre-release version with '~preview' or '-preview' attached.
// Examples: 10.1.0, 11.2.0-preview.1, 11.2.0~preview.1
func ParseVersion(raw string) (*Version, error) {
const standardError = "invalid version string"
rawSegments := strings.Split(raw, ".")
if !(len(rawSegments) == 3 || (len(rawSegments) == 4 && strings.Contains(rawSegments[2], "-"))) {
if !(len(rawSegments) == 3 || (len(rawSegments) == 4 && (strings.Contains(rawSegments[2], "-") || strings.Contains(rawSegments[2], "~")))) {
return nil, errors.New(standardError)
}
v := &Version{segments: make([]int64, 4), original: raw}
for i, str := range rawSegments {
//For any case such as SemVer-preview.1, SemVer-beta.1, SemVer-alpha.1 this would be true, and we assume the version to be a preview version.
if strings.Contains(str, "-") {
if strings.Contains(str, "-") || strings.Contains(str, "~") {
if i != 2 {
return nil, errors.New(standardError)
}
v.preview = true
//Splitting the string into two pieces and extracting SemVer which is always at 0th index
str = strings.Split(str, "-")[0]
str = strings.Split(strings.Split(str, "-")[0], "~")[0]
}
val, err := strconv.ParseInt(str, 10, 64)
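A worked example of the widened parser: "11.2.0~preview.3" splits on '.' into ["11", "2", "0~preview", "3"]; the third segment contains '~', so the version is flagged as a preview and the segment is reduced to "0" before ParseInt, exactly as "11.2.0-preview.3" was already handled. The tilde spelling matters for RPM packaging, where a hyphen is not permitted inside the version field, which is a plausible motivation for accepting both forms.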

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -58,6 +58,14 @@ func (vSuite *versionTestSuite) TestVersionEquality() {
v1, _ = ParseVersion("10.0.0-beta.5")
v2, _ = ParseVersion("10.0.0-beta.5")
assert.Equal(v1.compare(*v2), 0)
v1, _ = ParseVersion("10.0.0~preview.1")
v2, _ = ParseVersion("10.0.0~preview.1")
assert.Equal(v1.compare(*v2), 0)
v1, _ = ParseVersion("10.0.0~beta.5")
v2, _ = ParseVersion("10.0.0~beta.5")
assert.Equal(v1.compare(*v2), 0)
}
func (vSuite *versionTestSuite) TestVersionSuperiority() {
@ -82,6 +90,18 @@ func (vSuite *versionTestSuite) TestVersionSuperiority() {
v1, _ = ParseVersion("15.5.5-preview.6")
v2, _ = ParseVersion("15.5.5-preview.3")
assert.Equal(v1.compare(*v2), 1)
v1, _ = ParseVersion("15.5.6")
v2, _ = ParseVersion("15.5.6~preview.3")
assert.Equal(v1.compare(*v2), 1)
v1, _ = ParseVersion("15.5.6~preview.6")
v2, _ = ParseVersion("15.5.6~preview.3")
assert.Equal(v1.compare(*v2), 1)
v1, _ = ParseVersion("15.5.7~preview.6")
v2, _ = ParseVersion("15.5.7-preview.3")
assert.Equal(v1.compare(*v2), 1)
}
func (vSuite *versionTestSuite) TestVersionInferiority() {
@ -106,6 +126,18 @@ func (vSuite *versionTestSuite) TestVersionInferiority() {
v1, _ = ParseVersion("15.5.5-preview.3")
v2, _ = ParseVersion("15.5.5-preview.6")
assert.Equal(v1.compare(*v2), -1)
v1, _ = ParseVersion("15.5.6~preview.6")
v2, _ = ParseVersion("15.5.6")
assert.Equal(v1.compare(*v2), -1)
v1, _ = ParseVersion("15.5.6~preview.3")
v2, _ = ParseVersion("15.5.6~preview.6")
assert.Equal(v1.compare(*v2), -1)
v1, _ = ParseVersion("15.5.7-preview.3")
v2, _ = ParseVersion("15.5.7~preview.6")
assert.Equal(v1.compare(*v2), -1)
}
func TestVersionTestSuite(t *testing.T) {

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -153,8 +153,8 @@ func (ac *AttrCache) Configure(_ bool) error {
ac.noSymlinks = conf.NoSymlinks
log.Info("AttrCache::Configure : cache-timeout %d, symlink %t, cache-on-list %t",
ac.cacheTimeout, ac.noSymlinks, ac.cacheOnList)
log.Info("AttrCache::Configure : cache-timeout %d, symlink %t, cache-on-list %t, max-files %d",
ac.cacheTimeout, ac.noSymlinks, ac.cacheOnList, ac.maxFiles)
return nil
}
@ -232,7 +232,7 @@ func (ac *AttrCache) CreateDir(options internal.CreateDirOptions) error {
log.Trace("AttrCache::CreateDir : %s", options.Name)
err := ac.NextComponent().CreateDir(options)
if err == nil {
if err == nil || err == syscall.EEXIST {
ac.cacheLock.RLock()
defer ac.cacheLock.RUnlock()
ac.invalidatePath(options.Name)
@ -568,6 +568,18 @@ func (ac *AttrCache) Chown(options internal.ChownOptions) error {
return err
}
func (ac *AttrCache) CommitData(options internal.CommitDataOptions) error {
log.Trace("AttrCache::CommitData : %s", options.Name)
err := ac.NextComponent().CommitData(options)
if err == nil {
ac.cacheLock.RLock()
defer ac.cacheLock.RUnlock()
ac.invalidatePath(options.Name)
}
return err
}
// ------------------------- Factory -------------------------------------------
// Pipeline will call this method to create your object, initialize your variables here

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -34,6 +34,8 @@
package azstorage
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
"github.com/Azure/azure-storage-fuse/v2/common/log"
)
@ -71,7 +73,7 @@ type azAuthConfig struct {
type azAuth interface {
getEndpoint() string
setOption(key, value string)
getCredential() interface{}
getServiceClient(stConfig *AzStorageConfig) (interface{}, error)
}
// getAzAuth returns a new AzAuth
@ -89,14 +91,14 @@ func getAzAuth(config azAuthConfig) azAuth {
config.Endpoint)
if EAccountType.BLOCK() == config.AccountType {
return getAzAuthBlob(config)
return getAzBlobAuth(config)
} else if EAccountType.ADLS() == config.AccountType {
return getAzAuthBfs(config)
return getAzDatalakeAuth(config)
}
return nil
}
func getAzAuthBlob(config azAuthConfig) azAuth {
func getAzBlobAuth(config azAuthConfig) azAuth {
base := azAuthBase{config: config}
if config.AuthMode == EAuthType.KEY() {
return &azAuthBlobKey{
@ -122,40 +124,52 @@ func getAzAuthBlob(config azAuthConfig) azAuth {
azAuthBase: base,
},
}
} else if config.AuthMode == EAuthType.AZCLI() {
return &azAuthBlobCLI{
azAuthCLI{
azAuthBase: base,
},
}
} else {
log.Crit("azAuth::getAzAuthBlob : Auth type %s not supported. Failed to create Auth object", config.AuthMode)
log.Crit("azAuth::getAzBlobAuth : Auth type %s not supported. Failed to create Auth object", config.AuthMode)
}
return nil
}
func getAzAuthBfs(config azAuthConfig) azAuth {
func getAzDatalakeAuth(config azAuthConfig) azAuth {
base := azAuthBase{config: config}
if config.AuthMode == EAuthType.KEY() {
return &azAuthBfsKey{
return &azAuthDatalakeKey{
azAuthKey{
azAuthBase: base,
},
}
} else if config.AuthMode == EAuthType.SAS() {
return &azAuthBfsSAS{
return &azAuthDatalakeSAS{
azAuthSAS{
azAuthBase: base,
},
}
} else if config.AuthMode == EAuthType.MSI() {
return &azAuthBfsMSI{
return &azAuthDatalakeMSI{
azAuthMSI{
azAuthBase: base,
},
}
} else if config.AuthMode == EAuthType.SPN() {
return &azAuthBfsSPN{
return &azAuthDatalakeSPN{
azAuthSPN{
azAuthBase: base,
},
}
} else if config.AuthMode == EAuthType.AZCLI() {
return &azAuthDatalakeCLI{
azAuthCLI{
azAuthBase: base,
},
}
} else {
log.Crit("azAuth::getAzAuthBfs : Auth type %s not supported. Failed to create Auth object", config.AuthMode)
log.Crit("azAuth::getAzDatalakeAuth : Auth type %s not supported. Failed to create Auth object", config.AuthMode)
}
return nil
}
@ -171,3 +185,32 @@ func (base *azAuthBase) setOption(_, _ string) {}
func (base *azAuthBase) getEndpoint() string {
return base.config.Endpoint
}
// this type is included in OAuth modes - spn and msi
type azOAuthBase struct{}
// TODO:: track2 : check ActiveDirectoryEndpoint and AuthResource part
func (base *azOAuthBase) getAzIdentityClientOptions(config *azAuthConfig) azcore.ClientOptions {
if config == nil {
log.Err("azAuth::getAzIdentityClientOptions : azAuthConfig is nil")
return azcore.ClientOptions{}
}
opts := azcore.ClientOptions{
Cloud: getCloudConfiguration(config.Endpoint),
Logging: getSDKLogOptions(),
}
if config.ActiveDirectoryEndpoint != "" {
log.Debug("azAuthBase::getAzIdentityClientOptions : ActiveDirectoryAuthorityHost = %s", config.ActiveDirectoryEndpoint)
opts.Cloud.ActiveDirectoryAuthorityHost = config.ActiveDirectoryEndpoint
}
if config.AuthResource != "" {
if val, ok := opts.Cloud.Services[cloud.ResourceManager]; ok {
log.Debug("azAuthBase::getAzIdentityClientOptions : AuthResource = %s", config.AuthResource)
val.Endpoint = config.AuthResource
opts.Cloud.Services[cloud.ResourceManager] = val
}
}
return opts
}
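A sketch of how the reworked interface is consumed; the caller shown here is illustrative, since the real wiring lives in the azstorage connection code outside this hunk (assumes errors and the azblob service package are imported and an enclosing function returning error):

auth := getAzAuth(stConfig.authConfig)
if auth == nil {
	return errors.New("unsupported auth mode or account type")
}
svc, err := auth.getServiceClient(&stConfig)
if err != nil {
	return err
}
// For block accounts the concrete type is the azblob *service.Client;
// for ADLS accounts it is the azdatalake service client.
blobSvc := svc.(*service.Client)
_ = blobSvc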

View file

@ -12,7 +12,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -74,6 +74,7 @@ type storageTestConfiguration struct {
SpnTenantId string `json:"spn-tenant"`
SpnClientSecret string `json:"spn-secret"`
SkipMsi bool `json:"skip-msi"`
SkipAzCLI bool `json:"skip-azcli"`
ProxyAddress string `json:"proxy-address"`
}
@ -458,7 +459,7 @@ func (suite *authTestSuite) TestBlockSasKeySetOption() {
assert.Fail("TestBlockSasKeySetOption : Failed to create Storage object")
}
stg.SetupPipeline()
stg.NewCredentialKey("saskey", storageTestConfigurationParameters.BlockSas)
stg.UpdateServiceClient("saskey", storageTestConfigurationParameters.BlockSas)
if err := stg.SetupPipeline(); err != nil {
assert.Fail("TestBlockSasKeySetOption : Failed to setup pipeline")
}
@ -591,7 +592,7 @@ func (suite *authTestSuite) TestAdlsSasKeySetOption() {
assert.Fail("TestBlockSasKeySetOption : Failed to create Storage object")
}
stg.SetupPipeline()
stg.NewCredentialKey("saskey", storageTestConfigurationParameters.AdlsSas)
stg.UpdateServiceClient("saskey", storageTestConfigurationParameters.AdlsSas)
if err := stg.SetupPipeline(); err != nil {
assert.Fail("TestBlockSasKeySetOption : Failed to setup pipeline")
}
@ -686,113 +687,60 @@ func (suite *authTestSuite) TestAdlskMsiResId() {
}
}
func (suite *authTestSuite) TestBlockInvalidSpn() {
func (suite *authTestSuite) TestBlockAzCLI() {
defer suite.cleanupTest()
stgConfig := AzStorageConfig{
container: storageTestConfigurationParameters.BlockContainer,
authConfig: azAuthConfig{
AuthMode: EAuthType.SPN(),
AccountType: EAccountType.BLOCK(),
AccountName: storageTestConfigurationParameters.BlockAccount,
ClientID: storageTestConfigurationParameters.SpnClientId,
TenantID: storageTestConfigurationParameters.SpnTenantId,
ClientSecret: "",
Endpoint: generateEndpoint(false, storageTestConfigurationParameters.BlockAccount, EAccountType.BLOCK()),
AuthMode: EAuthType.AZCLI(),
AccountType: EAccountType.BLOCK(),
AccountName: storageTestConfigurationParameters.BlockAccount,
Endpoint: generateEndpoint(false, storageTestConfigurationParameters.BlockAccount, EAccountType.BLOCK()),
},
}
assert := assert.New(suite.T())
stg := NewAzStorageConnection(stgConfig)
if stg == nil {
assert.Fail("TestBlockInvalidSpn : Failed to create Storage object")
}
if err := stg.SetupPipeline(); err == nil {
assert.Fail("TestBlockInvalidSpn : Setup pipeline even though spn is invalid")
assert.NotNil(stg)
err := stg.SetupPipeline()
assert.Nil(err)
err = stg.TestPipeline()
if storageTestConfigurationParameters.SkipAzCLI {
// an error is returned when the az CLI is not installed or the user is not logged in
assert.NotNil(err)
} else {
assert.Nil(err)
}
}
func (suite *authTestSuite) TestBlockInvalidTokenPathSpn() {
defer suite.cleanupTest()
_ = os.WriteFile("newtoken.txt", []byte("abcdef"), 0777)
defer os.Remove("newtoken.txt")
stgConfig := AzStorageConfig{
container: storageTestConfigurationParameters.BlockContainer,
authConfig: azAuthConfig{
AuthMode: EAuthType.SPN(),
AccountType: EAccountType.BLOCK(),
AccountName: storageTestConfigurationParameters.BlockAccount,
ClientID: storageTestConfigurationParameters.SpnClientId,
TenantID: storageTestConfigurationParameters.SpnTenantId,
ClientSecret: "",
Endpoint: generateEndpoint(false, storageTestConfigurationParameters.BlockAccount, EAccountType.BLOCK()),
OAuthTokenFilePath: "newtoken.txt",
},
}
assert := assert.New(suite.T())
stg := NewAzStorageConnection(stgConfig)
if stg == nil {
assert.Fail("TestBlockInvalidSpn : Failed to create Storage object")
}
_ = stg.SetupPipeline()
}
func (suite *authTestSuite) TestBlockSpn() {
defer suite.cleanupTest()
stgConfig := AzStorageConfig{
container: storageTestConfigurationParameters.BlockContainer,
authConfig: azAuthConfig{
AuthMode: EAuthType.SPN(),
AccountType: EAccountType.BLOCK(),
AccountName: storageTestConfigurationParameters.BlockAccount,
ClientID: storageTestConfigurationParameters.SpnClientId,
TenantID: storageTestConfigurationParameters.SpnTenantId,
ClientSecret: storageTestConfigurationParameters.SpnClientSecret,
Endpoint: generateEndpoint(false, storageTestConfigurationParameters.BlockAccount, EAccountType.BLOCK()),
},
}
suite.validateStorageTest("TestBlockSpn", stgConfig)
}
func (suite *authTestSuite) TestAdlsInvalidSpn() {
func (suite *authTestSuite) TestAdlsAzCLI() {
defer suite.cleanupTest()
stgConfig := AzStorageConfig{
container: storageTestConfigurationParameters.AdlsContainer,
authConfig: azAuthConfig{
AuthMode: EAuthType.SPN(),
AccountType: EAccountType.ADLS(),
AccountName: storageTestConfigurationParameters.AdlsAccount,
ClientID: storageTestConfigurationParameters.SpnClientId,
TenantID: storageTestConfigurationParameters.SpnTenantId,
ClientSecret: "",
Endpoint: generateEndpoint(false, storageTestConfigurationParameters.AdlsAccount, EAccountType.ADLS()),
AuthMode: EAuthType.AZCLI(),
AccountType: EAccountType.ADLS(),
AccountName: storageTestConfigurationParameters.AdlsAccount,
Endpoint: generateEndpoint(false, storageTestConfigurationParameters.AdlsAccount, EAccountType.ADLS()),
},
}
assert := assert.New(suite.T())
stg := NewAzStorageConnection(stgConfig)
if stg == nil {
assert.Fail("TestAdlsInvalidSpn : Failed to create Storage object")
}
if err := stg.SetupPipeline(); err == nil {
assert.Fail("TestAdlsInvalidSpn : Setup pipeline even though spn is invalid")
}
}
assert.NotNil(stg)
func (suite *authTestSuite) TestAdlsSpn() {
defer suite.cleanupTest()
stgConfig := AzStorageConfig{
container: storageTestConfigurationParameters.AdlsContainer,
authConfig: azAuthConfig{
AuthMode: EAuthType.SPN(),
AccountType: EAccountType.ADLS(),
AccountName: storageTestConfigurationParameters.AdlsAccount,
ClientID: storageTestConfigurationParameters.SpnClientId,
TenantID: storageTestConfigurationParameters.SpnTenantId,
ClientSecret: storageTestConfigurationParameters.SpnClientSecret,
Endpoint: generateEndpoint(false, storageTestConfigurationParameters.AdlsAccount, EAccountType.ADLS()),
},
err := stg.SetupPipeline()
assert.Nil(err)
err = stg.TestPipeline()
if storageTestConfigurationParameters.SkipAzCLI {
// an error is returned when the az CLI is not installed or the user is not logged in
assert.NotNil(err)
} else {
assert.Nil(err)
}
suite.validateStorageTest("TestAdlsSpn", stgConfig)
}
func (suite *authTestSuite) cleanupTest() {

View file

@ -0,0 +1,107 @@
/*
_____ _____ _____ ____ ______ _____ ------
| | | | | | | | | | | | |
| | | | | | | | | | | | |
| --- | | | | |-----| |---- | | |-----| |----- ------
| | | | | | | | | | | | |
| ____| |_____ | ____| | ____| | |_____| _____| |_____ |_____
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
*/
package azstorage
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
serviceBfs "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/service"
"github.com/Azure/azure-storage-fuse/v2/common/log"
)
// Verify that these auth types implement the azAuth interface
var _ azAuth = &azAuthBlobCLI{}
var _ azAuth = &azAuthDatalakeCLI{}
type azAuthCLI struct {
azAuthBase
}
func (azcli *azAuthCLI) getTokenCredential() (azcore.TokenCredential, error) {
cred, err := azidentity.NewAzureCLICredential(nil)
return cred, err
}
type azAuthBlobCLI struct {
azAuthCLI
}
// getServiceClient : returns service client for blob using azcli as authentication mode
func (azcli *azAuthBlobCLI) getServiceClient(stConfig *AzStorageConfig) (interface{}, error) {
cred, err := azcli.getTokenCredential()
if err != nil {
log.Err("azAuthBlobCLI::getServiceClient : Failed to get token credential from azcli [%s]", err.Error())
return nil, err
}
opts, err := getAzBlobServiceClientOptions(stConfig)
if err != nil {
log.Err("azAuthBlobCLI::getServiceClient : Failed to create client options [%s]", err.Error())
return nil, err
}
svcClient, err := service.NewClient(azcli.config.Endpoint, cred, opts)
if err != nil {
log.Err("azAuthBlobCLI::getServiceClient : Failed to create service client [%s]", err.Error())
}
return svcClient, err
}
type azAuthDatalakeCLI struct {
azAuthCLI
}
// getServiceClient : returns service client for datalake using azcli as authentication mode
func (azcli *azAuthDatalakeCLI) getServiceClient(stConfig *AzStorageConfig) (interface{}, error) {
cred, err := azcli.getTokenCredential()
if err != nil {
log.Err("azAuthDatalakeCLI::getServiceClient : Failed to get token credential from azcli [%s]", err.Error())
return nil, err
}
opts, err := getAzDatalakeServiceClientOptions(stConfig)
if err != nil {
log.Err("azAuthDatalakeCLI::getServiceClient : Failed to create client options [%s]", err.Error())
return nil, err
}
svcClient, err := serviceBfs.NewClient(azcli.config.Endpoint, cred, opts)
if err != nil {
log.Err("azAuthDatalakeCLI::getServiceClient : Failed to create service client [%s]", err.Error())
}
return svcClient, err
}
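For completeness, a hedged sketch of a mount config exercising this new mode; the mode value is inferred from EAuthType.AZCLI() and the other keys follow the existing azstorage options, so verify them against the released config reference. An az login session must already exist on the host, since azidentity.NewAzureCLICredential shells out to the az binary to mint tokens:

azstorage:
  type: block
  account-name: myaccount
  container: mycontainer
  mode: azcli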

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -34,15 +34,18 @@
package azstorage
import (
"github.com/Azure/azure-storage-fuse/v2/common/log"
"errors"
"github.com/Azure/azure-storage-azcopy/v10/azbfs"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake"
serviceBfs "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/service"
"github.com/Azure/azure-storage-fuse/v2/common/log"
)
// Verify that these auth types implement the azAuth interface
var _ azAuth = &azAuthBlobKey{}
var _ azAuth = &azAuthBfsKey{}
var _ azAuth = &azAuthDatalakeKey{}
type azAuthKey struct {
azAuthBase
@ -52,38 +55,60 @@ type azAuthBlobKey struct {
azAuthKey
}
// GetCredential : Gets shared key based storage credentials for blob
func (azkey *azAuthBlobKey) getCredential() interface{} {
// getServiceClient : returns shared key based service client for blob
func (azkey *azAuthBlobKey) getServiceClient(stConfig *AzStorageConfig) (interface{}, error) {
if azkey.config.AccountKey == "" {
log.Err("azAuthBlobKey::getCredential : Shared key for account is empty, cannot authenticate user")
return nil
log.Err("azAuthBlobKey::getServiceClient : Shared key for account is empty, cannot authenticate user")
return nil, errors.New("shared key for account is empty, cannot authenticate user")
}
credential, err := azblob.NewSharedKeyCredential(
azkey.config.AccountName,
azkey.config.AccountKey)
cred, err := azblob.NewSharedKeyCredential(azkey.config.AccountName, azkey.config.AccountKey)
if err != nil {
log.Err("azAuthBlobKey::getCredential : Failed to create shared key credentials")
return nil
log.Err("azAuthBlobKey::getServiceClient : Failed to create shared key credential [%s]", err.Error())
return nil, err
}
return credential
opts, err := getAzBlobServiceClientOptions(stConfig)
if err != nil {
log.Err("azAuthBlobKey::getServiceClient : Failed to create client options [%s]", err.Error())
return nil, err
}
svcClient, err := service.NewClientWithSharedKeyCredential(azkey.config.Endpoint, cred, opts)
if err != nil {
log.Err("azAuthBlobKey::getServiceClient : Failed to create service client [%s]", err.Error())
}
return svcClient, err
}
type azAuthBfsKey struct {
type azAuthDatalakeKey struct {
azAuthKey
}
// GetCredential : Gets shared key based storage credentials for datalake
func (azkey *azAuthBfsKey) getCredential() interface{} {
// getServiceClient : returns shared key based service client for datalake
func (azkey *azAuthDatalakeKey) getServiceClient(stConfig *AzStorageConfig) (interface{}, error) {
if azkey.config.AccountKey == "" {
log.Err("azAuthBfsKey::getCredential : Shared key for account is empty, cannot authenticate user")
return nil
log.Err("azAuthDatalakeKey::getServiceClient : Shared key for account is empty, cannot authenticate user")
return nil, errors.New("shared key for account is empty, cannot authenticate user")
}
credential := azbfs.NewSharedKeyCredential(
azkey.config.AccountName,
azkey.config.AccountKey)
cred, err := azdatalake.NewSharedKeyCredential(azkey.config.AccountName, azkey.config.AccountKey)
if err != nil {
log.Err("azAuthDatalakeKey::getServiceClient : Failed to create shared key credential [%s]", err.Error())
return nil, err
}
return credential
opts, err := getAzDatalakeServiceClientOptions(stConfig)
if err != nil {
log.Err("azAuthDatalakeKey::getServiceClient : Failed to create client options [%s]", err.Error())
return nil, err
}
svcClient, err := serviceBfs.NewClientWithSharedKeyCredential(azkey.config.Endpoint, cred, opts)
if err != nil {
log.Err("azAuthDatalakeKey::getServiceClient : Failed to create service client [%s]", err.Error())
}
return svcClient, err
}

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -36,74 +36,52 @@ package azstorage
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"math/rand"
"os"
"os/exec"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
serviceBfs "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/service"
"github.com/Azure/azure-storage-fuse/v2/common/log"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/azure-storage-azcopy/v10/azbfs"
"github.com/Azure/azure-storage-azcopy/v10/common"
"github.com/Azure/azure-storage-blob-go/azblob"
)
// Verify that these auth types implement the azAuth interface
var _ azAuth = &azAuthBlobMSI{}
var _ azAuth = &azAuthBfsMSI{}
var _ azAuth = &azAuthDatalakeMSI{}
type azAuthMSI struct {
azAuthBase
azOAuthBase
}
func getNextExpiryTimer(token *adal.Token) time.Duration {
delay := time.Duration(5+rand.Intn(120)) * time.Second
return time.Until(token.Expires()) - delay
func (azmsi *azAuthMSI) getTokenCredential() (azcore.TokenCredential, error) {
opts := azmsi.getAzIdentityClientOptions(&azmsi.config)
msiOpts := &azidentity.ManagedIdentityCredentialOptions{
ClientOptions: opts,
}
if azmsi.config.ApplicationID != "" {
msiOpts.ID = (azidentity.ClientID)(azmsi.config.ApplicationID)
} else if azmsi.config.ResourceID != "" {
msiOpts.ID = (azidentity.ResourceID)(azmsi.config.ResourceID)
} else if azmsi.config.ObjectID != "" {
// login using azcli
return azmsi.getTokenCredentialUsingCLI()
}
cred, err := azidentity.NewManagedIdentityCredential(msiOpts)
return cred, err
}
// fetchToken : Generates a token based on the config
func (azmsi *azAuthMSI) fetchToken(endpoint string) (*common.OAuthTokenInfo, error) {
// Resource string is fixed and has no relation with any of the user inputs
// This is not the resource URL, rather a way to identify the resource type and tenant
// There are two options in the structure datalake and storage but datalake is not populated
// and does not work in all types of clouds (US, German, China etc).
// resource := azure.PublicCloud.ResourceIdentifiers.Datalake
// resource := azure.PublicCloud.ResourceIdentifiers.Storage
oAuthTokenInfo := &common.OAuthTokenInfo{
Identity: true,
IdentityInfo: common.IdentityInfo{
ClientID: azmsi.config.ApplicationID,
ObjectID: azmsi.config.ObjectID,
MSIResID: azmsi.config.ResourceID},
ActiveDirectoryEndpoint: endpoint,
}
func (azmsi *azAuthMSI) getTokenCredentialUsingCLI() (azcore.TokenCredential, error) {
command := "az login --identity --username " + azmsi.config.ObjectID
token, err := oAuthTokenInfo.GetNewTokenFromMSI(context.Background())
if err != nil {
return nil, err
}
oAuthTokenInfo.Token = *token
return oAuthTokenInfo, nil
}
// fetchTokenFromCLI : Generates a token using the Az Cli
func (azmsi *azAuthMSI) fetchTokenFromCLI() (*common.OAuthTokenInfo, error) {
resource := "https://storage.azure.com"
if azmsi.config.AuthResource != "" {
resource = azmsi.config.AuthResource
}
commandLine := "az account get-access-token -o json --resource " + resource
if azmsi.config.TenantID != "" {
commandLine += " --tenant " + azmsi.config.TenantID
}
cliCmd := exec.CommandContext(context.Background(), "/bin/sh", "-c", commandLine)
cliCmd := exec.CommandContext(context.Background(), "/bin/sh", "-c", command)
cliCmd.Dir = "/bin"
cliCmd.Env = os.Environ()
@ -119,232 +97,64 @@ func (azmsi *azAuthMSI) fetchTokenFromCLI() (*common.OAuthTokenInfo, error) {
if msg == "" {
msg = err.Error()
}
return nil, fmt.Errorf(msg)
return nil, fmt.Errorf("%s", msg)
}
log.Info("azAuthMSI::fetchTokenFromCLI : Successfully fetched token from Azure CLI : %s", output)
t := struct {
AccessToken string `json:"accessToken"`
Authority string `json:"_authority"`
ClientID string `json:"_clientId"`
ExpiresOn string `json:"expiresOn"`
IdentityProvider string `json:"identityProvider"`
IsMRRT bool `json:"isMRRT"`
RefreshToken string `json:"refreshToken"`
Resource string `json:"resource"`
TokenType string `json:"tokenType"`
UserID string `json:"userId"`
}{}
log.Info("azAuthMSI::getTokenCredentialUsingCLI : Successfully logged in using Azure CLI")
log.Debug("azAuthMSI::getTokenCredentialUsingCLI : Output: %s", output)
err = json.Unmarshal(output, &t)
if err != nil {
return nil, err
}
// the Azure CLI's "expiresOn" is local time
_, err = time.ParseInLocation("2006-01-02 15:04:05.999999", t.ExpiresOn, time.Local)
if err != nil {
return nil, fmt.Errorf("error parsing token expiration time %q: %v", t.ExpiresOn, err)
}
tokenInfo := &common.OAuthTokenInfo{
Token: adal.Token{
AccessToken: t.AccessToken,
RefreshToken: t.RefreshToken,
ExpiresOn: json.Number(t.ExpiresOn),
Resource: t.Resource,
Type: t.TokenType,
},
}
return tokenInfo, nil
cred, err := azidentity.NewAzureCLICredential(nil)
return cred, err
}
type azAuthBlobMSI struct {
azAuthMSI
}
// GetCredential : Get MSI based credentials for blob
func (azmsi *azAuthBlobMSI) getCredential() interface{} {
// Generate the token based on configured inputs
var token *common.OAuthTokenInfo = nil
var err error = nil
norefresh := false
msi_endpoint := os.Getenv("MSI_ENDPOINT")
if strings.Contains(msi_endpoint, "127.0.0.1:") {
// this might be AML workspace so try to get token using CLI
log.Info("azAuthBlobMSI::getCredential : Potential AML workspace detected")
token, err = azmsi.fetchTokenFromCLI()
if err != nil {
log.Err("azAuthBlobMSI::getCredential : %s", err.Error())
} else if token != nil {
norefresh = true
}
}
if token == nil {
log.Debug("azAuthBlobMSI::getCredential : Going for conventional fetchToken. MSI Endpoint : %s", msi_endpoint)
token, err = azmsi.fetchToken(msi_endpoint)
if token == nil {
log.Debug("azAuthBlobMSI::getCredential : Going for conventional fetchToken without endpoint")
token, err = azmsi.fetchToken("")
}
}
// getServiceClient : returns MSI based service client for blob
func (azmsi *azAuthBlobMSI) getServiceClient(stConfig *AzStorageConfig) (interface{}, error) {
cred, err := azmsi.getTokenCredential()
if err != nil {
// fmt.Println(token.AccessToken)
log.Err("azAuthBlobMSI::getCredential : Failed to get credential [%s]", err.Error())
return nil
log.Err("azAuthBlobMSI::getServiceClient : Failed to get token credential from MSI [%s]", err.Error())
return nil, err
}
var tc azblob.TokenCredential
if norefresh {
log.Info("azAuthBlobMSI::getCredential : MSI Token over CLI retrieved %s (%d)", token.AccessToken, token.Expires())
// We are running in cli mode so token can not be refreshed, on expiry just get the new token
tc = azblob.NewTokenCredential(token.AccessToken, func(tc azblob.TokenCredential) time.Duration {
for failCount := 0; failCount < 5; failCount++ {
newToken, err := azmsi.fetchTokenFromCLI()
if err != nil {
log.Err("azAuthBlobMSI::getCredential : Failed to refresh token attempt %d [%s]", failCount, err.Error())
time.Sleep(time.Duration(rand.Intn(5)) * time.Second)
continue
}
// set the new token value
tc.SetToken(newToken.AccessToken)
log.Debug("azAuthBlobMSI::getCredential : MSI Token retrieved %s (%d)", newToken.AccessToken, newToken.Expires())
// Get the next token slightly before the current one expires
return getNextExpiryTimer(&newToken.Token)
}
log.Err("azAuthBlobMSI::getCredential : Failed to refresh token bailing out.")
return 0
})
} else {
log.Info("azAuthBlobMSI::getCredential : MSI Token retrieved %s (%d)", token.AccessToken, token.Expires())
// Using token create the credential object, here also register a call back which refreshes the token
tc = azblob.NewTokenCredential(token.AccessToken, func(tc azblob.TokenCredential) time.Duration {
// token, err := azmsi.fetchToken(msi_endpoint)
// if err != nil {
// log.Err("azAuthBlobMSI::getCredential : Failed to fetch token [%s]", err.Error())
// return 0
// }
for failCount := 0; failCount < 5; failCount++ {
newToken, err := token.Refresh(context.Background())
if err != nil {
log.Err("azAuthBlobMSI::getCredential : Failed to refresh token attempt %d [%s]", failCount, err.Error())
time.Sleep(time.Duration(rand.Intn(5)) * time.Second)
continue
}
// set the new token value
tc.SetToken(newToken.AccessToken)
log.Debug("azAuthBlobMSI::getCredential : MSI Token retrieved %s (%d)", newToken.AccessToken, newToken.Expires())
// Get the next token slightly before the current one expires
return getNextExpiryTimer(newToken)
}
log.Err("azAuthBlobMSI::getCredential : Failed to refresh token bailing out.")
return 0
})
opts, err := getAzBlobServiceClientOptions(stConfig)
if err != nil {
log.Err("azAuthBlobMSI::getServiceClient : Failed to create client options [%s]", err.Error())
return nil, err
}
return tc
svcClient, err := service.NewClient(azmsi.config.Endpoint, cred, opts)
if err != nil {
log.Err("azAuthBlobMSI::getServiceClient : Failed to create service client [%s]", err.Error())
}
return svcClient, err
}
type azAuthBfsMSI struct {
type azAuthDatalakeMSI struct {
azAuthMSI
}
// GetCredential : Get MSI based credentials for datalake
func (azmsi *azAuthBfsMSI) getCredential() interface{} {
// Generate the token based on configured inputs
var token *common.OAuthTokenInfo = nil
var err error = nil
norefresh := false
msi_endpoint := os.Getenv("MSI_ENDPOINT")
if strings.Contains(msi_endpoint, "127.0.0.1:") {
// this might be AML workspace so try to get token using CLI
log.Info("azAuthBfsMSI::getCredential : Potential AML workspace detected")
token, err = azmsi.fetchTokenFromCLI()
if err != nil {
log.Err("azAuthBfsMSI::getCredential : %s", err.Error())
} else if token != nil {
norefresh = true
}
}
if token == nil {
log.Debug("azAuthBfsMSI::getCredential : Going for conventional fetchToken. MSI Endpoint : %s", msi_endpoint)
token, err = azmsi.fetchToken(msi_endpoint)
if token == nil {
log.Debug("azAuthBfsMSI::getCredential : Going for conventional fetchToken without endpoint")
token, err = azmsi.fetchToken("")
}
}
// getServiceClient : returns MSI based service client for datalake
func (azmsi *azAuthDatalakeMSI) getServiceClient(stConfig *AzStorageConfig) (interface{}, error) {
cred, err := azmsi.getTokenCredential()
if err != nil {
// fmt.Println(token.AccessToken)
log.Err("azAuthBfsMSI::getCredential : Failed to get credential [%s]", err.Error())
return nil
log.Err("azAuthDatalakeMSI::getServiceClient : Failed to get token credential from MSI [%s]", err.Error())
return nil, err
}
var tc azbfs.TokenCredential
if norefresh {
log.Info("azAuthBfsMSI::getCredential : MSI Token over CLI retrieved %s (%d)", token.AccessToken, token.Expires())
// We are running in cli mode so token can not be refreshed, on expiry just get the new token
tc = azbfs.NewTokenCredential(token.AccessToken, func(tc azbfs.TokenCredential) time.Duration {
for failCount := 0; failCount < 5; failCount++ {
newToken, err := azmsi.fetchTokenFromCLI()
if err != nil {
log.Err("azAuthBfsMSI::getCredential : Failed to refresh token attempt %d [%s]", failCount, err.Error())
time.Sleep(time.Duration(rand.Intn(5)) * time.Second)
continue
}
// set the new token value
tc.SetToken(newToken.AccessToken)
log.Debug("azAuthBfsMSI::getCredential : MSI Token retrieved %s (%d)", newToken.AccessToken, newToken.Expires())
// Get the next token slightly before the current one expires
return getNextExpiryTimer(&newToken.Token)
}
log.Err("azAuthBfsMSI::getCredential : Failed to refresh token bailing out.")
return 0
})
} else {
log.Info("azAuthBfsMSI::getCredential : MSI Token retrieved %s (%d)", token.AccessToken, token.Expires())
// Using token create the credential object, here also register a call back which refreshes the token
tc = azbfs.NewTokenCredential(token.AccessToken, func(tc azbfs.TokenCredential) time.Duration {
// token, err := azmsi.fetchToken(msi_endpoint)
// if err != nil {
// log.Err("azAuthBfsMSI::getCredential : Failed to fetch token [%s]", err.Error())
// return 0
// }
for failCount := 0; failCount < 5; failCount++ {
newToken, err := token.Refresh(context.Background())
if err != nil {
log.Err("azAuthBfsMSI::getCredential : Failed to refresh token attempt %d [%s]", failCount, err.Error())
time.Sleep(time.Duration(rand.Intn(5)) * time.Second)
continue
}
// set the new token value
tc.SetToken(newToken.AccessToken)
log.Debug("azAuthBfsMSI::getCredential : MSI Token retrieved %s (%d)", newToken.AccessToken, newToken.Expires())
// Get the next token slightly before the current one expires
return getNextExpiryTimer(newToken)
}
log.Err("azAuthBfsMSI::getCredential : Failed to refresh token bailing out.")
return 0
})
opts, err := getAzDatalakeServiceClientOptions(stConfig)
if err != nil {
log.Err("azAuthDatalakeMSI::getServiceClient : Failed to create client options [%s]", err.Error())
return nil, err
}
return tc
svcClient, err := serviceBfs.NewClient(azmsi.config.Endpoint, cred, opts)
if err != nil {
log.Err("azAuthDatalakeMSI::getServiceClient : Failed to create service client [%s]", err.Error())
}
return svcClient, err
}
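The hand-rolled refresh loops deleted above are not re-implemented anywhere: azidentity credentials cache tokens and renew them near expiry on their own, and the SDK pipeline requests a token per call. A minimal sketch of that contract (illustrative only; the scope shown is the standard storage resource):

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func sketchToken() error {
	cred, err := azidentity.NewManagedIdentityCredential(nil)
	if err != nil {
		return err
	}
	// The service client's auth policy calls GetToken for each request; the
	// credential returns its cached token until it is close to expiry, then renews.
	_, err = cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://storage.azure.com/.default"},
	})
	return err
}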

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -34,29 +34,22 @@
package azstorage
import (
"fmt"
"errors"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
serviceBfs "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/service"
"github.com/Azure/azure-storage-fuse/v2/common/log"
"github.com/Azure/azure-storage-azcopy/v10/azbfs"
"github.com/Azure/azure-storage-blob-go/azblob"
)
// Verify that these auth types implement the azAuth interface
var _ azAuth = &azAuthBlobSAS{}
var _ azAuth = &azAuthBfsSAS{}
var _ azAuth = &azAuthDatalakeSAS{}
type azAuthSAS struct {
azAuthBase
}
// GetEndpoint : Gets the SAS endpoint
func (azsas *azAuthSAS) getEndpoint() string {
return fmt.Sprintf("%s%s",
azsas.config.Endpoint,
azsas.config.SASKey)
}
// SetOption : Sets the sas key information for the SAS auth.
func (azsas *azAuthSAS) setOption(key, value string) {
if key == "saskey" {
@ -64,30 +57,57 @@ func (azsas *azAuthSAS) setOption(key, value string) {
}
}
// GetEndpoint : Gets the SAS endpoint
func (azsas *azAuthSAS) getEndpoint() string {
return azsas.config.Endpoint + "?" + strings.TrimLeft(azsas.config.SASKey, "?")
}
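The rewritten getEndpoint normalizes the separator, so callers no longer depend on how the SAS key was supplied. Both stored forms yield the same URL, since TrimLeft strips any leading '?' before exactly one is re-added (account and token values are illustrative):

SASKey = "sv=2023-01-03&sig=..."  -> https://myacct.blob.core.windows.net/?sv=2023-01-03&sig=...
SASKey = "?sv=2023-01-03&sig=..." -> https://myacct.blob.core.windows.net/?sv=2023-01-03&sig=...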
type azAuthBlobSAS struct {
azAuthSAS
}
// GetCredential : Gets SAS based credentials for blob
func (azsas *azAuthBlobSAS) getCredential() interface{} {
// getServiceClient : returns SAS based service client for blob
func (azsas *azAuthBlobSAS) getServiceClient(stConfig *AzStorageConfig) (interface{}, error) {
if azsas.config.SASKey == "" {
log.Err("azAuthBlobSAS::getCredential : SAS key for account is empty, cannot authenticate user")
return nil
log.Err("azAuthBlobSAS::getServiceClient : SAS key for account is empty, cannot authenticate user")
return nil, errors.New("sas key for account is empty, cannot authenticate user")
}
return azblob.NewAnonymousCredential()
opts, err := getAzBlobServiceClientOptions(stConfig)
if err != nil {
log.Err("azAuthBlobSAS::getServiceClient : Failed to create client options [%s]", err.Error())
return nil, err
}
svcClient, err := service.NewClientWithNoCredential(azsas.getEndpoint(), opts)
if err != nil {
log.Err("azAuthBlobSAS::getServiceClient : Failed to create service client [%s]", err.Error())
}
return svcClient, err
}
type azAuthBfsSAS struct {
type azAuthDatalakeSAS struct {
azAuthSAS
}
// GetCredential : Gets SAS based credentials for datalake
func (azsas *azAuthBfsSAS) getCredential() interface{} {
// getServiceClient : returns SAS based service client for datalake
func (azsas *azAuthDatalakeSAS) getServiceClient(stConfig *AzStorageConfig) (interface{}, error) {
if azsas.config.SASKey == "" {
log.Err("azAuthBfsSAS::getCredential : SAS key for account is empty, cannot authenticate user")
return nil
log.Err("azAuthDatalakeSAS::getServiceClient : SAS key for account is empty, cannot authenticate user")
return nil, errors.New("sas key for account is empty, cannot authenticate user")
}
return azbfs.NewAnonymousCredential()
opts, err := getAzDatalakeServiceClientOptions(stConfig)
if err != nil {
log.Err("azAuthDatalakeSAS::getServiceClient : Failed to create client options [%s]", err.Error())
return nil, err
}
svcClient, err := serviceBfs.NewClientWithNoCredential(azsas.getEndpoint(), opts)
if err != nil {
log.Err("azAuthDatalakeSAS::getServiceClient : Failed to create service client [%s]", err.Error())
}
return svcClient, err
}

View file

@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -34,168 +34,104 @@
package azstorage
import (
"fmt"
"math/rand"
"os"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
serviceBfs "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/service"
"github.com/Azure/azure-storage-fuse/v2/common/log"
"github.com/Azure/azure-storage-azcopy/v10/azbfs"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
)
// Verify that these auth types implement the azAuth interface
var _ azAuth = &azAuthBlobSPN{}
var _ azAuth = &azAuthBfsSPN{}
var _ azAuth = &azAuthDatalakeSPN{}
type azAuthSPN struct {
azAuthBase
azOAuthBase
}
func getNextExpiryTimerSPN(spt *adal.ServicePrincipalToken) time.Duration {
delay := time.Duration(5+rand.Intn(120)) * time.Second
return time.Until(spt.Token().Expires()) - delay
}
func (azspn *azAuthSPN) getTokenCredential() (azcore.TokenCredential, error) {
var cred azcore.TokenCredential
var err error
func (azspn *azAuthSPN) getAADEndpoint() string {
if azspn.config.ActiveDirectoryEndpoint != "" {
return azspn.config.ActiveDirectoryEndpoint
}
return azure.PublicCloud.ActiveDirectoryEndpoint
}
// fetchToken : Generates a token based on the config
func (azspn *azAuthSPN) fetchToken() (*adal.ServicePrincipalToken, error) {
// Use the configured AAD endpoint for token generation
config, err := adal.NewOAuthConfig(azspn.getAADEndpoint(), azspn.config.TenantID)
if err != nil {
log.Err("AzAuthSPN::fetchToken : Failed to generate OAuth Config for SPN [%s]", err.Error())
return nil, err
}
// Create the resource URL
resourceURL := azspn.config.AuthResource
if resourceURL == "" {
resourceURL = azspn.getEndpoint()
}
// Generate the SPN token
var spt *adal.ServicePrincipalToken
clOpts := azspn.getAzIdentityClientOptions(&azspn.config)
if azspn.config.OAuthTokenFilePath != "" {
log.Trace("AzAuthSPN::fetchToken : Going for fedrated token flow.")
log.Trace("AzAuthSPN::getTokenCredential : Going for fedrated token flow")
tokenReader := func() (string, error) {
token, err := os.ReadFile(azspn.config.OAuthTokenFilePath)
if err != nil {
return "", fmt.Errorf("failed to read OAuth token file %s [%v]", azspn.config.OAuthTokenFilePath, err.Error())
}
return string(token), nil
}
spt, err = adal.NewServicePrincipalTokenFromFederatedTokenCallback(*config, azspn.config.ClientID, tokenReader, resourceURL)
// TODO: track2: test this in an Azure Kubernetes setup
cred, err = azidentity.NewWorkloadIdentityCredential(&azidentity.WorkloadIdentityCredentialOptions{
ClientOptions: clOpts,
ClientID: azspn.config.ClientID,
TenantID: azspn.config.TenantID,
TokenFilePath: azspn.config.OAuthTokenFilePath,
})
if err != nil {
log.Err("AzAuthSPN::fetchToken : Failed to generate token for SPN [%s]", err.Error())
log.Err("AzAuthSPN::getTokenCredential : Failed to generate token for SPN [%s]", err.Error())
return nil, err
}
} else {
spt, err = adal.NewServicePrincipalToken(*config, azspn.config.ClientID, azspn.config.ClientSecret, resourceURL)
log.Trace("AzAuthSPN::getTokenCredential : Using client secret for fetching token")
cred, err = azidentity.NewClientSecretCredential(azspn.config.TenantID, azspn.config.ClientID, azspn.config.ClientSecret, &azidentity.ClientSecretCredentialOptions{
ClientOptions: clOpts,
})
if err != nil {
log.Err("AzAuthSPN::fetchToken : Failed to generate token for SPN [%s]", err.Error())
log.Err("AzAuthSPN::getTokenCredential : Failed to generate token for SPN [%s]", err.Error())
return nil, err
}
}
return spt, nil
return cred, err
}
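As a reference for the two azidentity flows above, a hedged stand-alone sketch; the tenant, client, and token-file values are placeholders, not blobfuse defaults, and newCred is a hypothetical helper.

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func newCred(useWorkloadIdentity bool) (azcore.TokenCredential, error) {
	if useWorkloadIdentity {
		// Federated (workload identity) flow: the token is read from a projected file.
		return azidentity.NewWorkloadIdentityCredential(&azidentity.WorkloadIdentityCredentialOptions{
			ClientID:      "<client-id>",
			TenantID:      "<tenant-id>",
			TokenFilePath: "/var/run/secrets/tokens/azure-identity-token",
		})
	}
	// Client secret flow.
	return azidentity.NewClientSecretCredential("<tenant-id>", "<client-id>", "<client-secret>", nil)
}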
type azAuthBlobSPN struct {
azAuthSPN
}
// GetCredential : Get SPN based credentials for blob
func (azspn *azAuthBlobSPN) getCredential() interface{} {
spt, err := azspn.fetchToken()
// getServiceClient : returns SPN based service client for blob
func (azspn *azAuthBlobSPN) getServiceClient(stConfig *AzStorageConfig) (interface{}, error) {
cred, err := azspn.getTokenCredential()
if err != nil {
log.Err("azAuthBlobSPN::getCredential : Failed to fetch token for SPN [%s]", err.Error())
return nil
log.Err("azAuthBlobSPN::getServiceClient : Failed to get token credential from SPN [%s]", err.Error())
return nil, err
}
// Using the token, create the credential object; also register a callback which refreshes the token
tc := azblob.NewTokenCredential(spt.Token().AccessToken, func(tc azblob.TokenCredential) time.Duration {
// spt, err = azspn.fetchToken()
// if err != nil {
// log.Err("azAuthBlobSPN::getCredential : Failed to fetch SPN token [%s]", err.Error())
// return 0
// }
for failCount := 0; failCount < 5; failCount++ {
err = spt.Refresh()
if err != nil {
log.Err("azAuthBfsSPN::getCredential : Failed to refresh token attempt %d [%s]", failCount, err.Error())
time.Sleep(time.Duration(rand.Intn(5)) * time.Second)
continue
}
opts, err := getAzBlobServiceClientOptions(stConfig)
if err != nil {
log.Err("azAuthBlobSPN::getServiceClient : Failed to create client options [%s]", err.Error())
return nil, err
}
// set the new token value
tc.SetToken(spt.Token().AccessToken)
log.Debug("azAuthBlobSPN::getCredential : SPN Token retrieved %s (%d)", spt.Token().AccessToken, spt.Token().Expires())
svcClient, err := service.NewClient(azspn.config.Endpoint, cred, opts)
if err != nil {
log.Err("azAuthBlobSPN::getServiceClient : Failed to create service client [%s]", err.Error())
}
// Get the next token slightly before the current one expires
return getNextExpiryTimerSPN(spt)
// Test code to expire token every 30 seconds
// return time.Until(time.Now()) + 30*time.Second
}
log.Err("azAuthBfsSPN::getCredential : Failed to refresh token bailing out.")
return 0
})
return tc
return svcClient, err
}
type azAuthBfsSPN struct {
type azAuthDatalakeSPN struct {
azAuthSPN
}
// GetCredential : Get SPN based credentials for datalake
func (azspn *azAuthBfsSPN) getCredential() interface{} {
spt, err := azspn.fetchToken()
// getServiceClient : returns SPN based service client for datalake
func (azspn *azAuthDatalakeSPN) getServiceClient(stConfig *AzStorageConfig) (interface{}, error) {
cred, err := azspn.getTokenCredential()
if err != nil {
log.Err("azAuthBfsSPN::getCredential : Failed to fetch token for SPN [%s]", err.Error())
return nil
log.Err("azAuthDatalakeSPN::getServiceClient : Failed to get token credential from SPN [%s]", err.Error())
return nil, err
}
// Using the token, create the credential object; also register a callback which refreshes the token
tc := azbfs.NewTokenCredential(spt.Token().AccessToken, func(tc azbfs.TokenCredential) time.Duration {
// spt, err = azspn.fetchToken()
// if err != nil {
// log.Err("azAuthBfsSPN::getCredential : Failed to fetch SPN token [%s]", err.Error())
// return 0
// }
for failCount := 0; failCount < 5; failCount++ {
err = spt.Refresh()
if err != nil {
log.Err("azAuthBfsSPN::getCredential : Failed to refresh token attempt %d [%s]", failCount, err.Error())
time.Sleep(time.Duration(rand.Intn(5)) * time.Second)
continue
}
opts, err := getAzDatalakeServiceClientOptions(stConfig)
if err != nil {
log.Err("azAuthDatalakeSPN::getServiceClient : Failed to create client options [%s]", err.Error())
return nil, err
}
// set the new token value
tc.SetToken(spt.Token().AccessToken)
log.Debug("azAuthBfsSPN::getCredential : SPN Token retrieved %s (%d)", spt.Token().AccessToken, spt.Token().Expires())
svcClient, err := serviceBfs.NewClient(azspn.config.Endpoint, cred, opts)
if err != nil {
log.Err("azAuthDatalakeSPN::getServiceClient : Failed to create service client [%s]", err.Error())
}
// Get the next token slightly before the current one expires
return getNextExpiryTimerSPN(spt)
// Test code to expire token every 30 seconds
// return time.Until(time.Now()) + 30*time.Second
}
log.Err("azAuthBfsSPN::getCredential : Failed to refresh token bailing out.")
return 0
})
return tc
return svcClient, err
}


@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -40,7 +40,7 @@ import (
"syscall"
"time"
azcopyCommon "github.com/Azure/azure-storage-azcopy/v10/common"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-storage-fuse/v2/common"
"github.com/Azure/azure-storage-fuse/v2/common/config"
"github.com/Azure/azure-storage-fuse/v2/common/log"
@ -131,6 +131,9 @@ func (az *AzStorage) OnConfigChange() {
log.Err("AzStorage::OnConfigChange : failed to UpdateConfig", err.Error())
return
}
// dynamic update of the sdk log listener
setSDKLogListener()
}
func (az *AzStorage) configureAndTest(isParent bool) error {
@ -142,6 +145,9 @@ func (az *AzStorage) configureAndTest(isParent bool) error {
return err
}
// set SDK log listener to log the requests and responses
setSDKLogListener()
err = az.storage.SetPrefixPath(az.stConfig.prefixPath)
if err != nil {
log.Err("AzStorage::configureAndTest : Failed to set prefix path [%s]", err.Error())
@ -170,11 +176,6 @@ func (az *AzStorage) Start(ctx context.Context) error {
// create stats collector for azstorage
azStatsCollector = stats_manager.NewStatsCollector(az.Name())
// This is a workaround right now to disable the input watcher thread which continuously monitors the below config for changes
// Running this thread continuously increases the CPU usage by 5% even when there is no activity on the blobfuse2 mount path
// Lifecycle manager init is commented in the "blobfuse2-cpu-usage" branch. Blobfuse2 imports azcopy from this branch.
azcopyCommon.GetLifecycleMgr().EnableInputWatcher()
return nil
}
@ -304,7 +305,9 @@ func (az *AzStorage) StreamDir(options internal.StreamDirOptions) ([]*internal.O
log.Debug("AzStorage::StreamDir : Retrieved %d objects with %s marker for Path %s", len(new_list), options.Token, path)
if new_marker != nil && *new_marker != "" {
if new_marker == nil {
new_marker = to.Ptr("")
} else if *new_marker != "" {
log.Debug("AzStorage::StreamDir : next-marker %s for Path %s", *new_marker, path)
if len(new_list) == 0 {
/* In some customer scenarios we have seen that new_list is empty but marker is not empty
@ -543,6 +546,10 @@ func (az *AzStorage) FlushFile(options internal.FlushFileOptions) error {
return az.storage.StageAndCommit(options.Handle.Path, options.Handle.CacheObj.BlockOffsetList)
}
func (az *AzStorage) GetCommittedBlockList(name string) (*internal.CommittedBlockList, error) {
return az.storage.GetCommittedBlockList(name)
}
func (az *AzStorage) StageData(opt internal.StageDataOptions) error {
return az.storage.StageBlock(opt.Name, opt.Data, opt.Id)
}
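StageData above delegates to the connector's StageBlock; with the track-2 SDK the underlying calls look roughly like this sketch. stageAndCommit is a hypothetical helper for illustration, not blobfuse code.

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

// stageAndCommit stages each chunk as a block, then commits the list.
func stageAndCommit(ctx context.Context, blb *blockblob.Client, chunks [][]byte) error {
	ids := make([]string, 0, len(chunks))
	for i, chunk := range chunks {
		// Block IDs must be base64-encoded and of equal length within one blob.
		id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("block-%06d", i)))
		if _, err := blb.StageBlock(ctx, id, streaming.NopCloser(bytes.NewReader(chunk)), nil); err != nil {
			return err
		}
		ids = append(ids, id)
	}
	// Committing makes the staged blocks the blob's content, in the given order.
	_, err := blb.CommitBlockList(ctx, ids, nil)
	return err
}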
@ -566,7 +573,7 @@ func NewazstorageComponent() internal.Component {
stConfig: AzStorageConfig{
blockSize: 0,
maxConcurrency: 32,
defaultTier: getAccessTierType("none"),
defaultTier: getAccessTierType(""),
authConfig: azAuthConfig{
AuthMode: EAuthType.KEY(),
UseHTTP: false,
@ -652,6 +659,9 @@ func init() {
config.BindPFlag(compName+".honour-acl", honourACL)
honourACL.Hidden = true
cpkEnabled := config.AddBoolFlag("cpk-enabled", false, "Enable client provided key.")
config.BindPFlag(compName+".cpk-enabled", cpkEnabled)
config.RegisterFlagCompletionFunc("container-name", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return nil, cobra.ShellCompDirectiveNoFileComp
})


@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -60,3 +60,18 @@ const (
size = "Size"
target = "Target"
)
// headers which should be logged and not redacted
var allowedHeaders []string = []string{
"x-ms-version", "x-ms-date", "x-ms-range", "x-ms-delete-snapshots", "x-ms-delete-type-permanent", "x-ms-blob-content-type",
"x-ms-blob-type", "x-ms-copy-source", "x-ms-copy-id", "x-ms-copy-status", "x-ms-access-tier", "x-ms-creation-time", "x-ms-copy-progress",
"x-ms-access-tier-inferred", "x-ms-acl", "x-ms-group", "x-ms-lease-state", "x-ms-owner", "x-ms-permissions", "x-ms-resource-type", "x-ms-content-crc64",
"x-ms-rename-source", "accept-ranges", "x-ms-continuation",
}
// query parameters which should be logged and not redacted
var allowedQueryParams []string = []string{
"comp", "delimiter", "include", "marker", "maxresults", "prefix", "restype", "blockid", "blocklisttype",
"directory", "recursive", "resource", "se", "sp", "spr", "srt", "ss", "st", "sv", "action", "continuation", "mode",
"client_id", "authorization_endpoint",
}

(The diff for this file is not shown because of its size.)

(The diff for this file is not shown because of its size.)


@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -39,10 +39,10 @@ import (
"reflect"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/Azure/azure-storage-fuse/v2/common/config"
"github.com/Azure/azure-storage-fuse/v2/common/log"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/JeffreyRichter/enum/enum"
)
@ -71,6 +71,10 @@ func (AuthType) MSI() AuthType {
return AuthType(4)
}
func (AuthType) AZCLI() AuthType {
return AuthType(5)
}
func (a AuthType) String() string {
return enum.StringInt(a, reflect.TypeOf(a))
}
@ -120,24 +124,24 @@ const DefaultMaxResultsForList int32 = 2
// https://github.com/Azure/go-autorest/blob/a46566dfcbdc41e736295f94e9f690ceaf50094a/autorest/adal/token.go#L788
// newServicePrincipalTokenFromMSI : reads them directly from env
const (
EnvAzStorageAccount = "AZURE_STORAGE_ACCOUNT"
EnvAzStorageAccountType = "AZURE_STORAGE_ACCOUNT_TYPE"
EnvAzStorageAccessKey = "AZURE_STORAGE_ACCESS_KEY"
EnvAzStorageSasToken = "AZURE_STORAGE_SAS_TOKEN"
EnvAzStorageIdentityClientId = "AZURE_STORAGE_IDENTITY_CLIENT_ID"
EnvAzStorageIdentityResourceId = "AZURE_STORAGE_IDENTITY_RESOURCE_ID"
EnvAzStorageIdentityObjectId = "AZURE_STORAGE_IDENTITY_OBJECT_ID"
EnvAzStorageSpnTenantId = "AZURE_STORAGE_SPN_TENANT_ID"
EnvAzStorageSpnClientId = "AZURE_STORAGE_SPN_CLIENT_ID"
EnvAzStorageSpnClientSecret = "AZURE_STORAGE_SPN_CLIENT_SECRET"
EnvAzStorageSpnOAuthTokenFilePath = "AZURE_OAUTH_TOKEN_FILE"
EnvAzStorageAadEndpoint = "AZURE_STORAGE_AAD_ENDPOINT"
EnvAzStorageAuthType = "AZURE_STORAGE_AUTH_TYPE"
EnvAzStorageBlobEndpoint = "AZURE_STORAGE_BLOB_ENDPOINT"
EnvHttpProxy = "http_proxy"
EnvHttpsProxy = "https_proxy"
EnvAzStorageAccountContainer = "AZURE_STORAGE_ACCOUNT_CONTAINER"
EnvAzAuthResource = "AZURE_STORAGE_AUTH_RESOURCE"
EnvAzStorageAccount = "AZURE_STORAGE_ACCOUNT"
EnvAzStorageAccountType = "AZURE_STORAGE_ACCOUNT_TYPE"
EnvAzStorageAccessKey = "AZURE_STORAGE_ACCESS_KEY"
EnvAzStorageSasToken = "AZURE_STORAGE_SAS_TOKEN"
EnvAzStorageIdentityClientId = "AZURE_STORAGE_IDENTITY_CLIENT_ID"
EnvAzStorageIdentityResourceId = "AZURE_STORAGE_IDENTITY_RESOURCE_ID"
EnvAzStorageIdentityObjectId = "AZURE_STORAGE_IDENTITY_OBJECT_ID"
EnvAzStorageSpnTenantId = "AZURE_STORAGE_SPN_TENANT_ID"
EnvAzStorageSpnClientId = "AZURE_STORAGE_SPN_CLIENT_ID"
EnvAzStorageSpnClientSecret = "AZURE_STORAGE_SPN_CLIENT_SECRET"
EnvAzStorageSpnOAuthTokenFilePath = "AZURE_OAUTH_TOKEN_FILE"
EnvAzStorageAadEndpoint = "AZURE_STORAGE_AAD_ENDPOINT"
EnvAzStorageAuthType = "AZURE_STORAGE_AUTH_TYPE"
EnvAzStorageBlobEndpoint = "AZURE_STORAGE_BLOB_ENDPOINT"
EnvAzStorageAccountContainer = "AZURE_STORAGE_ACCOUNT_CONTAINER"
EnvAzAuthResource = "AZURE_STORAGE_AUTH_RESOURCE"
EnvAzStorageCpkEncryptionKey = "AZURE_STORAGE_CPK_ENCRYPTION_KEY"
EnvAzStorageCpkEncryptionKeySha256 = "AZURE_STORAGE_CPK_ENCRYPTION_KEY_SHA256"
)
type AzStorageOptions struct {
@ -168,7 +172,6 @@ type AzStorageOptions struct {
MaxRetryDelay int32 `config:"max-retry-delay-sec" yaml:"max-retry-delay-sec,omitempty"`
HttpProxyAddress string `config:"http-proxy" yaml:"http-proxy,omitempty"`
HttpsProxyAddress string `config:"https-proxy" yaml:"https-proxy,omitempty"`
SdkTrace bool `config:"sdk-trace" yaml:"sdk-trace,omitempty"`
FailUnsupportedOp bool `config:"fail-unsupported-op" yaml:"fail-unsupported-op,omitempty"`
AuthResourceString string `config:"auth-resource" yaml:"auth-resource,omitempty"`
UpdateMD5 bool `config:"update-md5" yaml:"update-md5"`
@ -178,6 +181,9 @@ type AzStorageOptions struct {
DisableCompression bool `config:"disable-compression" yaml:"disable-compression"`
Telemetry string `config:"telemetry" yaml:"telemetry"`
HonourACL bool `config:"honour-acl" yaml:"honour-acl"`
CPKEnabled bool `config:"cpk-enabled" yaml:"cpk-enabled"`
CPKEncryptionKey string `config:"cpk-encryption-key" yaml:"cpk-encryption-key"`
CPKEncryptionKeySha256 string `config:"cpk-encryption-key-sha256" yaml:"cpk-encryption-key-sha256"`
// v1 support
UseAdls bool `config:"use-adls" yaml:"-"`
@ -211,12 +217,13 @@ func RegisterEnvVariables() {
config.BindEnv("azstorage.mode", EnvAzStorageAuthType)
config.BindEnv("azstorage.http-proxy", EnvHttpProxy)
config.BindEnv("azstorage.https-proxy", EnvHttpsProxy)
config.BindEnv("azstorage.container", EnvAzStorageAccountContainer)
config.BindEnv("azstorage.auth-resource", EnvAzAuthResource)
config.BindEnv("azstorage.cpk-encryption-key", EnvAzStorageCpkEncryptionKey)
config.BindEnv("azstorage.cpk-encryption-key-sha256", EnvAzStorageCpkEncryptionKeySha256)
}
// ----------- Config Parsing and Validation ---------------
@ -317,8 +324,8 @@ func ParseAndValidateConfig(az *AzStorage, opt AzStorageOptions) error {
}
if opt.BlockSize != 0 {
if opt.BlockSize > azblob.BlockBlobMaxStageBlockBytes {
log.Err("ParseAndValidateConfig : Block size is too large. Block size has to be smaller than %s Bytes", azblob.BlockBlobMaxStageBlockBytes)
if opt.BlockSize > blockblob.MaxStageBlockBytes {
log.Err("ParseAndValidateConfig : Block size is too large. Block size has to be smaller than %s Bytes", blockblob.MaxStageBlockBytes)
return errors.New("block size is too large")
}
az.stConfig.blockSize = opt.BlockSize * 1024 * 1024
@ -340,6 +347,16 @@ func ParseAndValidateConfig(az *AzStorage, opt AzStorageOptions) error {
opt.UseHTTP = !opt.UseHTTPS
}
if opt.CPKEnabled {
if opt.CPKEncryptionKey == "" || opt.CPKEncryptionKeySha256 == "" {
log.Err("ParseAndValidateConfig : CPK key or CPK key sha256 not provided")
return errors.New("CPK key or key sha256 not provided")
}
az.stConfig.cpkEnabled = opt.CPKEnabled
az.stConfig.cpkEncryptionKey = opt.CPKEncryptionKey
az.stConfig.cpkEncryptionKeySha256 = opt.CPKEncryptionKeySha256
}
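For illustration only: once validated, these settings typically surface as a blob.CPKInfo attached to individual requests, mirroring the datalake file.CPKInfo construction shown later in this diff. cpkInfo below is a hypothetical helper, not part of the change.

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// cpkInfo builds the per-request client-provided-key options for the blob SDK.
func cpkInfo(key, keySHA256 string) *blob.CPKInfo {
	return &blob.CPKInfo{
		EncryptionKey:       &key,
		EncryptionKeySHA256: &keySHA256,
		EncryptionAlgorithm: to.Ptr(blob.EncryptionAlgorithmTypeAES256),
	}
}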
// Validate endpoint
if opt.Endpoint == "" {
log.Warn("ParseAndValidateConfig : account endpoint not provided, assuming the default .core.windows.net style endpoint")
@ -385,12 +402,9 @@ func ParseAndValidateConfig(az *AzStorage, opt AzStorageOptions) error {
}
}
}
az.stConfig.proxyAddress = formatEndpointProtocol(az.stConfig.proxyAddress, opt.UseHTTP)
log.Info("ParseAndValidateConfig : using the following proxy address from the config file: %s", az.stConfig.proxyAddress)
az.stConfig.sdkTrace = opt.SdkTrace
log.Info("ParseAndValidateConfig : sdk logging from the config file: %t", az.stConfig.sdkTrace)
err = ParseAndReadDynamicConfig(az, opt, false)
if err != nil {
return err
@ -444,6 +458,8 @@ func ParseAndValidateConfig(az *AzStorage, opt AzStorageOptions) error {
az.stConfig.authConfig.ClientSecret = opt.ClientSecret
az.stConfig.authConfig.TenantID = opt.TenantID
az.stConfig.authConfig.OAuthTokenFilePath = opt.OAuthTokenFilePath
case EAuthType.AZCLI():
az.stConfig.authConfig.AuthMode = EAuthType.AZCLI()
default:
log.Err("ParseAndValidateConfig : Invalid auth mode %s", opt.AuthMode)
@ -482,14 +498,14 @@ func ParseAndValidateConfig(az *AzStorage, opt AzStorageOptions) error {
log.Warn("unsupported v1 CLI parameter: debug-libcurl is not applicable in blobfuse2.")
}
log.Info("ParseAndValidateConfig : Account: %s, Container: %s, AccountType: %s, Auth: %s, Prefix: %s, Endpoint: %s, ListBlock: %d, MD5 : %v %v, Virtual Directory: %v, Max Results For List %v, Disable Compression: %v",
log.Info("ParseAndValidateConfig : account %s, container %s, account-type %s, auth %s, prefix %s, endpoint %s, MD5 %v %v, virtual-directory %v, disable-compression %v, CPK %v",
az.stConfig.authConfig.AccountName, az.stConfig.container, az.stConfig.authConfig.AccountType, az.stConfig.authConfig.AuthMode,
az.stConfig.prefixPath, az.stConfig.authConfig.Endpoint, az.stConfig.cancelListForSeconds, az.stConfig.validateMD5, az.stConfig.updateMD5, az.stConfig.virtualDirectory, az.stConfig.maxResultsForList, az.stConfig.disableCompression)
log.Info("ParseAndValidateConfig : Retry Config: Retry count %d, Max Timeout %d, BackOff Time %d, Max Delay %d",
az.stConfig.prefixPath, az.stConfig.authConfig.Endpoint, az.stConfig.validateMD5, az.stConfig.updateMD5, az.stConfig.virtualDirectory, az.stConfig.disableCompression, az.stConfig.cpkEnabled)
log.Info("ParseAndValidateConfig : use-HTTP %t, block-size %d, max-concurrency %d, default-tier %s, fail-unsupported-op %t, mount-all-containers %t", az.stConfig.authConfig.UseHTTP, az.stConfig.blockSize, az.stConfig.maxConcurrency, az.stConfig.defaultTier, az.stConfig.ignoreAccessModifiers, az.stConfig.mountAllContainers)
log.Info("ParseAndValidateConfig : Retry Config: retry-count %d, max-timeout %d, backoff-time %d, max-delay %d",
az.stConfig.maxRetries, az.stConfig.maxTimeout, az.stConfig.backoffTime, az.stConfig.maxRetryDelay)
log.Info("ParseAndValidateConfig : Telemetry : %s, Honour ACL: %v, disable symlink: %v", az.stConfig.telemetry, az.stConfig.honourACL, az.stConfig.disableSymlink)
log.Info("ParseAndValidateConfig : Telemetry : %s, honour-ACL %v, disable-symlink %v", az.stConfig.telemetry, az.stConfig.honourACL, az.stConfig.disableSymlink)
return nil
}
@ -566,9 +582,9 @@ func ParseAndReadDynamicConfig(az *AzStorage, opt AzStorageOptions, reload bool)
if reload {
log.Info("ParseAndReadDynamicConfig : SAS Key updated")
if err := az.storage.NewCredentialKey("saskey", az.stConfig.authConfig.SASKey); err != nil {
if err := az.storage.UpdateServiceClient("saskey", az.stConfig.authConfig.SASKey); err != nil {
az.stConfig.authConfig.SASKey = oldSas
_ = az.storage.NewCredentialKey("saskey", az.stConfig.authConfig.SASKey)
_ = az.storage.UpdateServiceClient("saskey", az.stConfig.authConfig.SASKey)
return errors.New("SAS key update failure")
}
}


@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -36,7 +36,7 @@ package azstorage
import (
"testing"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/Azure/azure-storage-fuse/v2/common"
"github.com/Azure/azure-storage-fuse/v2/common/config"
"github.com/Azure/azure-storage-fuse/v2/common/log"
@ -129,7 +129,7 @@ func (s *configTestSuite) TestBlockSize() {
assert.NotNil(err)
assert.Equal(az.stConfig.blockSize, opt.BlockSize*1024*1024)
opt.BlockSize = azblob.BlockBlobMaxStageBlockBytes + 1
opt.BlockSize = blockblob.MaxStageBlockBytes + 1
err = ParseAndValidateConfig(az, opt)
assert.NotNil(err)
assert.Contains(err.Error(), "block size is too large")
@ -172,26 +172,51 @@ func (s *configTestSuite) TestProxyConfig() {
opt.HttpsProxyAddress = "127.0.0.1"
err := ParseAndValidateConfig(az, opt)
assert.Nil(err)
assert.Equal(az.stConfig.proxyAddress, opt.HttpsProxyAddress)
assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpsProxyAddress, !opt.UseHTTPS))
opt.HttpProxyAddress = "128.0.0.1"
opt.HttpsProxyAddress = "https://128.0.0.1:8080/"
err = ParseAndValidateConfig(az, opt)
assert.Nil(err)
assert.Equal(az.stConfig.proxyAddress, opt.HttpProxyAddress)
assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpsProxyAddress, !opt.UseHTTPS))
opt.HttpsProxyAddress = "http://129.0.0.1:8080/"
err = ParseAndValidateConfig(az, opt)
assert.Nil(err)
assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpsProxyAddress, !opt.UseHTTPS))
opt.HttpProxyAddress = "130.0.0.1"
err = ParseAndValidateConfig(az, opt)
assert.Nil(err)
assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpProxyAddress, !opt.UseHTTPS))
opt.HttpProxyAddress = "http://131.0.0.1:8080/"
err = ParseAndValidateConfig(az, opt)
assert.Nil(err)
assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpProxyAddress, !opt.UseHTTPS))
config.SetBool(compName+".use-https", true)
opt.UseHTTPS = true
opt.HttpsProxyAddress = ""
opt.HttpProxyAddress = "127.0.0.1"
opt.HttpProxyAddress = "132.0.0.1"
err = ParseAndValidateConfig(az, opt)
assert.NotNil(err)
assert.Contains(err.Error(), "`http-proxy` Invalid : must set `use-http: true`")
opt.HttpsProxyAddress = "128.0.0.1"
opt.HttpsProxyAddress = "133.0.0.1"
err = ParseAndValidateConfig(az, opt)
assert.Nil(err)
assert.Equal(az.stConfig.proxyAddress, opt.HttpsProxyAddress)
assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpsProxyAddress, !opt.UseHTTPS))
opt.HttpsProxyAddress = "http://134.0.0.1:8080/"
err = ParseAndValidateConfig(az, opt)
assert.Nil(err)
assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpsProxyAddress, !opt.UseHTTPS))
opt.HttpsProxyAddress = "https://135.0.0.1:8080/"
err = ParseAndValidateConfig(az, opt)
assert.Nil(err)
assert.Equal(az.stConfig.proxyAddress, formatEndpointProtocol(opt.HttpsProxyAddress, !opt.UseHTTPS))
}
func (s *configTestSuite) TestMaxResultsForList() {
@ -385,7 +410,7 @@ func (s *configTestSuite) TestCompressionType() {
}
func (s *configTestSuite) TestInvalidSASRefresh() {
func (s *configTestSuite) TestSASRefresh() {
defer config.ResetConfig()
assert := assert.New(s.T())
az := &AzStorage{}
@ -409,8 +434,7 @@ func (s *configTestSuite) TestInvalidSASRefresh() {
az.storage = &BlockBlob{Auth: &azAuthBlobSAS{azAuthSAS: azAuthSAS{azAuthBase: azAuthBase{config: azAuthConfig{Endpoint: "abcd:://qreq!@#$%^&*()_)(*&^%$#"}}}}}
err := ParseAndReadDynamicConfig(az, opt, true)
assert.NotNil(err)
assert.Equal(err.Error(), "SAS key update failure")
assert.Nil(err)
}
func TestConfigTestSuite(t *testing.T) {


@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -34,19 +34,16 @@
package azstorage
import (
"net/url"
"os"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-storage-fuse/v2/common"
"github.com/Azure/azure-storage-fuse/v2/common/log"
"github.com/Azure/azure-storage-fuse/v2/internal"
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
)
// Example for azblob usage : https://godoc.org/github.com/Azure/azure-storage-blob-go/azblob#pkg-examples
// For methods help refer : https://godoc.org/github.com/Azure/azure-storage-blob-go/azblob#ContainerURL
// Example for azblob usage : https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob#pkg-examples
// For methods help refer : https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob#Client
type AzStorageConfig struct {
authConfig azAuthConfig
@ -56,7 +53,7 @@ type AzStorageConfig struct {
maxConcurrency uint16
// tier to be set on every upload
defaultTier azblob.AccessTierType
defaultTier *blob.AccessTier
// Return back readDir on mount for given amount of time
cancelListForSeconds uint16
@ -67,7 +64,6 @@ type AzStorageConfig struct {
backoffTime int32
maxRetryDelay int32
proxyAddress string
sdkTrace bool
ignoreAccessModifiers bool
mountAllContainers bool
@ -80,14 +76,15 @@ type AzStorageConfig struct {
telemetry string
honourACL bool
disableSymlink bool
// CPK related config
cpkEnabled bool
cpkEncryptionKey string
cpkEncryptionKeySha256 string
}
type AzStorageConnection struct {
Config AzStorageConfig
Pipeline pipeline.Pipeline
Endpoint *url.URL
}
type AzConnection interface {
@ -121,8 +118,8 @@ type AzConnection interface {
ReadBuffer(name string, offset int64, len int64) ([]byte, error)
ReadInBuffer(name string, offset int64, len int64, data []byte) error
WriteFromFile(name string, metadata map[string]string, fi *os.File) error
WriteFromBuffer(name string, metadata map[string]string, data []byte) error
WriteFromFile(name string, metadata map[string]*string, fi *os.File) error
WriteFromBuffer(name string, metadata map[string]*string, data []byte) error
Write(options internal.WriteFileOptions) error
GetFileBlockOffsets(name string) (*common.BlockOffsetList, error)
@ -131,10 +128,11 @@ type AzConnection interface {
TruncateFile(string, int64) error
StageAndCommit(name string, bol *common.BlockOffsetList) error
GetCommittedBlockList(string) (*internal.CommittedBlockList, error)
StageBlock(string, []byte, string) error
CommitBlocks(string, []string) error
NewCredentialKey(_, _ string) error
UpdateServiceClient(_, _ string) error
}
// NewAzStorageConnection : Based on account type create respective AzConnection Object


@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -35,7 +35,7 @@ package azstorage
import (
"context"
"errors"
"fmt"
"io/fs"
"net/url"
"os"
@ -44,21 +44,25 @@ import (
"syscall"
"time"
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-fuse/v2/common"
"github.com/Azure/azure-storage-fuse/v2/common/log"
"github.com/Azure/azure-storage-fuse/v2/internal"
"github.com/Azure/azure-storage-azcopy/v10/azbfs"
"github.com/Azure/azure-storage-azcopy/v10/ste"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/directory"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/file"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/service"
)
type Datalake struct {
AzStorageConnection
Auth azAuth
Service azbfs.ServiceURL
Filesystem azbfs.FileSystemURL
BlockBlob BlockBlob
Auth azAuth
Service *service.Client
Filesystem *filesystem.Client
BlockBlob BlockBlob
datalakeCPKOpt *file.CPKInfo
}
// Verify that Datalake implements AzConnection interface
@ -91,6 +95,14 @@ func transformConfig(dlConfig AzStorageConfig) AzStorageConfig {
func (dl *Datalake) Configure(cfg AzStorageConfig) error {
dl.Config = cfg
if dl.Config.cpkEnabled {
dl.datalakeCPKOpt = &file.CPKInfo{
EncryptionKey: &dl.Config.cpkEncryptionKey,
EncryptionKeySHA256: &dl.Config.cpkEncryptionKeySha256,
EncryptionAlgorithm: to.Ptr(directory.EncryptionAlgorithmTypeAES256),
}
}
return dl.BlockBlob.Configure(transformConfig(cfg))
}
@ -103,63 +115,43 @@ func (dl *Datalake) UpdateConfig(cfg AzStorageConfig) error {
return dl.BlockBlob.UpdateConfig(cfg)
}
// NewSASKey : New SAS key provided by user
func (dl *Datalake) NewCredentialKey(key, value string) (err error) {
// UpdateServiceClient : Update the SAS specified by the user and create new service client
func (dl *Datalake) UpdateServiceClient(key, value string) (err error) {
if key == "saskey" {
dl.Auth.setOption(key, value)
// Update the endpoint url from the credential
dl.Endpoint, err = url.Parse(dl.Auth.getEndpoint())
// get the service client with updated SAS
svcClient, err := dl.Auth.getServiceClient(&dl.Config)
if err != nil {
log.Err("Datalake::NewCredentialKey : Failed to form base endpoint url [%s]", err.Error())
return errors.New("failed to form base endpoint url")
log.Err("Datalake::UpdateServiceClient : Failed to get service client [%s]", err.Error())
return err
}
// Update the service url
dl.Service = azbfs.NewServiceURL(*dl.Endpoint, dl.Pipeline)
// update the service client
dl.Service = svcClient.(*service.Client)
// Update the filesystem url
dl.Filesystem = dl.Service.NewFileSystemURL(dl.Config.container)
// Update the filesystem client
dl.Filesystem = dl.Service.NewFileSystemClient(dl.Config.container)
}
return dl.BlockBlob.NewCredentialKey(key, value)
return dl.BlockBlob.UpdateServiceClient(key, value)
}
// getCredential : Create the credential object
func (dl *Datalake) getCredential() azbfs.Credential {
log.Trace("Datalake::getCredential : Getting credential")
// createServiceClient : Create the service client
func (dl *Datalake) createServiceClient() (*service.Client, error) {
log.Trace("Datalake::createServiceClient : Getting service client")
dl.Auth = getAzAuth(dl.Config.authConfig)
if dl.Auth == nil {
log.Err("Datalake::getCredential : Failed to retrieve auth object")
return nil
log.Err("Datalake::createServiceClient : Failed to retrieve auth object")
return nil, fmt.Errorf("failed to retrieve auth object")
}
cred := dl.Auth.getCredential()
if cred == nil {
log.Err("Datalake::getCredential : Failed to get credential")
return nil
svcClient, err := dl.Auth.getServiceClient(&dl.Config)
if err != nil {
log.Err("Datalake::createServiceClient : Failed to get service client [%s]", err.Error())
return nil, err
}
return cred.(azbfs.Credential)
}
// NewBfsPipeline creates a Pipeline using the specified credentials and options.
func NewBfsPipeline(c azbfs.Credential, o azbfs.PipelineOptions, ro ste.XferRetryOptions) pipeline.Pipeline {
// Closest to API goes first; closest to the wire goes last
f := []pipeline.Factory{
azbfs.NewTelemetryPolicyFactory(o.Telemetry),
azbfs.NewUniqueRequestIDPolicyFactory(),
// ste.NewBlobXferRetryPolicyFactory(ro),
ste.NewBFSXferRetryPolicyFactory(ro),
}
f = append(f, c)
f = append(f,
pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked
ste.NewRequestLogPolicyFactory(ste.RequestLogOptions{
LogWarningIfTryOverThreshold: o.RequestLog.LogWarningIfTryOverThreshold,
SyslogDisabled: o.RequestLog.SyslogDisabled,
}))
return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: o.HTTPSender, Log: o.Log})
return svcClient.(*service.Client), nil
}
// SetupPipeline : Based on the config setup the ***URLs
@ -167,33 +159,15 @@ func (dl *Datalake) SetupPipeline() error {
log.Trace("Datalake::SetupPipeline : Setting up")
var err error
// Get the credential
cred := dl.getCredential()
if cred == nil {
log.Err("Datalake::SetupPipeline : Failed to get credential")
return errors.New("failed to get credential")
}
// Create a new pipeline
options, retryOptions := getAzBfsPipelineOptions(dl.Config)
dl.Pipeline = NewBfsPipeline(cred, options, retryOptions)
if dl.Pipeline == nil {
log.Err("Datalake::SetupPipeline : Failed to create pipeline object")
return errors.New("failed to create pipeline object")
}
// Get the endpoint url from the credential
dl.Endpoint, err = url.Parse(dl.Auth.getEndpoint())
// create the service client
dl.Service, err = dl.createServiceClient()
if err != nil {
log.Err("Datalake::SetupPipeline : Failed to form base end point url [%s]", err.Error())
return errors.New("failed to form base end point url")
log.Err("Datalake::SetupPipeline : Failed to get service client [%s]", err.Error())
return err
}
// Create the service url
dl.Service = azbfs.NewServiceURL(*dl.Endpoint, dl.Pipeline)
// Create the filesystem url
dl.Filesystem = dl.Service.NewFileSystemURL(dl.Config.container)
// create the filesystem client
dl.Filesystem = dl.Service.NewFileSystemClient(dl.Config.container)
return dl.BlockBlob.SetupPipeline()
}
@ -206,27 +180,24 @@ func (dl *Datalake) TestPipeline() error {
return nil
}
if dl.Filesystem.String() == "" {
log.Err("Datalake::TestPipeline : Filesystem URL is not built, check your credentials")
if dl.Filesystem == nil || dl.Filesystem.DFSURL() == "" || dl.Filesystem.BlobURL() == "" {
log.Err("Datalake::TestPipeline : Filesystem Client is not built, check your credentials")
return nil
}
maxResults := int32(2)
listPath, err := dl.Filesystem.ListPaths(context.Background(),
azbfs.ListPathsFilesystemOptions{
Path: &dl.Config.prefixPath,
Recursive: false,
MaxResults: &maxResults,
})
listPathPager := dl.Filesystem.NewListPathsPager(false, &filesystem.ListPathsOptions{
MaxResults: &maxResults,
Prefix: &dl.Config.prefixPath,
})
// we are just validating the auth mode used. So, no need to iterate over the pages
_, err := listPathPager.NextPage(context.Background())
if err != nil {
log.Err("Datalake::TestPipeline : Failed to validate account with given auth %s", err.Error)
return err
}
if listPath == nil {
log.Info("Datalake::TestPipeline : Filesystem is empty")
}
return dl.BlockBlob.TestPipeline()
}
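TestPipeline above deliberately fetches a single page to validate the auth mode; a complete listing would drain the pager, roughly as in this sketch. listAll and fsClient are illustrative only, not part of the change.

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/filesystem"
)

// listAll drains the pager page by page.
func listAll(ctx context.Context, fsClient *filesystem.Client, prefix string) error {
	pager := fsClient.NewListPathsPager(false, &filesystem.ListPathsOptions{Prefix: &prefix})
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, p := range page.Paths {
			fmt.Println(*p.Name) // fields on PathItem are pointers in this SDK
		}
	}
	return nil
}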
@ -262,14 +233,24 @@ func (dl *Datalake) CreateFile(name string, mode os.FileMode) error {
func (dl *Datalake) CreateDirectory(name string) error {
log.Trace("Datalake::CreateDirectory : name %s", name)
directoryURL := dl.Filesystem.NewDirectoryURL(filepath.Join(dl.Config.prefixPath, name))
_, err := directoryURL.Create(context.Background(), false)
directoryURL := dl.Filesystem.NewDirectoryClient(filepath.Join(dl.Config.prefixPath, name))
_, err := directoryURL.Create(context.Background(), &directory.CreateOptions{
CPKInfo: dl.datalakeCPKOpt,
AccessConditions: &directory.AccessConditions{
ModifiedAccessConditions: &directory.ModifiedAccessConditions{
IfNoneMatch: to.Ptr(azcore.ETagAny),
},
},
})
if err != nil {
serr := storeDatalakeErrToErr(err)
if serr == InvalidPermission {
log.Err("Datalake::CreateDirectory : Insufficient permissions for %s [%s]", name, err.Error())
return syscall.EACCES
} else if serr == ErrFileAlreadyExists {
log.Err("Datalake::CreateDirectory : Path already exists for %s [%s]", name, err.Error())
return syscall.EEXIST
} else {
log.Err("Datalake::CreateDirectory : Failed to create directory %s [%s]", name, err.Error())
return err
@ -288,9 +269,8 @@ func (dl *Datalake) CreateLink(source string, target string) error {
// DeleteFile : Delete a file in the filesystem/directory
func (dl *Datalake) DeleteFile(name string) (err error) {
log.Trace("Datalake::DeleteFile : name %s", name)
fileURL := dl.Filesystem.NewRootDirectoryURL().NewFileURL(filepath.Join(dl.Config.prefixPath, name))
_, err = fileURL.Delete(context.Background())
fileClient := dl.Filesystem.NewFileClient(filepath.Join(dl.Config.prefixPath, name))
_, err = fileClient.Delete(context.Background(), nil)
if err != nil {
serr := storeDatalakeErrToErr(err)
if serr == ErrFileNotFound {
@ -315,8 +295,8 @@ func (dl *Datalake) DeleteFile(name string) (err error) {
func (dl *Datalake) DeleteDirectory(name string) (err error) {
log.Trace("Datalake::DeleteDirectory : name %s", name)
directoryURL := dl.Filesystem.NewDirectoryURL(filepath.Join(dl.Config.prefixPath, name))
_, err = directoryURL.Delete(context.Background(), nil, true)
directoryClient := dl.Filesystem.NewDirectoryClient(filepath.Join(dl.Config.prefixPath, name))
_, err = directoryClient.Delete(context.Background(), nil)
// TODO : A continuation token can be passed here for recursive delete; should we implement logic to follow the continuation token? The SDK does not currently do this.
if err != nil {
serr := storeDatalakeErrToErr(err)
@ -336,12 +316,11 @@ func (dl *Datalake) DeleteDirectory(name string) (err error) {
func (dl *Datalake) RenameFile(source string, target string) error {
log.Trace("Datalake::RenameFile : %s -> %s", source, target)
fileURL := dl.Filesystem.NewRootDirectoryURL().NewFileURL(url.PathEscape(filepath.Join(dl.Config.prefixPath, source)))
fileClient := dl.Filesystem.NewFileClient(url.PathEscape(filepath.Join(dl.Config.prefixPath, source)))
_, err := fileURL.Rename(context.Background(),
azbfs.RenameFileOptions{
DestinationPath: filepath.Join(dl.Config.prefixPath, target),
})
_, err := fileClient.Rename(context.Background(), filepath.Join(dl.Config.prefixPath, target), &file.RenameOptions{
CPKInfo: dl.datalakeCPKOpt,
})
if err != nil {
serr := storeDatalakeErrToErr(err)
if serr == ErrFileNotFound {
@ -360,12 +339,10 @@ func (dl *Datalake) RenameFile(source string, target string) error {
func (dl *Datalake) RenameDirectory(source string, target string) error {
log.Trace("Datalake::RenameDirectory : %s -> %s", source, target)
directoryURL := dl.Filesystem.NewDirectoryURL(url.PathEscape(filepath.Join(dl.Config.prefixPath, source)))
_, err := directoryURL.Rename(context.Background(),
azbfs.RenameDirectoryOptions{
DestinationPath: filepath.Join(dl.Config.prefixPath, target),
})
directoryClient := dl.Filesystem.NewDirectoryClient(url.PathEscape(filepath.Join(dl.Config.prefixPath, source)))
_, err := directoryClient.Rename(context.Background(), filepath.Join(dl.Config.prefixPath, target), &directory.RenameOptions{
CPKInfo: dl.datalakeCPKOpt,
})
if err != nil {
serr := storeDatalakeErrToErr(err)
if serr == ErrFileNotFound {
@ -384,8 +361,10 @@ func (dl *Datalake) RenameDirectory(source string, target string) error {
func (dl *Datalake) GetAttr(name string) (attr *internal.ObjAttr, err error) {
log.Trace("Datalake::GetAttr : name %s", name)
pathURL := dl.Filesystem.NewRootDirectoryURL().NewFileURL(filepath.Join(dl.Config.prefixPath, name))
prop, err := pathURL.GetProperties(context.Background())
fileClient := dl.Filesystem.NewFileClient(filepath.Join(dl.Config.prefixPath, name))
prop, err := fileClient.GetProperties(context.Background(), &file.GetPropertiesOptions{
CPKInfo: dl.datalakeCPKOpt,
})
if err != nil {
e := storeDatalakeErrToErr(err)
if e == ErrFileNotFound {
@ -399,14 +378,7 @@ func (dl *Datalake) GetAttr(name string) (attr *internal.ObjAttr, err error) {
}
}
lastModified, err := time.Parse(time.RFC1123, prop.LastModified())
if err != nil {
log.Err("Datalake::GetAttr : Failed to convert last modified time for %s [%s]", name, err.Error())
return attr, err
}
mode, err := getFileMode(prop.XMsPermissions())
mode, err := getFileMode(*prop.Permissions)
if err != nil {
log.Err("Datalake::GetAttr : Failed to get file mode for %s [%s]", name, err.Error())
return attr, err
@ -415,28 +387,30 @@ func (dl *Datalake) GetAttr(name string) (attr *internal.ObjAttr, err error) {
attr = &internal.ObjAttr{
Path: name,
Name: filepath.Base(name),
Size: prop.ContentLength(),
Size: *prop.ContentLength,
Mode: mode,
Mtime: lastModified,
Atime: lastModified,
Ctime: lastModified,
Crtime: lastModified,
Mtime: *prop.LastModified,
Atime: *prop.LastModified,
Ctime: *prop.LastModified,
Crtime: *prop.LastModified,
Flags: internal.NewFileBitMap(),
}
parseProperties(attr, prop.XMsProperties())
if azbfs.PathResourceDirectory == azbfs.PathResourceType(prop.XMsResourceType()) {
parseMetadata(attr, prop.Metadata)
if *prop.ResourceType == "directory" {
attr.Flags = internal.NewDirBitMap()
attr.Mode = attr.Mode | os.ModeDir
}
attr.Flags.Set(internal.PropFlagMetadataRetrieved)
if dl.Config.honourACL && dl.Config.authConfig.ObjectID != "" {
acl, err := pathURL.GetAccessControl(context.Background())
acl, err := fileClient.GetAccessControl(context.Background(), nil)
if err != nil {
// Just ignore the error here as rest of the attributes have been retrieved
log.Err("Datalake::GetAttr : Failed to get ACL for %s [%s]", name, err.Error())
} else {
mode, err := getFileModeFromACL(dl.Config.authConfig.ObjectID, acl.ACL, acl.Owner)
mode, err := getFileModeFromACL(dl.Config.authConfig.ObjectID, *acl.ACL, *acl.Owner)
if err != nil {
log.Err("Datalake::GetAttr : Failed to get file mode from ACL for %s [%s]", name, err.Error())
} else {
@ -472,14 +446,14 @@ func (dl *Datalake) List(prefix string, marker *string, count int32) ([]*interna
}
// Get a result segment starting with the path indicated by the current Marker.
listPath, err := dl.Filesystem.ListPaths(context.Background(),
azbfs.ListPathsFilesystemOptions{
Path: &prefixPath,
Recursive: false,
MaxResults: &count,
ContinuationToken: marker,
})
pager := dl.Filesystem.NewListPathsPager(false, &filesystem.ListPathsOptions{
Marker: marker,
MaxResults: &count,
Prefix: &prefixPath,
})
// Process the paths returned in this result segment (if the segment is empty, the loop body won't execute)
listPath, err := pager.NextPage(context.Background())
if err != nil {
log.Err("Datalake::List : Failed to validate account with given auth %s", err.Error())
m := ""
@ -496,7 +470,7 @@ func (dl *Datalake) List(prefix string, marker *string, count int32) ([]*interna
// Process the paths returned in this result segment (if the segment is empty, the loop body won't execute)
for _, pathInfo := range listPath.Paths {
var attr *internal.ObjAttr
var lastModifiedTime time.Time
if dl.Config.disableSymlink {
var mode fs.FileMode
if pathInfo.Permissions != nil {
@ -519,15 +493,21 @@ func (dl *Datalake) List(prefix string, marker *string, count int32) ([]*interna
log.Err("Datalake::List : Failed to get file length for %s", *pathInfo.Name)
}
if pathInfo.LastModified != nil {
lastModifiedTime, err = time.Parse(time.RFC1123, *pathInfo.LastModified)
if err != nil {
log.Err("Datalake::List : Failed to get last modified time for %s [%s]", *pathInfo.Name, err.Error())
}
}
attr = &internal.ObjAttr{
Path: *pathInfo.Name,
Name: filepath.Base(*pathInfo.Name),
Size: contentLength,
Mode: mode,
Mtime: pathInfo.LastModifiedTime(),
Atime: pathInfo.LastModifiedTime(),
Ctime: pathInfo.LastModifiedTime(),
Crtime: pathInfo.LastModifiedTime(),
Mtime: lastModifiedTime,
Atime: lastModifiedTime,
Ctime: lastModifiedTime,
Crtime: lastModifiedTime,
Flags: internal.NewFileBitMap(),
}
if pathInfo.IsDirectory != nil && *pathInfo.IsDirectory {
@ -552,10 +532,10 @@ func (dl *Datalake) List(prefix string, marker *string, count int32) ([]*interna
// Alternatively, if you want Datalake list paths to return metadata/properties as well.
// pass CLI parameter --no-symlinks=false in the mount command.
pathList = append(pathList, attr)
}
m := listPath.XMsContinuation()
return pathList, &m, nil
return pathList, listPath.Continuation, nil
}
// ReadToFile : Download a file to a local file
@ -574,12 +554,12 @@ func (dl *Datalake) ReadInBuffer(name string, offset int64, len int64, data []by
}
// WriteFromFile : Upload local file to file
func (dl *Datalake) WriteFromFile(name string, metadata map[string]string, fi *os.File) (err error) {
func (dl *Datalake) WriteFromFile(name string, metadata map[string]*string, fi *os.File) (err error) {
return dl.BlockBlob.WriteFromFile(name, metadata, fi)
}
// WriteFromBuffer : Upload from a buffer to a file
func (dl *Datalake) WriteFromBuffer(name string, metadata map[string]string, data []byte) error {
func (dl *Datalake) WriteFromBuffer(name string, metadata map[string]*string, data []byte) error {
return dl.BlockBlob.WriteFromBuffer(name, metadata, data)
}
@ -603,7 +583,7 @@ func (dl *Datalake) TruncateFile(name string, size int64) error {
// ChangeMod : Change mode of a path
func (dl *Datalake) ChangeMod(name string, mode os.FileMode) error {
log.Trace("Datalake::ChangeMod : Change mode of file %s to %s", name, mode)
fileURL := dl.Filesystem.NewRootDirectoryURL().NewFileURL(filepath.Join(dl.Config.prefixPath, name))
fileClient := dl.Filesystem.NewFileClient(filepath.Join(dl.Config.prefixPath, name))
/*
// If we need to call the ACL set api then we need to get older acl string here
@ -621,7 +601,9 @@ func (dl *Datalake) ChangeMod(name string, mode os.FileMode) error {
*/
newPerm := getACLPermissions(mode)
_, err := fileURL.SetAccessControl(context.Background(), azbfs.BlobFSAccessControl{Permissions: newPerm})
_, err := fileClient.SetAccessControl(context.Background(), &file.SetAccessControlOptions{
Permissions: &newPerm,
})
if err != nil {
log.Err("Datalake::ChangeMod : Failed to change mode of file %s to %s [%s]", name, mode, err.Error())
e := storeDatalakeErrToErr(err)
@ -662,6 +644,11 @@ func (dl *Datalake) ChangeOwner(name string, _ int, _ int) error {
return syscall.ENOTSUP
}
// GetCommittedBlockList : Get the list of committed blocks
func (dl *Datalake) GetCommittedBlockList(name string) (*internal.CommittedBlockList, error) {
return dl.BlockBlob.GetCommittedBlockList(name)
}
// StageBlock : stages a block and returns its blockid
func (dl *Datalake) StageBlock(name string, data []byte, id string) error {
return dl.BlockBlob.StageBlock(name, data, id)

(The diff for this file is not shown because of its size.)


@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -34,9 +34,7 @@
package azstorage
import (
"context"
"crypto/md5"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
@ -49,15 +47,19 @@ import (
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
serviceBfs "github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/service"
"github.com/Azure/azure-storage-fuse/v2/common"
"github.com/Azure/azure-storage-fuse/v2/common/log"
"github.com/Azure/azure-storage-fuse/v2/internal"
"github.com/Azure/azure-storage-azcopy/v10/azbfs"
"github.com/Azure/azure-storage-azcopy/v10/ste"
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azdatalake/datalakeerror"
)
// ----------- Helper to create pipeline options ---------------
@ -72,6 +74,7 @@ const (
DualStack bool = true
MaxIdleConns int = 0 // No limit
MaxIdleConnsPerHost int = 100
MaxConnsPerHost int = 0 // No limit
IdleConnTimeout time.Duration = 90 * time.Second
TLSHandshakeTimeout time.Duration = 10 * time.Second
ExpectContinueTimeout time.Duration = 1 * time.Second
@ -80,11 +83,10 @@ const (
MaxResponseHeaderBytes int64 = 0
)
// getAzBlobPipelineOptions : Create pipeline options based on the config
func getAzBlobPipelineOptions(conf AzStorageConfig) (azblob.PipelineOptions, ste.XferRetryOptions) {
retryOptions := ste.XferRetryOptions{
Policy: ste.RetryPolicyExponential, // Use exponential backoff as opposed to linear
MaxTries: conf.maxRetries, // Try at most 3 times to perform the operation (set to 1 to disable retries)
// getAzStorageClientOptions : Create client options based on the config
func getAzStorageClientOptions(conf *AzStorageConfig) (azcore.ClientOptions, error) {
retryOptions := policy.RetryOptions{
MaxRetries: conf.maxRetries, // Try at most 3 times to perform the operation (set to 1 to disable retries)
TryTimeout: time.Second * time.Duration(conf.maxTimeout), // Maximum time allowed for any single try
RetryDelay: time.Second * time.Duration(conf.backoffTime), // Backoff amount for each retry (exponential or linear)
MaxRetryDelay: time.Second * time.Duration(conf.maxRetryDelay), // Max delay between retries
@ -94,82 +96,80 @@ func getAzBlobPipelineOptions(conf AzStorageConfig) (azblob.PipelineOptions, ste
if telemetryValue != "" {
telemetryValue += " "
}
telemetryValue += UserAgent() + " (" + common.GetCurrentDistro() + ")"
telemetryPolicy := newBlobfuseTelemetryPolicy(telemetryValue)
telemetryOptions := azblob.TelemetryOptions{
Value: telemetryValue,
logOptions := getSDKLogOptions()
transportOptions, err := newBlobfuse2HttpClient(conf)
if err != nil {
log.Err("utils::getAzStorageClientOptions : Failed to create transport client [%s]", err.Error())
}
sysLogDisabled := log.GetType() == "silent" // If logging is enabled, allow the SDK to log retries to syslog.
requestLogOptions := azblob.RequestLogOptions{
// TODO: We can potentially consider making LogWarningIfTryOverThreshold a user settable option. For now lets use the default
SyslogDisabled: sysLogDisabled,
}
logOptions := getLogOptions(conf.sdkTrace)
// Create custom HTTPClient to pass to the factory in order to set our proxy
var pipelineHTTPClient = newBlobfuse2HttpClient(conf)
return azblob.PipelineOptions{
Log: logOptions,
RequestLog: requestLogOptions,
Telemetry: telemetryOptions,
HTTPSender: newBlobfuse2HTTPClientFactory(pipelineHTTPClient),
},
// Set RetryOptions to control how HTTP request are retried when retryable failures occur
retryOptions
return azcore.ClientOptions{
Retry: retryOptions,
Logging: logOptions,
PerCallPolicies: []policy.Policy{telemetryPolicy},
Transport: transportOptions,
}, err
}
// getAzBfsPipelineOptions : Create pipeline options based on the config
func getAzBfsPipelineOptions(conf AzStorageConfig) (azbfs.PipelineOptions, ste.XferRetryOptions) {
retryOptions := ste.XferRetryOptions{
Policy: ste.RetryPolicyExponential, // Use exponential backoff as opposed to linear
MaxTries: conf.maxRetries, // Try at most 3 times to perform the operation (set to 1 to disable retries)
TryTimeout: time.Second * time.Duration(conf.maxTimeout), // Maximum time allowed for any single try
RetryDelay: time.Second * time.Duration(conf.backoffTime), // Backoff amount for each retry (exponential or linear)
MaxRetryDelay: time.Second * time.Duration(conf.maxRetryDelay), // Max delay between retries
}
// getAzBlobServiceClientOptions : Create azblob service client options based on the config
func getAzBlobServiceClientOptions(conf *AzStorageConfig) (*service.ClientOptions, error) {
opts, err := getAzStorageClientOptions(conf)
return &service.ClientOptions{
ClientOptions: opts,
}, err
}
telemetryValue := conf.telemetry
if telemetryValue != "" {
telemetryValue += " "
}
// getAzDatalakeServiceClientOptions : Create azdatalake service client options based on the config
func getAzDatalakeServiceClientOptions(conf *AzStorageConfig) (*serviceBfs.ClientOptions, error) {
opts, err := getAzStorageClientOptions(conf)
return &serviceBfs.ClientOptions{
ClientOptions: opts,
}, err
}
telemetryValue += UserAgent() + " (" + common.GetCurrentDistro() + ")"
telemetryOptions := azbfs.TelemetryOptions{
Value: telemetryValue,
// getLogOptions : to configure the SDK logging policy
func getSDKLogOptions() policy.LogOptions {
if log.GetType() == "silent" || log.GetLogLevel() < common.ELogLevel.LOG_DEBUG() {
return policy.LogOptions{}
} else {
// add headers and query params which should be logged and not redacted
return policy.LogOptions{
AllowedHeaders: allowedHeaders,
AllowedQueryParams: allowedQueryParams,
}
}
}
sysLogDisabled := log.GetType() == "silent" // If logging is enabled, allow the SDK to log retries to syslog.
requestLogOptions := azbfs.RequestLogOptions{
// TODO: We can potentially consider making LogWarningIfTryOverThreshold a user settable option. For now lets use the default
SyslogDisabled: sysLogDisabled,
// setSDKLogListener : log the requests and responses.
// It is disabled if:
// - logging type is silent
// - logging level is less than debug
func setSDKLogListener() {
if log.GetType() == "silent" || log.GetLogLevel() < common.ELogLevel.LOG_DEBUG() {
// reset listener
azlog.SetListener(nil)
} else {
azlog.SetListener(func(cls azlog.Event, msg string) {
log.Debug("SDK(%s) : %s", cls, msg)
})
}
logOptions := getLogOptions(conf.sdkTrace)
// Create custom HTTPClient to pass to the factory in order to set our proxy
var pipelineHTTPClient = newBlobfuse2HttpClient(conf)
return azbfs.PipelineOptions{
Log: logOptions,
RequestLog: requestLogOptions,
Telemetry: telemetryOptions,
HTTPSender: newBlobfuse2HTTPClientFactory(pipelineHTTPClient),
},
// Set RetryOptions to control how HTTP request are retried when retryable failures occur
retryOptions
}
// Create an HTTP Client with configured proxy
// TODO: More configurations for other http client parameters?
func newBlobfuse2HttpClient(conf AzStorageConfig) *http.Client {
var ProxyURL func(req *http.Request) (*url.URL, error) = func(req *http.Request) (*url.URL, error) {
// If a proxy address is passed return
var proxyURL url.URL = url.URL{
Host: conf.proxyAddress,
}
return &proxyURL, nil
}
func newBlobfuse2HttpClient(conf *AzStorageConfig) (*http.Client, error) {
var ProxyURL func(req *http.Request) (*url.URL, error)
if conf.proxyAddress == "" {
ProxyURL = nil
ProxyURL = http.ProxyFromEnvironment
} else {
u, err := url.Parse(conf.proxyAddress)
if err != nil {
log.Err("utils::newBlobfuse2HttpClient : Failed to parse proxy : %s [%s]", conf.proxyAddress, err.Error())
return nil, err
}
ProxyURL = http.ProxyURL(u)
}
return &http.Client{
@ -183,6 +183,7 @@ func newBlobfuse2HttpClient(conf AzStorageConfig) *http.Client {
}).Dial, /*Context*/
MaxIdleConns: MaxIdleConns, // No limit
MaxIdleConnsPerHost: MaxIdleConnsPerHost,
MaxConnsPerHost: MaxConnsPerHost, // No limit
IdleConnTimeout: IdleConnTimeout,
TLSHandshakeTimeout: TLSHandshakeTimeout,
ExpectContinueTimeout: ExpectContinueTimeout,
@ -191,85 +192,41 @@ func newBlobfuse2HttpClient(conf AzStorageConfig) *http.Client {
// make things ugly and hence user needs to disable this feature through config
DisableCompression: conf.disableCompression,
MaxResponseHeaderBytes: MaxResponseHeaderBytes,
//ResponseHeaderTimeout: time.Duration{},
//ExpectContinueTimeout: time.Duration{},
},
}, nil
}
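The proxy selection above reduces to the standard library pattern below; newHTTPClient is an illustrative stand-alone equivalent, not blobfuse code.

import (
	"net/http"
	"net/url"
)

func newHTTPClient(proxyAddress string) (*http.Client, error) {
	proxy := http.ProxyFromEnvironment // default: honour http_proxy/https_proxy env vars
	if proxyAddress != "" {
		u, err := url.Parse(proxyAddress)
		if err != nil {
			return nil, err
		}
		proxy = http.ProxyURL(u) // explicit proxy from config wins
	}
	return &http.Client{Transport: &http.Transport{Proxy: proxy}}, nil
}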
// getCloudConfiguration : returns cloud configuration type on the basis of endpoint
func getCloudConfiguration(endpoint string) cloud.Configuration {
if strings.Contains(endpoint, "core.chinacloudapi.cn") {
return cloud.AzureChina
} else if strings.Contains(endpoint, "core.usgovcloudapi.net") {
return cloud.AzureGovernment
} else {
return cloud.AzurePublic
}
}
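A brief usage sketch: the returned configuration is meant to be placed on azcore.ClientOptions.Cloud so that token requests target the matching authority host. The endpoint and exampleClientOptions below are placeholders for illustration.

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

// An endpoint in the US Government cloud selects cloud.AzureGovernment,
// which points token requests at the corresponding authority host.
func exampleClientOptions() azcore.ClientOptions {
	return azcore.ClientOptions{
		Cloud: getCloudConfiguration("https://myaccount.blob.core.usgovcloudapi.net"),
	}
}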
// newBlobfuse2HTTPClientFactory creates a custom HTTPClientPolicyFactory object that sends HTTP requests to the http client.
func newBlobfuse2HTTPClientFactory(pipelineHTTPClient *http.Client) pipeline.Factory {
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
if err != nil {
log.Err("BlockBlob::newBlobfuse2HTTPClientFactory : HTTP request failed [%s]", err.Error())
err = pipeline.NewError(err, "HTTP request failed")
}
return pipeline.NewHTTPResponse(r), err
}
})
// blobfuseTelemetryPolicy is a custom pipeline policy that prepends the blobfuse user agent string to the one coming from the SDK.
// It is added to the PerCallPolicies, which execute after the SDK's default telemetry policy.
type blobfuseTelemetryPolicy struct {
telemetryValue string
}
func getLogOptions(sdkLogging bool) pipeline.LogOptions {
	return pipeline.LogOptions{
		Log: func(logLevel pipeline.LogLevel, message string) {
			if !sdkLogging {
				return
			}

			// message here is a log generated by the SDK and it contains URLs as well.
			// These URLs have '%' as part of their data, e.g. '/' encoded as %2F.
			// If we pass message down as the first argument to our logging API, it is
			// treated as a format string, and each % in a URL is read as a verb; this
			// results in log strings complaining that a %d was given with no integer
			// argument. The only way to bypass this is to pass message as a second
			// argument so that the logging API does not treat it as a format string.
			switch logLevel {
			case pipeline.LogFatal:
				log.Crit("SDK : %s", message)
			case pipeline.LogPanic:
				log.Crit("SDK : %s", message)
			case pipeline.LogError:
				log.Err("SDK : %s", message)
			case pipeline.LogWarning:
				log.Warn("SDK : %s", message)
			case pipeline.LogInfo:
				log.Info("SDK : %s", message)
			case pipeline.LogDebug:
				log.Debug("SDK : %s", message)
			case pipeline.LogNone:
			default:
			}
		},
		ShouldLog: func(level pipeline.LogLevel) bool {
			if !sdkLogging {
				return false
			}
			currentLogLevel := func(commonLog common.LogLevel) pipeline.LogLevel {
				switch commonLog {
				case common.ELogLevel.INVALID():
					return pipeline.LogNone
				case common.ELogLevel.LOG_OFF():
					return pipeline.LogNone
				case common.ELogLevel.LOG_CRIT():
					return pipeline.LogPanic // Panic logs both Panic and Fatal
				case common.ELogLevel.LOG_ERR():
					return pipeline.LogError
				case common.ELogLevel.LOG_WARNING():
					return pipeline.LogWarning
				case common.ELogLevel.LOG_INFO():
					return pipeline.LogInfo
				case common.ELogLevel.LOG_TRACE():
					return pipeline.LogDebug // No Trace in pipeline.LogLevel
				case common.ELogLevel.LOG_DEBUG():
					return pipeline.LogDebug
				}
				return pipeline.LogNone
			}(log.GetLogLevel())
			return level <= currentLogLevel
		},
	}
}

// newBlobfuseTelemetryPolicy creates an object which prepends the blobfuse user agent string to the User-Agent request header
func newBlobfuseTelemetryPolicy(telemetryValue string) policy.Policy {
	return &blobfuseTelemetryPolicy{telemetryValue: telemetryValue}
}

func (p blobfuseTelemetryPolicy) Do(req *policy.Request) (*http.Response, error) {
	userAgent := p.telemetryValue

	// prepend the blobfuse user agent string
	if ua := req.Raw().Header.Get(common.UserAgentHeader); ua != "" {
		userAgent = fmt.Sprintf("%s %s", userAgent, ua)
	}
	req.Raw().Header.Set(common.UserAgentHeader, userAgent)
	return req.Next()
}
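
A sketch of where this policy is registered (hypothetical helper, not part of the commit): PerCallPolicies run after the SDK's default telemetry policy, so the blobfuse component ends up at the front of the User-Agent value.

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

// exampleUserAgentWiring attaches the custom telemetry policy to the
// SDK client options.
func exampleUserAgentWiring(userAgent string) azcore.ClientOptions {
	return azcore.ClientOptions{
		PerCallPolicies: []policy.Policy{
			newBlobfuseTelemetryPolicy(userAgent),
		},
	}
}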
// ----------- Store error code handling ---------------
@ -283,31 +240,24 @@ const (
InvalidPermission
)
// ErrStr : Store error to string mapping
var ErrStr = map[uint16]string{
ErrNoErr: "No Error found",
ErrUnknown: "Unknown store error",
ErrFileNotFound: "Blob not found",
ErrFileAlreadyExists: "Blob already exists",
}
// For the detailed error list, refer to ServiceCodeType at the link below:
// https://godoc.org/github.com/Azure/azure-storage-blob-go/azblob#ListBlobsSegmentOptions
// For the detailed error list, refer to the link below:
// https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/bloberror/error_codes.go
// Convert blob storage error to common errors
func storeBlobErrToErr(err error) uint16 {
if serr, ok := err.(azblob.StorageError); ok {
switch serr.ServiceCode() {
case azblob.ServiceCodeBlobAlreadyExists:
var respErr *azcore.ResponseError
errors.As(err, &respErr)
if respErr != nil {
switch (bloberror.Code)(respErr.ErrorCode) {
case bloberror.BlobAlreadyExists:
return ErrFileAlreadyExists
case azblob.ServiceCodeBlobNotFound:
case bloberror.BlobNotFound:
return ErrFileNotFound
case azblob.ServiceCodeInvalidRange:
case bloberror.InvalidRange:
return InvalidRange
case azblob.ServiceCodeLeaseIDMissing:
case bloberror.LeaseIDMissing:
return BlobIsUnderLease
case azblob.ServiceCodeInsufficientAccountPermissions:
return InvalidPermission
case "AuthorizationPermissionMismatch":
case bloberror.InsufficientAccountPermissions, bloberror.AuthorizationPermissionMismatch:
return InvalidPermission
default:
return ErrUnknown
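
For reference, a minimal sketch (not part of the commit) of the track-2 error idiom the new code relies on; both helper names are hypothetical.

import (
	"errors"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
)

// isBlobNotFound unwraps the *azcore.ResponseError and compares its
// ErrorCode, mirroring the switch above.
func isBlobNotFound(err error) bool {
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		return bloberror.Code(respErr.ErrorCode) == bloberror.BlobNotFound
	}
	return false
}

// isBlobNotFoundShort uses the shorthand the SDK ships for single-code checks.
func isBlobNotFoundShort(err error) bool {
	return bloberror.HasCode(err, bloberror.BlobNotFound)
}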
@ -318,17 +268,20 @@ func storeBlobErrToErr(err error) uint16 {
// Convert datalake storage error to common errors
func storeDatalakeErrToErr(err error) uint16 {
if serr, ok := err.(azbfs.StorageError); ok {
switch serr.ServiceCode() {
case azbfs.ServiceCodePathAlreadyExists:
var respErr *azcore.ResponseError
errors.As(err, &respErr)
if respErr != nil {
switch (datalakeerror.StorageErrorCode)(respErr.ErrorCode) {
case datalakeerror.PathAlreadyExists:
return ErrFileAlreadyExists
case azbfs.ServiceCodePathNotFound:
case datalakeerror.PathNotFound:
return ErrFileNotFound
case azbfs.ServiceCodeSourcePathNotFound:
case datalakeerror.SourcePathNotFound:
return ErrFileNotFound
case "LeaseIdMissing":
case datalakeerror.LeaseIDMissing:
return BlobIsUnderLease
case "AuthorizationPermissionMismatch":
case datalakeerror.AuthorizationPermissionMismatch:
return InvalidPermission
default:
return ErrUnknown
@ -339,43 +292,19 @@ func storeDatalakeErrToErr(err error) uint16 {
// ----------- Metadata handling ---------------
//
// Converts datalake properties to a metadata map
func newMetadata(properties string) map[string]string {
metadata := make(map[string]string)
if properties != "" {
// Create a map of the properties (metadata)
pairs := strings.Split(properties, ",")
for _, p := range pairs {
components := strings.SplitN(p, "=", 2)
key := components[0]
value, err := base64.StdEncoding.DecodeString(components[1])
if err == nil {
metadata[key] = string(value)
}
}
}
return metadata
}
// parseProperties : Parse the properties of a given datalake path and populate its attributes
func parseProperties(attr *internal.ObjAttr, properties string) {
metadata := newMetadata(properties)
// Parse the metadata
parseMetadata(attr, metadata)
}
// parseMetadata : Parse the metadata of a given path and populate its attributes
func parseMetadata(attr *internal.ObjAttr, metadata map[string]string) {
func parseMetadata(attr *internal.ObjAttr, metadata map[string]*string) {
// Save the metadata in attributes so that later if someone wants to add anything it can work
attr.Metadata = metadata
for k, v := range metadata {
if strings.ToLower(k) == folderKey && v == "true" {
attr.Flags = internal.NewDirBitMap()
attr.Mode = attr.Mode | os.ModeDir
} else if strings.ToLower(k) == symlinkKey && v == "true" {
attr.Flags = internal.NewSymlinkBitMap()
attr.Mode = attr.Mode | os.ModeSymlink
if v != nil {
if strings.ToLower(k) == folderKey && *v == "true" {
attr.Flags = internal.NewDirBitMap()
attr.Mode = attr.Mode | os.ModeDir
} else if strings.ToLower(k) == symlinkKey && *v == "true" {
attr.Flags = internal.NewSymlinkBitMap()
attr.Mode = attr.Mode | os.ModeSymlink
}
}
}
}
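
A sketch of the new calling convention (hypothetical helper, not part of the commit): track-2 list/get-properties calls return metadata as map[string]*string, so a directory marker now arrives as a *string that parseMetadata dereferences after a nil check. The "hdi_isfolder" key is assumed here to match folderKey.

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"

	"github.com/Azure/azure-storage-fuse/v2/internal"
)

// exampleFolderAttr builds the attributes a directory marker blob
// would produce.
func exampleFolderAttr() *internal.ObjAttr {
	attr := &internal.ObjAttr{}
	parseMetadata(attr, map[string]*string{
		"hdi_isfolder": to.Ptr("true"), // assumed folderKey value
	})
	return attr // attr.Mode now carries os.ModeDir
}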
@ -469,34 +398,35 @@ func populateContentType(newSet string) error { //nolint
// ----------- Blob access tier type conversion ---------------
//
// AccessTierMap : Store config to access tier mapping
var AccessTiers = map[string]azblob.AccessTierType{
"none": azblob.AccessTierNone,
"hot": azblob.AccessTierHot,
"cool": azblob.AccessTierCool,
"archive": azblob.AccessTierArchive,
"p4": azblob.AccessTierP4,
"p6": azblob.AccessTierP6,
"p10": azblob.AccessTierP10,
"p15": azblob.AccessTierP15,
"p20": azblob.AccessTierP20,
"p30": azblob.AccessTierP30,
"p40": azblob.AccessTierP40,
"p50": azblob.AccessTierP50,
"p60": azblob.AccessTierP60,
"p70": azblob.AccessTierP70,
"p80": azblob.AccessTierP80,
var AccessTiers = map[string]blob.AccessTier{
"hot": blob.AccessTierHot,
"cool": blob.AccessTierCool,
"cold": blob.AccessTierCold,
"archive": blob.AccessTierArchive,
"p4": blob.AccessTierP4,
"p6": blob.AccessTierP6,
"p10": blob.AccessTierP10,
"p15": blob.AccessTierP15,
"p20": blob.AccessTierP20,
"p30": blob.AccessTierP30,
"p40": blob.AccessTierP40,
"p50": blob.AccessTierP50,
"p60": blob.AccessTierP60,
"p70": blob.AccessTierP70,
"p80": blob.AccessTierP80,
"premium": blob.AccessTierPremium,
}
func getAccessTierType(name string) azblob.AccessTierType {
func getAccessTierType(name string) *blob.AccessTier {
if name == "" {
return azblob.AccessTierNone
return nil
}
value, found := AccessTiers[strings.ToLower(name)]
if found {
return value
return &value
}
return azblob.AccessTierNone
return nil
}
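
A sketch of why the return type changed to a pointer (hypothetical helper, not part of the commit): the *blob.AccessTier maps directly onto the SDK's optional Tier fields, where nil simply means "do not set a tier on this request".

import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"

// exampleUploadOptions translates the config value into SDK upload options.
func exampleUploadOptions(cfgTier string) *blockblob.UploadOptions {
	return &blockblob.UploadOptions{
		Tier: getAccessTierType(cfgTier), // nil for "", "none" or unrecognised values
	}
}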
// Called by x method


@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -39,7 +39,8 @@ import (
"strconv"
"testing"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-storage-fuse/v2/common"
"github.com/Azure/azure-storage-fuse/v2/common/log"
"github.com/stretchr/testify/assert"
@ -180,29 +181,31 @@ func (s *utilsTestSuite) TestGetContentType() {
type accesTierVal struct {
val string
result azblob.AccessTierType
result *blob.AccessTier
}
func (s *utilsTestSuite) TestGetAccessTierType() {
assert := assert.New(s.T())
var inputs = []accesTierVal{
{val: "", result: azblob.AccessTierNone},
{val: "none", result: azblob.AccessTierNone},
{val: "hot", result: azblob.AccessTierHot},
{val: "cool", result: azblob.AccessTierCool},
{val: "archive", result: azblob.AccessTierArchive},
{val: "p4", result: azblob.AccessTierP4},
{val: "p6", result: azblob.AccessTierP6},
{val: "p10", result: azblob.AccessTierP10},
{val: "p15", result: azblob.AccessTierP15},
{val: "p20", result: azblob.AccessTierP20},
{val: "p30", result: azblob.AccessTierP30},
{val: "p40", result: azblob.AccessTierP40},
{val: "p50", result: azblob.AccessTierP50},
{val: "p60", result: azblob.AccessTierP60},
{val: "p70", result: azblob.AccessTierP70},
{val: "p80", result: azblob.AccessTierP80},
{val: "random", result: azblob.AccessTierNone},
{val: "", result: nil},
{val: "none", result: nil},
{val: "hot", result: to.Ptr(blob.AccessTierHot)},
{val: "cool", result: to.Ptr(blob.AccessTierCool)},
{val: "cold", result: to.Ptr(blob.AccessTierCold)},
{val: "archive", result: to.Ptr(blob.AccessTierArchive)},
{val: "p4", result: to.Ptr(blob.AccessTierP4)},
{val: "p6", result: to.Ptr(blob.AccessTierP6)},
{val: "p10", result: to.Ptr(blob.AccessTierP10)},
{val: "p15", result: to.Ptr(blob.AccessTierP15)},
{val: "p20", result: to.Ptr(blob.AccessTierP20)},
{val: "p30", result: to.Ptr(blob.AccessTierP30)},
{val: "p40", result: to.Ptr(blob.AccessTierP40)},
{val: "p50", result: to.Ptr(blob.AccessTierP50)},
{val: "p60", result: to.Ptr(blob.AccessTierP60)},
{val: "p70", result: to.Ptr(blob.AccessTierP70)},
{val: "p80", result: to.Ptr(blob.AccessTierP80)},
{val: "premium", result: to.Ptr(blob.AccessTierPremium)},
{val: "random", result: nil},
}
for _, i := range inputs {
s.Run(i.val, func() {
@ -319,30 +322,54 @@ func (s *utilsTestSuite) TestSanitizeSASKey() {
func (s *utilsTestSuite) TestBlockNonProxyOptions() {
assert := assert.New(s.T())
po, ro := getAzBlobPipelineOptions(AzStorageConfig{})
assert.EqualValues(ro.MaxTries, int(0))
assert.NotEqual(po.RequestLog.SyslogDisabled, true)
opt, err := getAzBlobServiceClientOptions(&AzStorageConfig{})
assert.Nil(err)
assert.EqualValues(opt.Retry.MaxRetries, 0)
assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1)
}
func (s *utilsTestSuite) TestBlockProxyOptions() {
assert := assert.New(s.T())
po, ro := getAzBlobPipelineOptions(AzStorageConfig{proxyAddress: "127.0.0.1", maxRetries: 3})
assert.EqualValues(ro.MaxTries, 3)
assert.NotEqual(po.RequestLog.SyslogDisabled, true)
opt, err := getAzBlobServiceClientOptions(&AzStorageConfig{proxyAddress: "127.0.0.1", maxRetries: 3})
assert.Nil(err)
assert.EqualValues(opt.Retry.MaxRetries, 3)
assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1)
opt, err = getAzBlobServiceClientOptions(&AzStorageConfig{proxyAddress: "http://127.0.0.1:8080", maxRetries: 3})
assert.Nil(err)
assert.EqualValues(opt.Retry.MaxRetries, 3)
assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1)
opt, err = getAzBlobServiceClientOptions(&AzStorageConfig{proxyAddress: "https://128.0.0.1:8080", maxRetries: 3})
assert.Nil(err)
assert.EqualValues(opt.Retry.MaxRetries, 3)
assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1)
}
func (s *utilsTestSuite) TestBfsNonProxyOptions() {
assert := assert.New(s.T())
po, ro := getAzBfsPipelineOptions(AzStorageConfig{})
assert.EqualValues(ro.MaxTries, int(0))
assert.NotEqual(po.RequestLog.SyslogDisabled, true)
opt, err := getAzDatalakeServiceClientOptions(&AzStorageConfig{})
assert.Nil(err)
assert.EqualValues(opt.Retry.MaxRetries, 0)
assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1)
}
func (s *utilsTestSuite) TestBfsProxyOptions() {
assert := assert.New(s.T())
po, ro := getAzBfsPipelineOptions(AzStorageConfig{proxyAddress: "127.0.0.1", maxRetries: 3})
assert.EqualValues(ro.MaxTries, 3)
assert.NotEqual(po.RequestLog.SyslogDisabled, true)
opt, err := getAzDatalakeServiceClientOptions(&AzStorageConfig{proxyAddress: "127.0.0.1", maxRetries: 3})
assert.Nil(err)
assert.EqualValues(opt.Retry.MaxRetries, 3)
assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1)
opt, err = getAzDatalakeServiceClientOptions(&AzStorageConfig{proxyAddress: "http://127.0.0.1:8080", maxRetries: 3})
assert.Nil(err)
assert.EqualValues(opt.Retry.MaxRetries, 3)
assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1)
opt, err = getAzDatalakeServiceClientOptions(&AzStorageConfig{proxyAddress: "https://128.0.0.1:8080", maxRetries: 3})
assert.Nil(err)
assert.EqualValues(opt.Retry.MaxRetries, 3)
assert.GreaterOrEqual(len(opt.Logging.AllowedHeaders), 1)
}
type endpointAccountType struct {


@ -9,7 +9,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -43,10 +43,20 @@ import (
// Various flags denoting state of a block
const (
BlockFlagFresh uint16 = iota
BlockFlagDirty // Block has been written and data is not persisted yet
BlockFlagFailed // Block upload/download has failed
BlockFlagSynced // Block has been synced to the container
BlockFlagFresh uint16 = iota
BlockFlagDownloading // Block is being downloaded
BlockFlagUploading // Block is being uploaded
BlockFlagDirty // Block has been written and data is not persisted yet
BlockFlagSynced // Block has been written and data is persisted
BlockFlagFailed // Block upload/download has failed
)
// Status values denoting the result of an upload or download of a block
const (
BlockStatusDownloaded int = iota + 1 // Download of this block is complete
BlockStatusUploaded // Upload of this block is complete
BlockStatusDownloadFailed // Download of this block has failed
BlockStatusUploadFailed // Upload of this block has failed
)
// Block is a memory mapped buffer with its state to hold data
@ -60,6 +70,12 @@ type Block struct {
node *list.Element // node representation of this block in the list inside handle
}
type blockInfo struct {
id string // blockID of the block
committed bool // flag to determine if the block has been committed or not
size uint64 // length of data in block
}
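
A hypothetical sketch (not part of the commit) of how a handle might consume blockInfo: track staged blocks by index and flush only when at least one block is still uncommitted.

// needsCommit reports whether any staged block still requires a commit.
func needsCommit(staged map[int64]blockInfo) bool {
	for _, bi := range staged {
		if !bi.committed {
			return true
		}
	}
	return false
}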
// AllocateBlock creates a new memory mapped buffer for the given size
func AllocateBlock(size uint64) (*Block, error) {
if size == 0 {
@ -119,9 +135,9 @@ func (b *Block) Uploading() {
}
// Ready marks this Block is now ready for reading by its first reader (data download completed)
func (b *Block) Ready() {
func (b *Block) Ready(val int) {
select {
case b.state <- 1:
case b.state <- val:
break
default:
break
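
A minimal standalone sketch (not part of the commit) of the notification pattern above: Ready performs a non-blocking send on a one-slot channel, so signalling an already-signalled block never blocks, and the value tells the waiting reader how the transfer ended.

package main

import "fmt"

func main() {
	// state mirrors Block.state: a one-slot channel used as a latch.
	state := make(chan int, 1)

	// ready mirrors Block.Ready: a non-blocking send.
	ready := func(val int) {
		select {
		case state <- val:
		default: // latch already set; drop the duplicate signal
		}
	}

	ready(1) // e.g. BlockStatusDownloaded
	ready(3) // duplicate/late signal is discarded, not queued

	fmt.Println(<-state) // prints 1: the first status wins
}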
@ -157,18 +173,3 @@ func (b *Block) Failed() {
func (b *Block) IsFailed() bool {
return b.flags.IsSet(BlockFlagFailed)
}
// Mark this block as synced to storage
func (b *Block) Synced() {
b.flags.Set(BlockFlagSynced)
}
// Mark this block as not synced to storage
func (b *Block) ClearSynced() {
b.flags.Clear(BlockFlagSynced)
}
// Check this block is synced to storage
func (b *Block) IsSynced() bool {
return b.flags.IsSet(BlockFlagSynced)
}

(Diff not shown because of its large size.)

(Diff not shown because of its large size.)


@ -12,7 +12,7 @@
Licensed under the MIT License <http://opensource.org/licenses/MIT>.
Copyright © 2020-2023 Microsoft Corporation. All rights reserved.
Copyright © 2020-2024 Microsoft Corporation. All rights reserved.
Author : <blobfusedev@microsoft.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
@ -143,7 +143,7 @@ func (suite *blockTestSuite) TestReady() {
b.ReUse()
suite.assert.NotNil(b.state)
b.Ready()
b.Ready(BlockStatusDownloaded)
suite.assert.Equal(len(b.state), 1)
<-b.state
@ -167,7 +167,63 @@ func (suite *blockTestSuite) TestUnBlock() {
suite.assert.NotNil(b.state)
suite.assert.Nil(b.node)
b.Ready()
b.Ready(BlockStatusDownloaded)
suite.assert.Equal(len(b.state), 1)
<-b.state
suite.assert.Equal(len(b.state), 0)
b.Unblock()
suite.assert.NotNil(b.state)
suite.assert.Equal(len(b.state), 0)
<-b.state
suite.assert.Equal(len(b.state), 0)
_ = b.Delete()
}
func (suite *blockTestSuite) TestWriter() {
suite.assert = assert.New(suite.T())
b, err := AllocateBlock(1)
suite.assert.NotNil(b)
suite.assert.Nil(err)
suite.assert.Nil(b.state)
suite.assert.Nil(b.node)
suite.assert.False(b.IsDirty())
b.ReUse()
suite.assert.NotNil(b.state)
suite.assert.Nil(b.node)
suite.assert.Zero(b.offset)
suite.assert.Zero(b.endIndex)
suite.assert.Equal(b.id, int64(-1))
suite.assert.False(b.IsDirty())
b.Ready(BlockStatusDownloaded)
suite.assert.Equal(len(b.state), 1)
<-b.state
suite.assert.Equal(len(b.state), 0)
b.Unblock()
suite.assert.NotNil(b.state)
suite.assert.Equal(len(b.state), 0)
b.Uploading()
suite.assert.NotNil(b.state)
b.Dirty()
suite.assert.True(b.IsDirty())
b.Failed()
suite.assert.True(b.IsDirty())
b.NoMoreDirty()
suite.assert.False(b.IsDirty())
b.Ready(BlockStatusUploaded)
suite.assert.Equal(len(b.state), 1)
<-b.state

(Some files were not shown because too many files changed in this diff.)