Added MSI Tests and more to code coverage (#857)
This commit is contained in:
Parent
39fd5969d6
Commit
26030f2754
@@ -18,4 +18,5 @@ build/
tools/
test/manual_scripts/create1000.go
test/manual_scripts/cachetest.go
lint.log
azure-storage-fuse
@@ -11,6 +11,7 @@
- Fixed a bug in OAuth token parsing when expires_on denotes the number of seconds
- Fixed a bug in the rmdir flow. Don't allow directory deletion if the local cache says it's empty; on the container it might still have files.
- Fixed a bug in background mode where auth validation would be run twice
- Fixed a bug in content type parsing for a 7z compressed file

## 2.0.0-preview.2 (2022-05-31)
**Performance Improvements**
@@ -12,17 +12,24 @@ jobs:
    strategy:
      matrix:
        Ubuntu-18:
          AgentName: 'blobfuse-ubuntu18'
          imageName: 'ubuntu-18.04'
          containerName: 'test-cnt-ubn-18'
          fuselib: 'libfuse-dev'
          fuselib2: 'fuse'
          tags: 'fuse2'
        Ubuntu-20:
          AgentName: 'blobfuse-ubuntu20'
          imageName: 'ubuntu-20.04'
          containerName: 'test-cnt-ubn-20'
          fuselib: 'libfuse3-dev'
          fuselib2: 'fuse3'
          tags: 'fuse3'

    pool:
-     vmImage: $(imageName)
+     name: "blobfuse-ubuntu-pool"
+     demands:
+       - ImageOverride -equals $(AgentName)

    variables:
      - group: NightlyBlobFuse
@@ -59,10 +66,18 @@ jobs:
        workingDirectory: $(WORK_DIR)

    - script: |
-       sudo apt-get update --fix-missing
-       sudo apt-get install $(fuselib) -y
+       sudo apt-get update --fix-missing -o Dpkg::Options::="--force-confnew"
+       sudo apt-get install make cmake gcc g++ parallel $(fuselib) $(fuselib2) -y -o Dpkg::Options::="--force-confnew"
      displayName: 'Install libfuse'

    # Create directory structure
    - script: |
        sudo mkdir -p $(ROOT_DIR)
        sudo chown -R `whoami` $(ROOT_DIR)
        chmod 777 $(ROOT_DIR)
      displayName: 'Create Directory Structure'

    # -------------------------------------------------------
    # Pull and build the code
    - template: 'azure-pipeline-templates/build.yml'
@@ -75,6 +90,7 @@ jobs:
        container: $(containerName)
        tags: $(tags)
        fuselib: $(fuselib)
+       skip_msi: "false"
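`skip_msi: "false"` is what switches the new MSI legs on in the test template. For orientation, a sketch of how an MSI-based auth config could look next to the shared-key one exercised further down in this diff; `EAuthType.MSI()` and the `ApplicationID` field are assumed names here, not confirmed by this commit:

```go
// Hypothetical sketch only: the diff confirms azAuthConfig, EAuthType,
// EAccountType, and generateEndpoint, but EAuthType.MSI() and the
// ApplicationID field are assumed names for the MSI variant.
stgConfig := AzStorageConfig{
    container: storageTestConfigurationParameters.BlockContainer,
    authConfig: azAuthConfig{
        AuthMode:      EAuthType.MSI(), // assumed enum member
        AccountType:   EAccountType.BLOCK(),
        AccountName:   storageTestConfigurationParameters.BlockAccount,
        ApplicationID: "00000000-0000-0000-0000-000000000000", // placeholder client ID
        Endpoint:      generateEndpoint(false, storageTestConfigurationParameters.BlockAccount, EAccountType.BLOCK()),
    },
}
```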

    # -------------------------------------------------------
    # UT based code coverage test
@@ -379,6 +395,7 @@ jobs:
      rm -rf $(MOUNT_DIR)/*
      rm -rf $(TEMP_DIR)/*
      ./blobfuse2 unmount all
      ./blobfuse2 gen-test-config --config-file=azure_key.yaml --container-name=$(containerName) --temp-path=$(TEMP_DIR) --output-file=$(BLOBFUSE2_CFG)

      ./blobfuse2.test -test.v -test.coverprofile=$(WORK_DIR)/secure_encrypt.cov secure encrypt --config-file=$(BLOBFUSE2_CFG) --output-file=$(Pipeline.Workspace)/blobfuse2.azsec --passphrase=123123123123123123123123
      if [ $? -ne 0 ]; then
@@ -389,14 +406,20 @@ jobs:
      ps -aux | grep blobfuse2
      rm -rf $(MOUNT_DIR)/*
      cd test/e2e_tests
      go test -v -timeout=7200s ./... -args -mnt-path=$(MOUNT_DIR) -adls=true -tmp-path=$(TEMP_DIR)
      go test -v -timeout=7200s ./... -args -mnt-path=$(MOUNT_DIR) -adls=false -tmp-path=$(TEMP_DIR)
      cd -

-     ./blobfuse2.test -test.v -test.coverprofile=$(WORK_DIR)/secure_set.cov --config-file=$(BLOBFUSE2_CFG) --output-file=$(Pipeline.Workspace)/blobfuse2.azsec --passphrase=123123123123123123123123 --key=logging.level --value=log_debug
+     ./blobfuse2.test -test.v -test.coverprofile=$(WORK_DIR)/secure_set.cov secure set --config-file=$(Pipeline.Workspace)/blobfuse2.azsec --passphrase=123123123123123123123123 --key=logging.level --value=log_debug
      ./blobfuse2 unmount all
      sleep 5
    workingDirectory: $(WORK_DIR)
    displayName: "CLI : Secure Config"
    env:
      NIGHTLY_STO_ACC_NAME: $(NIGHTLY_STO_BLOB_ACC_NAME)
      NIGHTLY_STO_ACC_KEY: $(NIGHTLY_STO_BLOB_ACC_KEY)
      ACCOUNT_TYPE: 'block'
      ACCOUNT_ENDPOINT: 'https://$(NIGHTLY_STO_BLOB_ACC_NAME).blob.core.windows.net'
      VERBOSE_LOG: false

  # -------------------------------------------------------
  # Coverage report consolidation
@@ -232,6 +232,28 @@ func (suite *authTestSuite) TestBlockInvalidSharedKey() {
    }
}

func (suite *authTestSuite) TestBlockInvalidSharedKey2() {
    defer suite.cleanupTest()
    stgConfig := AzStorageConfig{
        container: storageTestConfigurationParameters.BlockContainer,
        authConfig: azAuthConfig{
            AuthMode:    EAuthType.KEY(),
            AccountType: EAccountType.BLOCK(),
            AccountName: storageTestConfigurationParameters.BlockAccount,
            AccountKey:  "abcd>=", // string that will fail to base64 decode
            Endpoint:    generateEndpoint(false, storageTestConfigurationParameters.BlockAccount, EAccountType.BLOCK()),
        },
    }
    assert := assert.New(suite.T())
    stg := NewAzStorageConnection(stgConfig)
    if stg == nil {
        assert.Fail("TestBlockInvalidSharedKey2 : Failed to create Storage object")
    }
    if err := stg.SetupPipeline(); err == nil {
        assert.Fail("TestBlockInvalidSharedKey2 : Setup pipeline succeeded even though the shared key is invalid")
    }
}
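The key "abcd>=" can never decode: '>' sits outside the standard base64 alphabet, so credential construction fails before any network call. A standalone illustration of the failure this test relies on:

```go
package main

import (
    "encoding/base64"
    "fmt"
)

func main() {
    // '>' is not a base64 character, so decoding fails; SetupPipeline is
    // expected to surface this as an error when building the shared key credential.
    _, err := base64.StdEncoding.DecodeString("abcd>=")
    fmt.Println(err) // illegal base64 data at input byte 4
}
```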

func (suite *authTestSuite) TestBlockSharedKey() {
    defer suite.cleanupTest()
    stgConfig := AzStorageConfig{
@@ -238,15 +238,6 @@ func (bb *BlockBlob) SetPrefixPath(path string) error {
    return nil
}

-// Exists : Check whether or not a given blob exists
-func (bb *BlockBlob) Exists(name string) bool {
-    log.Trace("BlockBlob::Exists : name %s", name)
-    if _, err := bb.GetAttr(name); err == syscall.ENOENT {
-        return false
-    }
-    return true
-}
-
// CreateFile : Create a new file in the container/virtual directory
func (bb *BlockBlob) CreateFile(name string, mode os.FileMode) error {
    log.Trace("BlockBlob::CreateFile : name %s", name)
@@ -742,7 +733,7 @@ func (bb *BlockBlob) GetFileBlockOffsets(name string) (*common.BlockOffsetList,
        return &common.BlockOffsetList{}, err
    }
    // if the block list is empty, it's a small file
-   if len(blockList.BlockList) == 0 {
+   if len(storageBlockList.CommittedBlocks) == 0 {
        blockList.Flags.Set(common.SmallFile)
        return &blockList, nil
    }
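The corrected check matters downstream: a blob uploaded in one shot has no committed blocks, so the offset list stays empty and the SmallFile flag is set instead. A hedged sketch of how a caller might branch on the result; only `BlockOffsetList`, `BlockList`, and `SmallFile()` are confirmed by this diff, the surrounding handling is illustrative:

```go
// Sketch: branching on GetFileBlockOffsets; per-block handling is assumed.
offsetList, err := az.GetFileBlockOffsets(internal.GetFileBlockOffsetsOptions{Name: name})
if err != nil {
    return err
}
if offsetList.SmallFile() {
    // single-shot blob: no block list to stitch, rewrite the whole blob
} else {
    // chunked blob: touch only the blocks that overlap the write
    for _, blk := range offsetList.BlockList {
        _ = blk // per-block handling goes here
    }
}
```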
@@ -1123,7 +1123,7 @@ func (s *blockBlobTestSuite) TestWriteFile() {
    s.assert.EqualValues(testData, output)
}

-func (s *blockBlobTestSuite) TestTruncateFileSmaller() {
+func (s *blockBlobTestSuite) TestTruncateSmallFileSmaller() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
@@ -1145,7 +1145,33 @@ func (s *blockBlobTestSuite) TestTruncateFileSmaller() {
    s.assert.EqualValues(testData[:truncatedLength], output)
}

-func (s *blockBlobTestSuite) TestTruncateFileEqual() {
+func (s *blockBlobTestSuite) TestTruncateChunkedFileSmaller() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
    s.az.CreateFile(internal.CreateFileOptions{Name: name})
    testData := "test data"
    data := []byte(testData)
    truncatedLength := 5
    // use our helper to force the max upload size (the size before a blob is broken into blocks) down to 4 bytes
    _, err := uploadReaderAtToBlockBlob(ctx, bytes.NewReader(data), int64(len(data)), 4, s.containerUrl.NewBlockBlobURL(name), azblob.UploadToBlockBlobOptions{
        BlockSize: 4,
    })
    s.assert.Nil(err)

    err = s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)})
    s.assert.Nil(err)

    // Blob should have the updated data
    file := s.containerUrl.NewBlobURL(name)
    resp, err := file.Download(ctx, 0, int64(truncatedLength), azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
    s.assert.Nil(err)
    s.assert.EqualValues(truncatedLength, resp.ContentLength())
    output, _ := ioutil.ReadAll(resp.Body(azblob.RetryReaderOptions{}))
    s.assert.EqualValues(testData[:truncatedLength], output)
}
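Truncating the chunked fixture to 5 bytes lands mid-block: the first 4-byte block survives intact, one byte of the second is kept, and the third is dropped. A self-contained check of that layout; this is my reading of the fixture, not code from the PR:

```go
package main

import "fmt"

func main() {
    payload, blockSize, target := 9, 4, 5
    fullBlocks := target / blockSize // blocks kept whole: 1
    remainder := target % blockSize  // bytes kept in the next block: 1
    dropped := (payload+blockSize-1)/blockSize - fullBlocks
    if remainder > 0 {
        dropped-- // the partially kept block is shortened, not dropped
    }
    fmt.Println(fullBlocks, remainder, dropped) // 1 1 1
}
```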

func (s *blockBlobTestSuite) TestTruncateSmallFileEqual() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
@@ -1167,7 +1193,33 @@ func (s *blockBlobTestSuite) TestTruncateFileEqual() {
    s.assert.EqualValues(testData, output)
}

-func (s *blockBlobTestSuite) TestTruncateFileBigger() {
+func (s *blockBlobTestSuite) TestTruncateChunkedFileEqual() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
    s.az.CreateFile(internal.CreateFileOptions{Name: name})
    testData := "test data"
    data := []byte(testData)
    truncatedLength := 9
    // use our helper to force the max upload size (the size before a blob is broken into blocks) down to 4 bytes
    _, err := uploadReaderAtToBlockBlob(ctx, bytes.NewReader(data), int64(len(data)), 4, s.containerUrl.NewBlockBlobURL(name), azblob.UploadToBlockBlobOptions{
        BlockSize: 4,
    })
    s.assert.Nil(err)

    err = s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)})
    s.assert.Nil(err)

    // Blob should have the updated data
    file := s.containerUrl.NewBlobURL(name)
    resp, err := file.Download(ctx, 0, int64(truncatedLength), azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
    s.assert.Nil(err)
    s.assert.EqualValues(truncatedLength, resp.ContentLength())
    output, _ := ioutil.ReadAll(resp.Body(azblob.RetryReaderOptions{}))
    s.assert.EqualValues(testData, output)
}

func (s *blockBlobTestSuite) TestTruncateSmallFileBigger() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
@@ -1189,6 +1241,32 @@ func (s *blockBlobTestSuite) TestTruncateFileBigger() {
    s.assert.EqualValues(testData, output[:len(data)])
}

func (s *blockBlobTestSuite) TestTruncateChunkedFileBigger() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
    s.az.CreateFile(internal.CreateFileOptions{Name: name})
    testData := "test data"
    data := []byte(testData)
    truncatedLength := 15
    // use our helper to force the max upload size (the size before a blob is broken into blocks) down to 4 bytes
    _, err := uploadReaderAtToBlockBlob(ctx, bytes.NewReader(data), int64(len(data)), 4, s.containerUrl.NewBlockBlobURL(name), azblob.UploadToBlockBlobOptions{
        BlockSize: 4,
    })
    s.assert.Nil(err)

    err = s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)})
    s.assert.Nil(err)

    // Blob should have the updated data
    file := s.containerUrl.NewBlobURL(name)
    resp, err := file.Download(ctx, 0, int64(truncatedLength), azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
    s.assert.Nil(err)
    s.assert.EqualValues(truncatedLength, resp.ContentLength())
    output, _ := ioutil.ReadAll(resp.Body(azblob.RetryReaderOptions{}))
    s.assert.EqualValues(testData, output[:len(data)])
}

func (s *blockBlobTestSuite) TestTruncateFileError() {
    defer s.cleanupTest()
    // Setup
@@ -1443,7 +1521,7 @@ func (s *blockBlobTestSuite) TestOverwriteAndAppendBlocks() {
    s.assert.Nil(err)

    f, _ = os.Open(f.Name())
-   len, err := f.Read(output)
+   len, _ := f.Read(output)
    s.assert.EqualValues(dataLen, len)
    s.assert.EqualValues(currentData, output)
    f.Close()
@@ -1476,7 +1554,7 @@ func (s *blockBlobTestSuite) TestAppendBlocks() {
    s.assert.Nil(err)

    f, _ = os.Open(f.Name())
-   len, err := f.Read(output)
+   len, _ := f.Read(output)
    s.assert.EqualValues(dataLen, len)
    s.assert.EqualValues(currentData, output)
    f.Close()
@@ -1509,7 +1587,7 @@ func (s *blockBlobTestSuite) TestAppendOffsetLargerThanSize() {
    s.assert.Nil(err)

    f, _ = os.Open(f.Name())
-   len, err := f.Read(output)
+   len, _ := f.Read(output)
    s.assert.EqualValues(dataLen, len)
    s.assert.EqualValues(currentData, output)
    f.Close()
@@ -1705,6 +1783,21 @@ func (s *blockBlobTestSuite) TestChmod() {
    s.assert.EqualValues(syscall.ENOTSUP, err)
}

func (s *blockBlobTestSuite) TestChmodIgnore() {
    defer s.cleanupTest()
    // Setup
    s.tearDownTestHelper(false) // Don't delete the generated container.

    config := fmt.Sprintf("azstorage:\n account-name: %s\n endpoint: https://%s.blob.core.windows.net/\n type: block\n account-key: %s\n mode: key\n container: %s\n fail-unsupported-op: false\n",
        storageTestConfigurationParameters.BlockAccount, storageTestConfigurationParameters.BlockAccount, storageTestConfigurationParameters.BlockKey, s.container)
    s.setupTestHelper(config, s.container, true)
    name := generateFileName()
    s.az.CreateFile(internal.CreateFileOptions{Name: name})

    err := s.az.Chmod(internal.ChmodOptions{Name: name, Mode: 0666})
    s.assert.Nil(err)
}
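The new Ignore variants flip `fail-unsupported-op` to false, under which unsupported POSIX calls succeed silently instead of returning ENOTSUP. The diff shows only the config knob and the two observed outcomes, so the guard below is a hedged sketch with a hypothetical field name:

```go
// Sketch only: inferred from TestChmod (ENOTSUP) vs TestChmodIgnore (nil).
// ignoreAccessModifiers is a hypothetical name for wherever the parsed
// fail-unsupported-op value lands in the component's config.
func (az *AzStorage) chmodSketch(options internal.ChmodOptions) error {
    if az.stConfig.ignoreAccessModifiers {
        return nil // config asked us to silently ignore unsupported ops
    }
    return syscall.ENOTSUP
}
```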

func (s *blockBlobTestSuite) TestChown() {
    defer s.cleanupTest()
    // Setup
@@ -1716,7 +1809,22 @@ func (s *blockBlobTestSuite) TestChown() {
    s.assert.EqualValues(syscall.ENOTSUP, err)
}

-func (s *blockBlobTestSuite) TestXBlockSize() {
+func (s *blockBlobTestSuite) TestChownIgnore() {
    defer s.cleanupTest()
    // Setup
    s.tearDownTestHelper(false) // Don't delete the generated container.

    config := fmt.Sprintf("azstorage:\n account-name: %s\n endpoint: https://%s.blob.core.windows.net/\n type: block\n account-key: %s\n mode: key\n container: %s\n fail-unsupported-op: false\n",
        storageTestConfigurationParameters.BlockAccount, storageTestConfigurationParameters.BlockAccount, storageTestConfigurationParameters.BlockKey, s.container)
    s.setupTestHelper(config, s.container, true)
    name := generateFileName()
    s.az.CreateFile(internal.CreateFileOptions{Name: name})

    err := s.az.Chown(internal.ChownOptions{Name: name, Owner: 6, Group: 5})
    s.assert.Nil(err)
}

+func (s *blockBlobTestSuite) TestBlockSize() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
@@ -1804,6 +1912,46 @@ func (s *blockBlobTestSuite) TestXBlockSize() {
    s.assert.EqualValues(block, 0)
}

func (s *blockBlobTestSuite) TestGetFileBlockOffsetsSmallFile() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
    h, _ := s.az.CreateFile(internal.CreateFileOptions{Name: name})
    testData := "testdatates1dat1tes2dat2tes3dat3tes4dat4"
    data := []byte(testData)

    s.az.WriteFile(internal.WriteFileOptions{Handle: h, Offset: 0, Data: data})

    // GetFileBlockOffsets
    offsetList, err := s.az.GetFileBlockOffsets(internal.GetFileBlockOffsetsOptions{Name: name})
    s.assert.Nil(err)
    s.assert.Len(offsetList.BlockList, 0)
    s.assert.True(offsetList.SmallFile())
    s.assert.EqualValues(0, offsetList.BlockIdLength)
}

func (s *blockBlobTestSuite) TestGetFileBlockOffsetsChunkedFile() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
    s.az.CreateFile(internal.CreateFileOptions{Name: name})
    testData := "testdatates1dat1tes2dat2tes3dat3tes4dat4"
    data := []byte(testData)

    // use our helper to force the max upload size (the size before a blob is broken into blocks) down to 4 bytes
    _, err := uploadReaderAtToBlockBlob(ctx, bytes.NewReader(data), int64(len(data)), 4, s.containerUrl.NewBlockBlobURL(name), azblob.UploadToBlockBlobOptions{
        BlockSize: 4,
    })
    s.assert.Nil(err)

    // GetFileBlockOffsets
    offsetList, err := s.az.GetFileBlockOffsets(internal.GetFileBlockOffsetsOptions{Name: name})
    s.assert.Nil(err)
    s.assert.Len(offsetList.BlockList, 10)
    s.assert.Zero(offsetList.Flags)
    s.assert.EqualValues(16, offsetList.BlockIdLength)
}
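The asserted count follows directly from the fixture: a 40-byte payload uploaded with BlockSize 4 commits exactly 10 blocks. A self-contained check of the arithmetic:

```go
package main

import "fmt"

func main() {
    payload := len("testdatates1dat1tes2dat2tes3dat3tes4dat4") // 40 bytes
    blockSize := 4
    fmt.Println(payload / blockSize) // 10, matching s.assert.Len(offsetList.BlockList, 10)
}
```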

// func (s *blockBlobTestSuite) TestRAGRS() {
// 	defer s.cleanupTest()
// 	// // Setup
@@ -91,7 +91,6 @@ type AzConnection interface {
    // This is just for test, shall not be used otherwise
    SetPrefixPath(string) error

-   Exists(name string) bool
    CreateFile(name string, mode os.FileMode) error
    CreateDirectory(name string) error
    CreateLink(source string, target string) error
@@ -217,12 +217,6 @@ func (dl *Datalake) SetPrefixPath(path string) error {
    return dl.BlockBlob.SetPrefixPath(path)
}

-// Exists : Check whether or not a given path exists
-func (dl *Datalake) Exists(name string) bool {
-    log.Trace("Datalake::Exists : name %s", name)
-    return dl.BlockBlob.Exists(name)
-}
-
// CreateFile : Create a new file in the filesystem/directory
func (dl *Datalake) CreateFile(name string, mode os.FileMode) error {
    log.Trace("Datalake::CreateFile : name %s", name)
@@ -1305,7 +1305,7 @@ func (s *datalakeTestSuite) TestWriteFile() {
    s.assert.EqualValues(testData, output)
}

-func (s *datalakeTestSuite) TestTruncateFileSmaller() {
+func (s *datalakeTestSuite) TestTruncateSmallFileSmaller() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
@@ -1327,7 +1327,34 @@ func (s *datalakeTestSuite) TestTruncateFileSmaller() {
    s.assert.EqualValues(testData[:truncatedLength], output)
}

-func (s *datalakeTestSuite) TestTruncateFileEqual() {
+func (s *datalakeTestSuite) TestTruncateChunkedFileSmaller() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
    s.az.CreateFile(internal.CreateFileOptions{Name: name})
    testData := "test data"
    data := []byte(testData)
    truncatedLength := 5
    // use our helper to force the max upload size (the size before a blob is broken into blocks) down to 4 bytes
    _, err := uploadReaderAtToBlockBlob(ctx, bytes.NewReader(data), int64(len(data)), 4,
        s.az.storage.(*Datalake).BlockBlob.Container.NewBlockBlobURL(name), azblob.UploadToBlockBlobOptions{
            BlockSize: 4,
        })
    s.assert.Nil(err)

    err = s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)})
    s.assert.Nil(err)

    // Blob should have the updated data
    file := s.containerUrl.NewRootDirectoryURL().NewFileURL(name)
    resp, err := file.Download(ctx, 0, int64(truncatedLength))
    s.assert.Nil(err)
    s.assert.EqualValues(truncatedLength, resp.ContentLength())
    output, _ := ioutil.ReadAll(resp.Body(azbfs.RetryReaderOptions{}))
    s.assert.EqualValues(testData[:truncatedLength], output)
}

func (s *datalakeTestSuite) TestTruncateSmallFileEqual() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
@@ -1349,7 +1376,34 @@ func (s *datalakeTestSuite) TestTruncateFileEqual() {
    s.assert.EqualValues(testData, output)
}

-func (s *datalakeTestSuite) TestTruncateFileBigger() {
+func (s *datalakeTestSuite) TestTruncateChunkedFileEqual() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
    s.az.CreateFile(internal.CreateFileOptions{Name: name})
    testData := "test data"
    data := []byte(testData)
    truncatedLength := 9
    // use our helper to force the max upload size (the size before a blob is broken into blocks) down to 4 bytes
    _, err := uploadReaderAtToBlockBlob(ctx, bytes.NewReader(data), int64(len(data)), 4,
        s.az.storage.(*Datalake).BlockBlob.Container.NewBlockBlobURL(name), azblob.UploadToBlockBlobOptions{
            BlockSize: 4,
        })
    s.assert.Nil(err)

    err = s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)})
    s.assert.Nil(err)

    // Blob should have the updated data
    file := s.containerUrl.NewRootDirectoryURL().NewFileURL(name)
    resp, err := file.Download(ctx, 0, int64(truncatedLength))
    s.assert.Nil(err)
    s.assert.EqualValues(truncatedLength, resp.ContentLength())
    output, _ := ioutil.ReadAll(resp.Body(azbfs.RetryReaderOptions{}))
    s.assert.EqualValues(testData, output)
}

func (s *datalakeTestSuite) TestTruncateSmallFileBigger() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
@@ -1371,6 +1425,33 @@ func (s *datalakeTestSuite) TestTruncateFileBigger() {
    s.assert.EqualValues(testData, output[:len(data)])
}

func (s *datalakeTestSuite) TestTruncateChunkedFileBigger() {
    defer s.cleanupTest()
    // Setup
    name := generateFileName()
    s.az.CreateFile(internal.CreateFileOptions{Name: name})
    testData := "test data"
    data := []byte(testData)
    truncatedLength := 15
    // use our helper to force the max upload size (the size before a blob is broken into blocks) down to 4 bytes
    _, err := uploadReaderAtToBlockBlob(ctx, bytes.NewReader(data), int64(len(data)), 4,
        s.az.storage.(*Datalake).BlockBlob.Container.NewBlockBlobURL(name), azblob.UploadToBlockBlobOptions{
            BlockSize: 4,
        })
    s.assert.Nil(err)

    err = s.az.TruncateFile(internal.TruncateFileOptions{Name: name, Size: int64(truncatedLength)})
    s.assert.Nil(err)

    // Blob should have the updated data
    file := s.containerUrl.NewRootDirectoryURL().NewFileURL(name)
    resp, err := file.Download(ctx, 0, int64(truncatedLength))
    s.assert.Nil(err)
    s.assert.EqualValues(truncatedLength, resp.ContentLength())
    output, _ := ioutil.ReadAll(resp.Body(azbfs.RetryReaderOptions{}))
    s.assert.EqualValues(testData, output[:len(data)])
}

func (s *datalakeTestSuite) TestTruncateFileError() {
    defer s.cleanupTest()
    // Setup
@@ -410,7 +410,7 @@ var ContentTypes = map[string]string{
    ".rar": "application/vnd.rar",
    ".tar": "application/x-tar",
    ".zip": "application/x-zip-compressed",
-   "7z":   "application/x-7z-compressed",
+   ".7z":  "application/x-7z-compressed",
    ".3g2": "video/3gpp2",

    ".sh": "application/x-sh",
@@ -36,6 +36,7 @@ package azstorage

import (
    "testing"

+   "github.com/Azure/azure-storage-blob-go/azblob"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/suite"
)
@@ -65,6 +66,117 @@ func (s *utilsTestSuite) TestContentType() {
    assert.EqualValues(val, "video/mp4")
}

type contentTypeVal struct {
    val    string
    result string
}

func (s *utilsTestSuite) TestGetContentType() {
    assert := assert.New(s.T())
    var inputs = []contentTypeVal{
        {val: "a.css", result: "text/css"},
        {val: "a.pdf", result: "application/pdf"},
        {val: "a.xml", result: "text/xml"},
        {val: "a.csv", result: "text/csv"},
        {val: "a.json", result: "application/json"},
        {val: "a.rtf", result: "application/rtf"},
        {val: "a.txt", result: "text/plain"},
        {val: "a.java", result: "text/plain"},
        {val: "a.dat", result: "text/plain"},
        {val: "a.htm", result: "text/html"},
        {val: "a.html", result: "text/html"},
        {val: "a.gif", result: "image/gif"},
        {val: "a.jpeg", result: "image/jpeg"},
        {val: "a.jpg", result: "image/jpeg"},
        {val: "a.png", result: "image/png"},
        {val: "a.bmp", result: "image/bmp"},
        {val: "a.js", result: "application/javascript"},
        {val: "a.mjs", result: "application/javascript"},
        {val: "a.svg", result: "image/svg+xml"},
        {val: "a.wasm", result: "application/wasm"},
        {val: "a.webp", result: "image/webp"},
        {val: "a.wav", result: "audio/wav"},
        {val: "a.mp3", result: "audio/mpeg"},
        {val: "a.mpeg", result: "video/mpeg"},
        {val: "a.aac", result: "audio/aac"},
        {val: "a.avi", result: "video/x-msvideo"},
        {val: "a.m3u8", result: "application/x-mpegURL"},
        {val: "a.ts", result: "video/MP2T"},
        {val: "a.mid", result: "audio/midiaudio/x-midi"},
        {val: "a.3gp", result: "video/3gpp"},
        {val: "a.mp4", result: "video/mp4"},
        {val: "a.doc", result: "application/msword"},
        {val: "a.docx", result: "application/vnd.openxmlformats-officedocument.wordprocessingml.document"},
        {val: "a.ppt", result: "application/vnd.ms-powerpoint"},
        {val: "a.pptx", result: "application/vnd.openxmlformats-officedocument.presentationml.presentation"},
        {val: "a.xls", result: "application/vnd.ms-excel"},
        {val: "a.xlsx", result: "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"},
        {val: "a.gz", result: "application/x-gzip"},
        {val: "a.jar", result: "application/java-archive"},
        {val: "a.rar", result: "application/vnd.rar"},
        {val: "a.tar", result: "application/x-tar"},
        {val: "a.zip", result: "application/x-zip-compressed"},
        {val: "a.7z", result: "application/x-7z-compressed"},
        {val: "a.3g2", result: "video/3gpp2"},
        {val: "a.sh", result: "application/x-sh"},
        {val: "a.exe", result: "application/x-msdownload"},
        {val: "a.dll", result: "application/x-msdownload"},
    }
    for _, i := range inputs {
        s.Run(i.val, func() {
            output := getContentType(i.val)
            assert.EqualValues(i.result, output)
        })
    }
}

type accessTierVal struct {
    val    string
    result azblob.AccessTierType
}

func (s *utilsTestSuite) TestGetAccessTierType() {
    assert := assert.New(s.T())
    var inputs = []accessTierVal{
        {val: "", result: azblob.AccessTierNone},
        {val: "none", result: azblob.AccessTierNone},
        {val: "hot", result: azblob.AccessTierHot},
        {val: "cool", result: azblob.AccessTierCool},
        {val: "archive", result: azblob.AccessTierArchive},
        {val: "p4", result: azblob.AccessTierP4},
        {val: "p6", result: azblob.AccessTierP6},
        {val: "p10", result: azblob.AccessTierP10},
        {val: "p15", result: azblob.AccessTierP15},
        {val: "p20", result: azblob.AccessTierP20},
        {val: "p30", result: azblob.AccessTierP30},
        {val: "p40", result: azblob.AccessTierP40},
        {val: "p50", result: azblob.AccessTierP50},
        {val: "p60", result: azblob.AccessTierP60},
        {val: "p70", result: azblob.AccessTierP70},
        {val: "p80", result: azblob.AccessTierP80},
        {val: "random", result: azblob.AccessTierNone},
    }
    for _, i := range inputs {
        s.Run(i.val, func() {
            output := getAccessTierType(i.val)
            assert.EqualValues(i.result, output)
        })
    }
}

func (s *utilsTestSuite) TestSanitizeSASKey() {
    assert := assert.New(s.T())

    key := sanitizeSASKey("")
    assert.EqualValues("", key)

    key = sanitizeSASKey("?abcd")
    assert.EqualValues("?abcd", key)

    key = sanitizeSASKey("abcd")
    assert.EqualValues("?abcd", key)
}
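The three assertions pin down sanitizeSASKey's contract: empty in, empty out; otherwise guarantee a single leading '?'. A sketch matching exactly those cases; the committed body is not shown in this diff:

```go
// Sketch consistent with the assertions above, not the committed implementation.
// Uses strings.HasPrefix from the standard library.
func sanitizeSASKeySketch(key string) string {
    if key == "" {
        return key // empty key stays empty
    }
    if !strings.HasPrefix(key, "?") {
        return "?" + key // normalize the token to start with '?'
    }
    return key
}
```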

func TestUtilsTestSuite(t *testing.T) {
    suite.Run(t, new(utilsTestSuite))
}