HybridCache: relocate to dotnet/extensions (#57670)

* relocate HybridCache:

- remove src/Caching/Hybrid (leaves relocation README.md)
- remove from sln[f]
- remove from eng files (via automated rebuild)
- remove minimal usage from tangential benchmark project

* rmdir src/Caching/Hybrid
This commit is contained in:
Marc Gravell 2024-09-12 15:31:18 +01:00 committed by GitHub
Parent a961b93bca
Commit 6c0bf68345
No known key found for this signature
GPG key ID: B5690EEEBB952194
44 changed files with 1 addition and 4243 deletions

Просмотреть файл

@ -1788,12 +1788,6 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Components.WasmRemoteAuthen
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Sample", "src\OpenApi\sample\Sample.csproj", "{6DEC24A8-A166-432F-8E3B-58FFCDA92F52}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Hybrid", "Hybrid", "{2D64CA23-6E81-488E-A7D3-9BDF87240098}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.Extensions.Caching.Hybrid", "src\Caching\Hybrid\src\Microsoft.Extensions.Caching.Hybrid.csproj", "{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.Extensions.Caching.Hybrid.Tests", "src\Caching\Hybrid\test\Microsoft.Extensions.Caching.Hybrid.Tests.csproj", "{CF63C942-895A-4F6B-888A-7653D7C4991A}"
EndProject
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "MicroBenchmarks", "MicroBenchmarks", "{6469F11E-8CEE-4292-820B-324DFFC88EBC}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.Extensions.Caching.MicroBenchmarks", "src\Caching\perf\MicroBenchmarks\Microsoft.Extensions.Caching.MicroBenchmarks\Microsoft.Extensions.Caching.MicroBenchmarks.csproj", "{8D2CC6ED-5105-4F52-8757-C21F4DE78589}"
@ -10821,38 +10815,6 @@ Global
{6DEC24A8-A166-432F-8E3B-58FFCDA92F52}.Release|x64.Build.0 = Release|Any CPU
{6DEC24A8-A166-432F-8E3B-58FFCDA92F52}.Release|x86.ActiveCfg = Release|Any CPU
{6DEC24A8-A166-432F-8E3B-58FFCDA92F52}.Release|x86.Build.0 = Release|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Debug|Any CPU.Build.0 = Debug|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Debug|arm64.ActiveCfg = Debug|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Debug|arm64.Build.0 = Debug|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Debug|x64.ActiveCfg = Debug|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Debug|x64.Build.0 = Debug|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Debug|x86.ActiveCfg = Debug|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Debug|x86.Build.0 = Debug|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Release|Any CPU.ActiveCfg = Release|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Release|Any CPU.Build.0 = Release|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Release|arm64.ActiveCfg = Release|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Release|arm64.Build.0 = Release|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Release|x64.ActiveCfg = Release|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Release|x64.Build.0 = Release|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Release|x86.ActiveCfg = Release|Any CPU
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9}.Release|x86.Build.0 = Release|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Debug|Any CPU.Build.0 = Debug|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Debug|arm64.ActiveCfg = Debug|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Debug|arm64.Build.0 = Debug|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Debug|x64.ActiveCfg = Debug|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Debug|x64.Build.0 = Debug|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Debug|x86.ActiveCfg = Debug|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Debug|x86.Build.0 = Debug|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Release|Any CPU.ActiveCfg = Release|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Release|Any CPU.Build.0 = Release|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Release|arm64.ActiveCfg = Release|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Release|arm64.Build.0 = Release|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Release|x64.ActiveCfg = Release|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Release|x64.Build.0 = Release|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Release|x86.ActiveCfg = Release|Any CPU
{CF63C942-895A-4F6B-888A-7653D7C4991A}.Release|x86.Build.0 = Release|Any CPU
{8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Debug|Any CPU.Build.0 = Debug|Any CPU
{8D2CC6ED-5105-4F52-8757-C21F4DE78589}.Debug|arm64.ActiveCfg = Debug|Any CPU
@ -11880,9 +11842,6 @@ Global
{433F91E4-E39D-4EB0-B798-2998B3969A2C} = {6126DCE4-9692-4EE2-B240-C65743572995}
{8A021D6D-7935-4AB3-BB47-38D4FF9B0D13} = {6126DCE4-9692-4EE2-B240-C65743572995}
{6DEC24A8-A166-432F-8E3B-58FFCDA92F52} = {2299CCD8-8F9C-4F2B-A633-9BF4DA81022B}
{2D64CA23-6E81-488E-A7D3-9BDF87240098} = {0F39820F-F4A5-41C6-9809-D79B68F032EF}
{2B60E6D3-9E7C-427A-AD4E-BBE9A6D935B9} = {2D64CA23-6E81-488E-A7D3-9BDF87240098}
{CF63C942-895A-4F6B-888A-7653D7C4991A} = {2D64CA23-6E81-488E-A7D3-9BDF87240098}
{6469F11E-8CEE-4292-820B-324DFFC88EBC} = {0F39820F-F4A5-41C6-9809-D79B68F032EF}
{8D2CC6ED-5105-4F52-8757-C21F4DE78589} = {6469F11E-8CEE-4292-820B-324DFFC88EBC}
{9DC6B242-457B-4767-A84B-C3D23B76C642} = {2299CCD8-8F9C-4F2B-A633-9BF4DA81022B}

Просмотреть файл

@ -5,7 +5,6 @@
-->
<Project>
<ItemGroup>
<ProjectReferenceProvider Include="Microsoft.Extensions.Caching.Hybrid" ProjectPath="$(RepoRoot)src\Caching\Hybrid\src\Microsoft.Extensions.Caching.Hybrid.csproj" />
<ProjectReferenceProvider Include="Microsoft.Extensions.Caching.SqlServer" ProjectPath="$(RepoRoot)src\Caching\SqlServer\src\Microsoft.Extensions.Caching.SqlServer.csproj" />
<ProjectReferenceProvider Include="Microsoft.Extensions.Caching.StackExchangeRedis" ProjectPath="$(RepoRoot)src\Caching\StackExchangeRedis\src\Microsoft.Extensions.Caching.StackExchangeRedis.csproj" />
<ProjectReferenceProvider Include="Microsoft.AspNetCore" ProjectPath="$(RepoRoot)src\DefaultBuilder\src\Microsoft.AspNetCore.csproj" />

Просмотреть файл

@ -105,7 +105,6 @@
<AspNetCoreShippingAssembly Include="Microsoft.Extensions.Diagnostics.HealthChecks.Abstractions" />
<AspNetCoreShippingAssembly Include="Microsoft.Extensions.Diagnostics.HealthChecks" />
<AspNetCoreShippingAssembly Include="Microsoft.Extensions.Features" />
<AspNetCoreShippingAssembly Include="Microsoft.Extensions.Caching.Hybrid" />
<AspNetCoreShippingAssembly Include="Microsoft.Extensions.Caching.SqlServer" />
<AspNetCoreShippingAssembly Include="Microsoft.Extensions.Caching.StackExchangeRedis" />
<AspNetCoreShippingAssembly Include="Microsoft.AspNetCore.JsonPatch" />

Просмотреть файл

@ -2,8 +2,6 @@
"solution": {
"path": "..\\..\\AspNetCore.sln",
"projects": [
"src\\Caching\\Hybrid\\src\\Microsoft.Extensions.Caching.Hybrid.csproj",
"src\\Caching\\Hybrid\\test\\Microsoft.Extensions.Caching.Hybrid.Tests.csproj",
"src\\Caching\\SqlServer\\src\\Microsoft.Extensions.Caching.SqlServer.csproj",
"src\\Caching\\SqlServer\\test\\Microsoft.Extensions.Caching.SqlServer.Tests.csproj",
"src\\Caching\\StackExchangeRedis\\src\\Microsoft.Extensions.Caching.StackExchangeRedis.csproj",

Просмотреть файл

@ -1,59 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Diagnostics.CodeAnalysis;
using Microsoft.Extensions.Caching.Hybrid;
namespace Microsoft.Extensions.DependencyInjection;
/// <summary>
/// Configuration extension methods for <see cref="IHybridCacheBuilder"/> / <see cref="HybridCache"/>.
/// </summary>
/// <summary>
/// Configuration extension methods for <see cref="IHybridCacheBuilder"/> / <see cref="HybridCache"/>.
/// </summary>
public static class HybridCacheBuilderExtensions
{
    /// <summary>
    /// Serialize values of type <typeparamref name="T"/> with the specified serializer from <paramref name="serializer"/>.
    /// </summary>
    /// <typeparam name="T">The type to be serialized.</typeparam>
    /// <param name="builder">The builder instance to configure.</param>
    /// <param name="serializer">The serializer instance to register for <typeparamref name="T"/>.</param>
    /// <returns>The <paramref name="builder"/> instance, to allow chaining.</returns>
    /// <exception cref="ArgumentNullException"><paramref name="builder"/> or <paramref name="serializer"/> is <c>null</c>.</exception>
    public static IHybridCacheBuilder AddSerializer<T>(this IHybridCacheBuilder builder, IHybridCacheSerializer<T> serializer)
    {
        // argument validation, consistent with HybridCacheServiceExtensions
#if NET7_0_OR_GREATER
        ArgumentNullException.ThrowIfNull(builder);
        ArgumentNullException.ThrowIfNull(serializer);
#else
        _ = builder ?? throw new ArgumentNullException(nameof(builder));
        _ = serializer ?? throw new ArgumentNullException(nameof(serializer));
#endif
        builder.Services.AddSingleton<IHybridCacheSerializer<T>>(serializer);
        return builder;
    }

    /// <summary>
    /// Serialize values of type <typeparamref name="T"/> with the serializer of type <typeparamref name="TImplementation"/>.
    /// </summary>
    /// <typeparam name="T">The type to be serialized.</typeparam>
    /// <typeparam name="TImplementation">The serializer implementation to register for <typeparamref name="T"/>.</typeparam>
    /// <param name="builder">The builder instance to configure.</param>
    /// <returns>The <paramref name="builder"/> instance, to allow chaining.</returns>
    /// <exception cref="ArgumentNullException"><paramref name="builder"/> is <c>null</c>.</exception>
    public static IHybridCacheBuilder AddSerializer<T,
#if NET5_0_OR_GREATER
        [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)]
#endif
    TImplementation>(this IHybridCacheBuilder builder)
        where TImplementation : class, IHybridCacheSerializer<T>
    {
#if NET7_0_OR_GREATER
        ArgumentNullException.ThrowIfNull(builder);
#else
        _ = builder ?? throw new ArgumentNullException(nameof(builder));
#endif
        builder.Services.AddSingleton<IHybridCacheSerializer<T>, TImplementation>();
        return builder;
    }

    /// <summary>
    /// Add <paramref name="factory"/> as an additional serializer factory, which can provide serializers for multiple types.
    /// </summary>
    /// <param name="builder">The builder instance to configure.</param>
    /// <param name="factory">The factory instance to register.</param>
    /// <returns>The <paramref name="builder"/> instance, to allow chaining.</returns>
    /// <exception cref="ArgumentNullException"><paramref name="builder"/> or <paramref name="factory"/> is <c>null</c>.</exception>
    public static IHybridCacheBuilder AddSerializerFactory(this IHybridCacheBuilder builder, IHybridCacheSerializerFactory factory)
    {
#if NET7_0_OR_GREATER
        ArgumentNullException.ThrowIfNull(builder);
        ArgumentNullException.ThrowIfNull(factory);
#else
        _ = builder ?? throw new ArgumentNullException(nameof(builder));
        _ = factory ?? throw new ArgumentNullException(nameof(factory));
#endif
        builder.Services.AddSingleton<IHybridCacheSerializerFactory>(factory);
        return builder;
    }

    /// <summary>
    /// Add a factory of type <typeparamref name="TImplementation"/> as an additional serializer factory, which can provide serializers for multiple types.
    /// </summary>
    /// <typeparam name="TImplementation">The factory implementation to register.</typeparam>
    /// <param name="builder">The builder instance to configure.</param>
    /// <returns>The <paramref name="builder"/> instance, to allow chaining.</returns>
    /// <exception cref="ArgumentNullException"><paramref name="builder"/> is <c>null</c>.</exception>
    public static IHybridCacheBuilder AddSerializerFactory<
#if NET5_0_OR_GREATER
        [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)]
#endif
    TImplementation>(this IHybridCacheBuilder builder)
        where TImplementation : class, IHybridCacheSerializerFactory
    {
#if NET7_0_OR_GREATER
        ArgumentNullException.ThrowIfNull(builder);
#else
        _ = builder ?? throw new ArgumentNullException(nameof(builder));
#endif
        builder.Services.AddSingleton<IHybridCacheSerializerFactory, TImplementation>();
        return builder;
    }
}

Просмотреть файл

@ -1,46 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace Microsoft.Extensions.Caching.Hybrid;
/// <summary>
/// Options for configuring the default <see cref="HybridCache"/> implementation.
/// </summary>
/// <summary>
/// Options for configuring the default <see cref="HybridCache"/> implementation.
/// </summary>
public class HybridCacheOptions // : IOptions<HybridCacheOptions>
{
    // TODO: should we implement IOptions<T>?

    /// <summary>
    /// Default global options to be applied to <see cref="HybridCache"/> operations; if options are
    /// specified at the individual call level, the non-null values are merged (with the per-call
    /// options being used in preference to the global options). If no value is specified for a given
    /// option (globally or per-call), the implementation may choose a reasonable default.
    /// </summary>
    public HybridCacheEntryOptions? DefaultEntryOptions { get; set; }

    /// <summary>
    /// Disallow compression for this <see cref="HybridCache"/> instance.
    /// </summary>
    public bool DisableCompression { get; set; }

    /// <summary>
    /// The maximum size of cache items; attempts to store values over this size will be logged
    /// and the value will not be stored in cache.
    /// </summary>
    /// <remarks>The default value is 1 MiB.</remarks>
    public long MaximumPayloadBytes { get; set; } = 1 << 20; // 1MiB

    /// <summary>
    /// The maximum permitted length (in characters) of keys; attempts to use keys over this size will be logged.
    /// </summary>
    /// <remarks>The default value is 1024 characters.</remarks>
    public int MaximumKeyLength { get; set; } = 1024; // characters

    /// <summary>
    /// Use "tags" data as dimensions on metric reporting; if enabled, care should be used to ensure that
    /// tags do not contain data that should not be visible in metrics systems.
    /// </summary>
    public bool ReportTagMetrics { get; set; }

    // HybridCacheOptions IOptions<HybridCacheOptions>.Value => this;
}

Просмотреть файл

@ -1,53 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using Microsoft.Extensions.Caching.Hybrid;
using Microsoft.Extensions.Caching.Hybrid.Internal;
using Microsoft.Extensions.DependencyInjection.Extensions;
namespace Microsoft.Extensions.DependencyInjection;
/// <summary>
/// Configuration extension methods for <see cref="HybridCache"/>.
/// </summary>
/// <summary>
/// Configuration extension methods for <see cref="HybridCache"/>.
/// </summary>
public static class HybridCacheServiceExtensions
{
    /// <summary>
    /// Adds support for multi-tier caching services.
    /// </summary>
    /// <param name="services">The service collection to register into.</param>
    /// <param name="setupAction">Callback used to configure the <see cref="HybridCacheOptions"/>.</param>
    /// <returns>A builder instance that allows further configuration of the <see cref="HybridCache"/> system.</returns>
    /// <exception cref="ArgumentNullException"><paramref name="setupAction"/> is <c>null</c>.</exception>
    public static IHybridCacheBuilder AddHybridCache(this IServiceCollection services, Action<HybridCacheOptions> setupAction)
    {
        // prefer the framework throw-helper when available; hand-rolled guard down-level
#if NET7_0_OR_GREATER
        ArgumentNullException.ThrowIfNull(setupAction);
#else
        _ = setupAction ?? throw new ArgumentNullException(nameof(setupAction));
#endif

        // register the core services first (also validates `services`), then apply the options callback
        AddHybridCache(services);
        services.Configure(setupAction);
        return new HybridCacheBuilder(services);
    }

    /// <summary>
    /// Adds support for multi-tier caching services.
    /// </summary>
    /// <param name="services">The service collection to register into.</param>
    /// <returns>A builder instance that allows further configuration of the <see cref="HybridCache"/> system.</returns>
    /// <exception cref="ArgumentNullException"><paramref name="services"/> is <c>null</c>.</exception>
    public static IHybridCacheBuilder AddHybridCache(this IServiceCollection services)
    {
#if NET7_0_OR_GREATER
        ArgumentNullException.ThrowIfNull(services);
#else
        _ = services ?? throw new ArgumentNullException(nameof(services));
#endif

        // TryAdd* is used throughout so user-supplied registrations (made before or after
        // this call) win over these defaults
        services.TryAddSingleton(TimeProvider.System);
        services.AddOptions();
        services.AddMemoryCache(); // L1 cache
        // default serialization: JSON fallback factory, plus pass-through handling for string/byte[]
        services.TryAddSingleton<IHybridCacheSerializerFactory, DefaultJsonSerializerFactory>();
        services.TryAddSingleton<IHybridCacheSerializer<string>>(InbuiltTypeSerializer.Instance);
        services.TryAddSingleton<IHybridCacheSerializer<byte[]>>(InbuiltTypeSerializer.Instance);
        services.TryAddSingleton<HybridCache, DefaultHybridCache>();
        return new HybridCacheBuilder(services);
    }
}

Просмотреть файл

@ -1,22 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Microsoft.Extensions.DependencyInjection;
namespace Microsoft.Extensions.Caching.Hybrid;
/// <summary>
/// Helper API for configuring <see cref="HybridCache"/>.
/// </summary>
/// <summary>
/// Helper API for configuring <see cref="HybridCache"/>.
/// </summary>
public interface IHybridCacheBuilder
{
    /// <summary>
    /// Gets the services collection associated with this instance.
    /// </summary>
    IServiceCollection Services { get; }
}
// Default IHybridCacheBuilder implementation: a trivial wrapper that exposes the
// service collection so extension methods can add further registrations.
internal sealed class HybridCacheBuilder : IHybridCacheBuilder
{
    public HybridCacheBuilder(IServiceCollection services)
    {
        Services = services;
    }

    /// <inheritdoc/>
    public IServiceCollection Services { get; }
}

Просмотреть файл

@ -1,82 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Buffers;
using System.Diagnostics;
using System.Runtime.CompilerServices;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
// used to convey buffer status; like ArraySegment<byte>, but Offset is always
// zero, and we use the most significant bit (MSB, the sign flag) of the length
// to track whether or not to recycle this value
internal readonly struct BufferChunk
{
    private const int MSB = (1 << 31); // sign bit; doubles as the "return to pool" flag

    // packed field: low 31 bits = length, MSB = pool flag
    private readonly int _lengthAndPoolFlag;

    public byte[]? Array { get; } // null for default

    // logical payload length (pool flag masked off)
    public int Length => _lengthAndPoolFlag & ~MSB;

    // true when Array was rented from ArrayPool and must be returned
    public bool ReturnToPool => (_lengthAndPoolFlag & MSB) != 0;

    // creates a right-sized defensive copy of the payload
    public byte[] ToArray()
    {
        var length = Length;
        if (length == 0)
        {
            return [];
        }
        var copy = new byte[length];
        Buffer.BlockCopy(Array!, 0, copy, 0, length);
        return copy;
    }

    // wraps an exact-sized array that is NOT pooled (flag left clear)
    public BufferChunk(byte[] array)
    {
        Debug.Assert(array is not null, "expected valid array input");
        Array = array;
        _lengthAndPoolFlag = array.Length;
        // assume not pooled, if exact-sized
        // (we don't expect array.Length to be negative; we're really just saying
        // "we expect the result of assigning array.Length to _lengthAndPoolFlag
        // to give the expected Length *and* not have the MSB set; we're just
        // checking that we haven't fat-fingered our MSB logic)
        Debug.Assert(!ReturnToPool, "do not return right-sized arrays");
        Debug.Assert(Length == array.Length, "array length not respected");
    }

    // wraps an oversized (typically pooled) array with an explicit payload length
    public BufferChunk(byte[] array, int length, bool returnToPool)
    {
        Debug.Assert(array is not null, "expected valid array input");
        Debug.Assert(length >= 0, "expected valid length");
        Array = array;
        _lengthAndPoolFlag = length | (returnToPool ? MSB : 0);

        Debug.Assert(ReturnToPool == returnToPool, "return-to-pool not respected");
        Debug.Assert(Length == length, "length not respected");
    }

    // returns the array to the pool (if flagged), then zeroes this value in place
    internal void RecycleIfAppropriate()
    {
        if (ReturnToPool)
        {
            ArrayPool<byte>.Shared.Return(Array!);
        }
        Unsafe.AsRef(in this) = default; // anti foot-shotgun double-return guard; not 100%, but worth doing
        Debug.Assert(Array is null && !ReturnToPool, "expected clean slate after recycle");
    }

    // zero-copy view of the payload as a ReadOnlySequence
    internal ReadOnlySequence<byte> AsSequence() => Length == 0 ? default : new ReadOnlySequence<byte>(Array!, 0, Length);

    // copy of this chunk with the pool flag cleared (caller must not recycle it)
    internal BufferChunk DoNotReturnToPool()
    {
        var copy = this;
        Unsafe.AsRef(in copy._lengthAndPoolFlag) &= ~MSB; // clear MSB on the local copy only
        Debug.Assert(copy.Length == Length, "same length expected");
        Debug.Assert(!copy.ReturnToPool, "do not return to pool");
        return copy;
    }
}

Просмотреть файл

@ -1,99 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Diagnostics;
using System.Threading;
using Microsoft.Extensions.Caching.Memory;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
partial class DefaultHybridCache
{
    // Base type for entries stored in the L1 cache; lifetime is managed by a
    // lock-free reference count so pooled buffers can be recycled safely.
    internal abstract class CacheItem
    {
        private int _refCount = 1; // the number of pending operations against this cache item

        // note: the ref count is the number of callers anticipating this value at any given time; initially,
        // it is one for a simple "get the value" flow, but if another call joins with us, it'll be incremented;
        // if either cancels, it will get decremented, with the entire flow being cancelled if it ever becomes
        // zero

        // this counter also drives cache lifetime, with the cache itself incrementing the count by one; in the
        // case of mutable data, cache eviction may reduce this to zero (in cooperation with any concurrent readers,
        // who incr/decr around their fetch), allowing safe buffer recycling

        internal int RefCount => Volatile.Read(ref _refCount);

        // single shared delegate instance (avoids a new allocation per cache entry)
        internal static readonly PostEvictionDelegate _sharedOnEviction = static (key, value, reason, state) =>
        {
            if (value is CacheItem item)
            {
                // countermand the reference held by the cache
                item.Release();
            }
        };

        public virtual bool NeedsEvictionCallback => false; // do we need to call Release when evicted?

        protected virtual void OnFinalRelease() { } // any required release semantics

        public abstract bool TryReserveBuffer(out BufferChunk buffer);

        public abstract bool DebugIsImmutable { get; }

        public bool Release() // returns true ONLY for the final release step
        {
            var newCount = Interlocked.Decrement(ref _refCount);
            Debug.Assert(newCount >= 0, "over-release detected");
            if (newCount == 0)
            {
                // perform per-item clean-up, i.e. buffer recycling (if defensive copies needed)
                OnFinalRelease();
                return true;
            }
            return false;
        }

        public bool TryReserve()
        {
            // this is basically interlocked increment, but with a check against:
            // a) incrementing upwards from zero
            // b) overflowing *back* to zero
            // implemented as a CAS loop so both checks are atomic with the increment
            var oldValue = Volatile.Read(ref _refCount);
            do
            {
                if (oldValue is 0 or -1)
                {
                    return false; // already burned, or about to roll around back to zero
                }

                var updated = Interlocked.CompareExchange(ref _refCount, oldValue + 1, oldValue);
                if (updated == oldValue)
                {
                    return true; // we exchanged
                }
                oldValue = updated; // we failed, but we have an updated state
            } while (true);
        }
    }

    // Typed cache entry; concrete implementation depends on whether T is immutable.
    internal abstract class CacheItem<T> : CacheItem
    {
        internal static CacheItem<T> Create() => ImmutableTypeCache<T>.IsImmutable ? new ImmutableCacheItem<T>() : new MutableCacheItem<T>();

        // attempt to get a value that was *not* previously reserved
        public abstract bool TryGetValue(out T value);

        // get a value that *was* reserved, countermanding our reservation in the process
        public T GetReservedValue()
        {
            if (!TryGetValue(out var value))
            {
                Throw();
            }
            Release(); // countermand the earlier reservation
            return value;

            static void Throw() => throw new ObjectDisposedException("The cache item has been recycled before the value was obtained");
        }
    }
}

Просмотреть файл

@ -1,70 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Threading;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
partial class DefaultHybridCache
{
    // Test/debug helper: fetch the raw CacheItem wrapper (rather than the value) from L1.
    internal bool DebugTryGetCacheItem(string key, [NotNullWhen(true)] out CacheItem? value)
    {
        if (_localCache.TryGetValue(key, out var untyped) && untyped is CacheItem typed)
        {
            value = typed;
            return true;
        }
        value = null;
        return false;
    }

#if DEBUG // enable ref-counted buffers

    // running count of pooled buffers currently outstanding; lets tests detect buffer leaks
    private int _outstandingBufferCount;

    // reads (or, with flush: reads-and-resets) the outstanding-buffer counter
    internal int DebugOnlyGetOutstandingBuffers(bool flush = false)
        => flush ? Interlocked.Exchange(ref _outstandingBufferCount, 0) : Volatile.Read(ref _outstandingBufferCount);

    [Conditional("DEBUG")]
    internal void DebugOnlyDecrementOutstandingBuffers()
    {
        Interlocked.Decrement(ref _outstandingBufferCount);
    }

    [Conditional("DEBUG")]
    internal void DebugOnlyIncrementOutstandingBuffers()
    {
        Interlocked.Increment(ref _outstandingBufferCount);
    }
#endif

    partial class MutableCacheItem<T>
    {
        // partial hooks: no-ops in release builds, implemented below under #if DEBUG
        partial void DebugOnlyDecrementOutstandingBuffers();

        partial void DebugOnlyTrackBufferCore(DefaultHybridCache cache);

        [Conditional("DEBUG")]
        internal void DebugOnlyTrackBuffer(DefaultHybridCache cache) => DebugOnlyTrackBufferCore(cache);

#if DEBUG
        private DefaultHybridCache? _cache; // for buffer-tracking - only enabled in DEBUG

        partial void DebugOnlyDecrementOutstandingBuffers()
        {
            // only pooled buffers count towards the outstanding total
            if (_buffer.ReturnToPool)
            {
                _cache?.DebugOnlyDecrementOutstandingBuffers();
            }
        }

        partial void DebugOnlyTrackBufferCore(DefaultHybridCache cache)
        {
            _cache = cache;
            if (_buffer.ReturnToPool)
            {
                _cache?.DebugOnlyIncrementOutstandingBuffers();
            }
        }
#endif
    }
}

Просмотреть файл

@ -1,46 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Threading;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
partial class DefaultHybridCache
{
    private sealed class ImmutableCacheItem<T> : CacheItem<T> // used to hold types that do not require defensive copies
    {
        private T _value = default!; // deferred until SetValue

        public void SetValue(T value) => _value = value;

        // lazily-published shared instance holding default(T); see GetReservedShared
        private static ImmutableCacheItem<T>? _sharedDefault;

        // get a shared instance that passes as "reserved"; doesn't need to be 100% singleton,
        // but we don't want to break the reservation rules either; if we can't reserve: create new
        public static ImmutableCacheItem<T> GetReservedShared()
        {
            var obj = Volatile.Read(ref _sharedDefault);
            if (obj is null || !obj.TryReserve())
            {
                obj = new();
                obj.TryReserve(); // this is reliable on a new instance
                Volatile.Write(ref _sharedDefault, obj); // benign race: losing a publish just means an extra instance
            }
            return obj;
        }

        public override bool TryGetValue(out T value)
        {
            value = _value;
            return true; // always available
        }

        public override bool TryReserveBuffer(out BufferChunk buffer)
        {
            buffer = default;
            return false; // we don't have one to reserve!
        }

        public override bool DebugIsImmutable => true;
    }
}

Просмотреть файл

@ -1,138 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Runtime.CompilerServices;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Caching.Memory;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
partial class DefaultHybridCache
{
    // Reads a payload from the backend (L2) cache, using the buffer-based API when the
    // backend supports it, otherwise the legacy byte[]-based API. Synchronous completions
    // are handled without an async state machine.
    internal ValueTask<BufferChunk> GetFromL2Async(string key, CancellationToken token)
    {
        switch (GetFeatures(CacheFeatures.BackendCache | CacheFeatures.BackendBuffers))
        {
            case CacheFeatures.BackendCache: // legacy byte[]-based
                var pendingLegacy = _backendCache!.GetAsync(key, token);
#if NETCOREAPP2_0_OR_GREATER || NETSTANDARD2_1_OR_GREATER
                if (!pendingLegacy.IsCompletedSuccessfully)
#else
                if (pendingLegacy.Status != TaskStatus.RanToCompletion)
#endif
                {
                    return new(AwaitedLegacy(pendingLegacy, this));
                }
                return new(GetValidPayloadSegment(pendingLegacy.Result)); // already complete
            case CacheFeatures.BackendCache | CacheFeatures.BackendBuffers: // IBufferWriter<byte>-based
                var writer = RecyclableArrayBufferWriter<byte>.Create(MaximumPayloadBytes);
                var cache = Unsafe.As<IBufferDistributedCache>(_backendCache!); // type-checked already
                var pendingBuffers = cache.TryGetAsync(key, writer, token);
                if (!pendingBuffers.IsCompletedSuccessfully)
                {
                    return new(AwaitedBuffers(pendingBuffers, writer));
                }
                // detach the pooled buffer from the writer on a hit; default (no buffer) on a miss
                BufferChunk result = pendingBuffers.GetAwaiter().GetResult()
                    ? new(writer.DetachCommitted(out var length), length, returnToPool: true)
                    : default;
                writer.Dispose(); // it is not accidental that this isn't "using"; avoid recycling if not 100% sure what happened
                return new(result);
        }
        return default; // no backend cache configured

        static async Task<BufferChunk> AwaitedLegacy(Task<byte[]?> pending, DefaultHybridCache @this)
        {
            var bytes = await pending.ConfigureAwait(false);
            return @this.GetValidPayloadSegment(bytes);
        }

        static async Task<BufferChunk> AwaitedBuffers(ValueTask<bool> pending, RecyclableArrayBufferWriter<byte> writer)
        {
            BufferChunk result = await pending.ConfigureAwait(false)
                ? new(writer.DetachCommitted(out var length), length, returnToPool: true)
                : default;
            writer.Dispose(); // it is not accidental that this isn't "using"; avoid recycling if not 100% sure what happened
            return result;
        }
    }

    // Wraps a raw backend payload, enforcing the configured size limit.
    private BufferChunk GetValidPayloadSegment(byte[]? payload)
    {
        if (payload is not null)
        {
            if (payload.Length > MaximumPayloadBytes)
            {
                ThrowPayloadLengthExceeded(payload.Length);
            }
            return new(payload);
        }
        return default;
    }

    [DoesNotReturn, MethodImpl(MethodImplOptions.NoInlining)]
    private void ThrowPayloadLengthExceeded(int size) // splitting the exception bits out to a different method
    {
        // TODO: also log to logger (hence instance method)
        throw new InvalidOperationException($"Maximum cache length ({MaximumPayloadBytes} bytes) exceeded");
    }

    // Writes a serialized payload to the backend (L2) cache, preferring the
    // ReadOnlySequence-based API when the backend supports it.
    internal ValueTask SetL2Async(string key, in BufferChunk buffer, HybridCacheEntryOptions? options, CancellationToken token)
    {
        Debug.Assert(buffer.Array is not null);
        switch (GetFeatures(CacheFeatures.BackendCache | CacheFeatures.BackendBuffers))
        {
            case CacheFeatures.BackendCache: // legacy byte[]-based
                var arr = buffer.Array;
                if (arr.Length != buffer.Length)
                {
                    // we'll need a right-sized snapshot
                    arr = buffer.ToArray();
                }
                return new(_backendCache!.SetAsync(key, arr, GetOptions(options), token));
            case CacheFeatures.BackendCache | CacheFeatures.BackendBuffers: // ReadOnlySequence<byte>-based
                var cache = Unsafe.As<IBufferDistributedCache>(_backendCache!); // type-checked already
                return cache.SetAsync(key, buffer.AsSequence(), GetOptions(options), token);
        }
        return default; // no backend cache configured
    }

    // Maps HybridCacheEntryOptions to DistributedCacheEntryOptions, reusing the
    // pre-computed default when the expiration matches the configured default.
    private DistributedCacheEntryOptions GetOptions(HybridCacheEntryOptions? options)
    {
        DistributedCacheEntryOptions? result = null;
        if (options is not null && options.Expiration.HasValue && options.Expiration.GetValueOrDefault() != _defaultExpiration)
        {
            result = ToDistributedCacheEntryOptions(options);
        }
        return result ?? _defaultDistributedCacheExpiration;

#if NET8_0_OR_GREATER
        // internal method memoizes this allocation; since it is "init", it is immutable (outside reflection)
        [UnsafeAccessor(UnsafeAccessorKind.Method, Name = nameof(ToDistributedCacheEntryOptions))]
        extern static DistributedCacheEntryOptions? ToDistributedCacheEntryOptions(HybridCacheEntryOptions options);
#else
        // without that helper method, we'll just eat the alloc (down-level TFMs)
        static DistributedCacheEntryOptions ToDistributedCacheEntryOptions(HybridCacheEntryOptions options)
            => new() { AbsoluteExpirationRelativeToNow = options.Expiration };
#endif
    }

    // Stores an entry in the local (L1) memory cache, wiring up buffer recycling on eviction.
    internal void SetL1<T>(string key, CacheItem<T> value, HybridCacheEntryOptions? options)
    {
        if (value.TryReserve()) // incr ref-count for the cache itself; this *may* be released via the NeedsEvictionCallback path
        {
            // based on CacheExtensions.Set<TItem>, but with post-eviction recycling
            using var cacheEntry = _localCache.CreateEntry(key);
            cacheEntry.AbsoluteExpirationRelativeToNow = options?.LocalCacheExpiration ?? _defaultLocalCacheExpiration;
            cacheEntry.Value = value;
            if (value.NeedsEvictionCallback)
            {
                cacheEntry.RegisterPostEvictionCallback(CacheItem._sharedOnEviction);
            }
        }
    }
}

Просмотреть файл

@ -1,70 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
partial class DefaultHybridCache
{
    private sealed partial class MutableCacheItem<T> : CacheItem<T> // used to hold types that require defensive copies
    {
        private IHybridCacheSerializer<T> _serializer = null!; // deferred until SetValue
        private BufferChunk _buffer; // serialized payload; deserialized on every read (defensive copy)

        // adopts an existing serialized buffer; ownership transfers to this item
        public void SetValue(ref BufferChunk buffer, IHybridCacheSerializer<T> serializer)
        {
            _serializer = serializer;
            _buffer = buffer;
            buffer = default; // we're taking over the lifetime; the caller no longer has it!
        }

        // serializes the value into a pooled buffer owned by this item
        public void SetValue(T value, IHybridCacheSerializer<T> serializer, int maxLength)
        {
            _serializer = serializer;
            var writer = RecyclableArrayBufferWriter<byte>.Create(maxLength);
            serializer.Serialize(value, writer);

            _buffer = new(writer.DetachCommitted(out var length), length, returnToPool: true);
            writer.Dispose(); // no buffers left (we just detached them), but just in case of other logic
        }

        // only pooled buffers need Release-on-eviction to be recycled
        public override bool NeedsEvictionCallback => _buffer.ReturnToPool;

        protected override void OnFinalRelease()
        {
            DebugOnlyDecrementOutstandingBuffers();
            _buffer.RecycleIfAppropriate();
        }

        public override bool TryGetValue(out T value)
        {
            if (!TryReserve()) // only if we haven't already burned
            {
                value = default!;
                return false;
            }

            try
            {
                // deserialize a fresh copy so callers can never observe shared mutable state
                value = _serializer.Deserialize(_buffer.AsSequence());
                return true;
            }
            finally
            {
                Release();
            }
        }

        public override bool TryReserveBuffer(out BufferChunk buffer)
        {
            if (TryReserve()) // only if we haven't already burned
            {
                buffer = _buffer.DoNotReturnToPool(); // not up to them!
                return true;
            }
            buffer = default;
            return false;
        }

        public override bool DebugIsImmutable => false;
    }
}

Просмотреть файл

@ -1,112 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Concurrent;
using System.ComponentModel;
using System.Linq;
using System.Reflection;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Serialization;
using Microsoft.Extensions.DependencyInjection;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
partial class DefaultHybridCache
{
    // per instance cache of typed serializers; each serializer is a
    // IHybridCacheSerializer<T> for the corresponding Type, but we can't
    // know which here - and undesirable to add an artificial non-generic
    // IHybridCacheSerializer base that serves no other purpose
    private readonly ConcurrentDictionary<Type, object> _serializers = new();

    internal int MaximumPayloadBytes { get; }

    // Resolves (and memoizes) the serializer for T: first from DI, then from the
    // registered serializer factories; throws if nothing can handle T.
    internal IHybridCacheSerializer<T> GetSerializer<T>()
    {
        return _serializers.TryGetValue(typeof(T), out var serializer)
            ? Unsafe.As<IHybridCacheSerializer<T>>(serializer) : ResolveAndAddSerializer(this);

        static IHybridCacheSerializer<T> ResolveAndAddSerializer(DefaultHybridCache @this)
        {
            // it isn't critical that we get only one serializer instance during start-up; what matters
            // is that we don't get a new serializer instance *every time*
            var serializer = @this._services.GetService<IHybridCacheSerializer<T>>();
            if (serializer is null)
            {
                foreach (var factory in @this._serializerFactories)
                {
                    if (factory.TryCreateSerializer<T>(out var current))
                    {
                        serializer = current;
                        break; // we've already reversed the factories, so: the first hit is what we want
                    }
                }
            }

            if (serializer is null)
            {
                throw new InvalidOperationException($"No {nameof(IHybridCacheSerializer<T>)} configured for type '{typeof(T).Name}'");
            }

            // store the result so we don't repeat this in future
            @this._serializers[typeof(T)] = serializer;
            return serializer;
        }
    }

    internal static class ImmutableTypeCache<T> // lazy memoize; T doesn't change per cache instance
    {
        // note for blittable types: a pure struct will be a full copy every time - nothing shared to mutate
        public static readonly bool IsImmutable = (typeof(T).IsValueType && IsBlittable<T>()) || IsImmutable(typeof(T));
    }

    // Determines whether T contains no object references (so a copy shares nothing).
    private static bool IsBlittable<T>() // minimize the generic portion
    {
#if NETCOREAPP2_0_OR_GREATER || NETSTANDARD2_1_OR_GREATER
        return !RuntimeHelpers.IsReferenceOrContainsReferences<T>();
#else
        try // down-level: only blittable types can be pinned
        {
            // get a typed, zeroed, non-null boxed instance of the appropriate type
            // (can't use (object)default(T), as that would box to null for nullable types)
            var obj = FormatterServices.GetUninitializedObject(Nullable.GetUnderlyingType(typeof(T)) ?? typeof(T));
            GCHandle.Alloc(obj, GCHandleType.Pinned).Free();
            return true;
        }
        catch
        {
            return false;
        }
#endif
    }

    // Determines whether a type can safely be shared without defensive copies.
    private static bool IsImmutable(Type type)
    {
        // check for known types
        if (type == typeof(string))
        {
            return true;
        }

        if (type.IsValueType)
        {
            // switch from Foo? to Foo if necessary
            if (Nullable.GetUnderlyingType(type) is { } nullable)
            {
                type = nullable;
            }
        }

        if (type.IsValueType || (type.IsClass && type.IsSealed))
        {
            // check for [ImmutableObject(true)]; note we're looking at this as a statement about
            // the overall immutability; for example, a type could contain a private int[] field,
            // where the field is mutable and the list is mutable; but if the type is annotated:
            // we're trusting that the API and use-case is such that the type is immutable
            return type.GetCustomAttribute<ImmutableObjectAttribute>() is { Immutable: true };
        }

        // don't trust interfaces and non-sealed types; we might have any concrete
        // type that has different behaviour
        return false;
    }
}

Просмотреть файл

@ -1,107 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Concurrent;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
// Stampede protection: ensures that, per (key, flags) pair, at most one fetch
// is in flight at a time; concurrent callers join the existing operation.
partial class DefaultHybridCache
{
    private readonly ConcurrentDictionary<StampedeKey, StampedeState> _currentOperations = new();

    // test/diagnostics hook: how many callers are currently attached to the
    // in-flight operation for this key (0 if none)
    internal int DebugGetCallerCount(string key, HybridCacheEntryFlags? flags = null)
    {
        var stampedeKey = new StampedeKey(key, flags ?? _defaultFlags);
        return _currentOperations.TryGetValue(stampedeKey, out var state) ? state.DebugCallerCount : 0;
    }

    // returns true for a new session (in which case: we need to start the work), false for a pre-existing session
    public bool GetOrCreateStampedeState<TState, T>(string key, HybridCacheEntryFlags flags, out StampedeState<TState, T> stampedeState, bool canBeCanceled)
    {
        var stampedeKey = new StampedeKey(key, flags);

        // double-checked locking to try to avoid unnecessary sessions in race conditions,
        // while avoiding the lock completely whenever possible
        if (TryJoinExistingSession(this, stampedeKey, out var existing))
        {
            stampedeState = existing;
            return false; // someone ELSE is running the work
        }

        // most common scenario here, then, is that we're not fighting with anyone else;
        // go ahead and create a placeholder state object and *try* to add it
        stampedeState = new StampedeState<TState, T>(this, stampedeKey, canBeCanceled);
        if (_currentOperations.TryAdd(stampedeKey, stampedeState))
        {
            // successfully added; indeed, no-one else was fighting: we're done
            return true; // the CURRENT caller is responsible for making the work happen
        }

        // hmm; failed to add - there's concurrent activity on the same key; we're now
        // in very rare race condition territory; go ahead and take a lock while we
        // collect our thoughts
        lock (GetPartitionedSyncLock(in stampedeKey)) // see notes in SyncLock.cs
        {
            // check again while we hold the lock
            if (TryJoinExistingSession(this, stampedeKey, out existing))
            {
                // we found an existing state we can join; do that
                stampedeState.SetCanceled(); // to be thorough: mark our speculative one as doomed (no-one has seen it, though)
                stampedeState = existing; // and replace with the one we found
                return false; // someone ELSE is running the work

                // note that in this case we allocated a StampedeState<TState, T> that got dropped on
                // the floor; in the grand scheme of things, that's OK; this is a rare outcome
            }

            // and check whether the value was L1-cached by an outgoing operation (for *us* to check needs local-cache-read,
            // and for *them* to have updated needs local-cache-write, but since the shared us/them key includes flags,
            // we can skip this if *either* flag is set)
            if ((flags & HybridCacheEntryFlags.DisableLocalCache) == 0 && _localCache.TryGetValue(key, out var untyped)
                && untyped is CacheItem<T> typed && typed.TryReserve())
            {
                stampedeState.SetResultDirect(typed);
                return false; // the work has ALREADY been done
            }

            // otherwise, either nothing existed - or the thing that already exists can't be joined;
            // in that case, go ahead and use the state that we invented a moment ago (outside of the lock)
            _currentOperations[stampedeKey] = stampedeState;
            return true; // the CURRENT caller is responsible for making the work happen
        }

        static bool TryJoinExistingSession(DefaultHybridCache @this, in StampedeKey stampedeKey,
            [NotNullWhen(true)] out StampedeState<TState, T>? stampedeState)
        {
            if (@this._currentOperations.TryGetValue(stampedeKey, out var found))
            {
                if (found is not StampedeState<TState, T> tmp)
                {
                    // same key in flight with a different T: caller bug; never returns
                    ThrowWrongType(stampedeKey.Key, found.Type, typeof(T));
                }

                if (tmp.TryAddCaller())
                {
                    // we joined an existing session
                    stampedeState = tmp;
                    return true;
                }
            }

            stampedeState = null;
            return false;
        }

        [DoesNotReturn]
        static void ThrowWrongType(string key, Type existingType, Type newType)
        {
            Debug.Assert(existingType != newType);
            throw new InvalidOperationException($"All calls to {nameof(HybridCache)} with the same key should use the same data type; the same key is being used for '{existingType.FullName}' and '{newType.FullName}' data")
            {
                Data = { { "CacheKey", key } }
            };
        }
    }
}

Просмотреть файл

@ -1,51 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Diagnostics.CodeAnalysis;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
partial class DefaultHybridCache
{
    /// <summary>
    /// Identity of one stampede operation: the cache key combined with the
    /// effective entry flags, with a pre-computed hash-code.
    /// </summary>
    internal readonly struct StampedeKey : IEquatable<StampedeKey>
    {
        private readonly string _key;
        private readonly HybridCacheEntryFlags _flags;
        private readonly int _hashCode; // we know we'll need it; compute it once only

        public StampedeKey(string key, HybridCacheEntryFlags flags)
        {
            // We'll use both the key *and* the flags as combined flag; in reality, we *expect*
            // the flags to be consistent between calls on the same operation, and it must be
            // noted that the *cache items* only use the key (not the flags), but: it gets
            // very hard to grok what the correct behaviour should be if combining two calls
            // with different flags, since they could have mutually exclusive behaviours!
            // As such, we'll treat conflicting calls entirely separately from a stampede
            // perspective.
            _key = key;
            _flags = flags;
#if NETCOREAPP2_1_OR_GREATER || NETSTANDARD2_1_OR_GREATER
            _hashCode = System.HashCode.Combine(key, flags);
#else
            _hashCode = key.GetHashCode() ^ (int)flags;
#endif
        }

        public string Key => _key;
        public HybridCacheEntryFlags Flags => _flags;

        // allow direct access to the pre-computed hash-code, semantically emphasizing that
        // this is a constant-time operation against a known value
        internal int HashCode => _hashCode;

        // fix: use short-circuiting && (was non-short-circuiting &) so the string
        // comparison is skipped entirely when the flags already differ; the result
        // is unchanged for all inputs
        public bool Equals(StampedeKey other) => _flags == other._flags && _key == other._key;

        public override bool Equals([NotNullWhen(true)] object? obj)
            => obj is StampedeKey other && Equals(other);

        public override int GetHashCode() => _hashCode;

        public override string ToString() => $"{_key} ({_flags})";
    }
}

Просмотреть файл

@ -1,102 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.CompilerServices;
using System.Threading;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
partial class DefaultHybridCache
{
    /// <summary>
    /// Base state for one in-flight stampede operation: holds the owning cache,
    /// the (key, flags) identity, the shared cache item being populated, and a
    /// cancellation source that only fires once the *last* joined caller cancels.
    /// </summary>
    internal abstract class StampedeState
#if NETCOREAPP3_0_OR_GREATER
        : IThreadPoolWorkItem
#endif
    {
        private readonly DefaultHybridCache _cache;
        private readonly CacheItem _cacheItem;

        // because multiple callers can enlist, we need to track when the *last* caller cancels
        // (and keep going until then); that means we need to run with custom cancellation
        private readonly CancellationTokenSource? _sharedCancellation;
        internal readonly CancellationToken SharedToken; // this might have a value even when _sharedCancellation is null

        // we expose the key as a by-ref readonly; this minimizes the stack work involved in passing the key around
        // (both in terms of width and copy-semantics)
        private readonly StampedeKey _key;
        public ref readonly StampedeKey Key => ref _key;
        protected CacheItem CacheItem => _cacheItem;

        /// <summary>
        /// Create a stamped token optionally with shared cancellation support
        /// </summary>
        protected StampedeState(DefaultHybridCache cache, in StampedeKey key, CacheItem cacheItem, bool canBeCanceled)
        {
            _cache = cache;
            _key = key;
            _cacheItem = cacheItem;
            if (canBeCanceled)
            {
                // if the first (or any) caller can't be cancelled; we'll never get to zero; no point tracking
                // (in reality, all callers usually use the same path, so cancellation is usually "all" or "none")
                _sharedCancellation = new();
                SharedToken = _sharedCancellation.Token;
            }
            else
            {
                SharedToken = CancellationToken.None;
            }
        }

        /// <summary>
        /// Create a stamped token using a fixed cancellation token
        /// </summary>
        protected StampedeState(DefaultHybridCache cache, in StampedeKey key, CacheItem cacheItem, CancellationToken token)
        {
            _cache = cache;
            _key = key;
            _cacheItem = cacheItem;
            SharedToken = token;
        }

#if !NETCOREAPP3_0_OR_GREATER
        // down-level thread-pool dispatch: no IThreadPoolWorkItem, so route through a shared WaitCallback
        protected static readonly WaitCallback SharedWaitCallback = static obj => Unsafe.As<StampedeState>(obj).Execute();
#endif

        protected DefaultHybridCache Cache => _cache;

        // performs the actual fetch; implemented by the generic subclass
        public abstract void Execute();

        protected int MaximumPayloadBytes => _cache.MaximumPayloadBytes;

        public override string ToString() => Key.ToString();

        public abstract void SetCanceled();

        // diagnostics only: current reservation count on the shared cache item
        public int DebugCallerCount => _cacheItem.RefCount;

        public abstract Type Type { get; }

        // detach one caller; if that was the last one, cancel the shared operation
        public void CancelCaller()
        {
            // note that TryAddCaller has protections to avoid getting back from zero
            if (_cacheItem.Release())
            {
                // we're the last to leave; turn off the lights
                _sharedCancellation?.Cancel();
                SetCanceled();
            }
        }

        public bool TryAddCaller() => _cacheItem.TryReserve();
    }

    private void RemoveStampedeState(in StampedeKey key)
    {
        // must take the same partitioned lock as the "add" path so add/remove cannot race
        lock (GetPartitionedSyncLock(in key)) // see notes in SyncLock.cs
        {
            _currentOperations.TryRemove(key, out _);
        }
    }
}

Просмотреть файл

@ -1,287 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Threading;
using System.Threading.Tasks;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
partial class DefaultHybridCache
{
    /// <summary>
    /// Concrete stampede operation for a data callback of shape
    /// <c>Func&lt;TState, CancellationToken, ValueTask&lt;T&gt;&gt;</c>: reads L2 and/or the
    /// underlying data source, publishes the result to all joined callers via a
    /// shared <see cref="TaskCompletionSource{TResult}"/>, and writes back to
    /// L1/L2 as the entry flags allow.
    /// </summary>
    internal sealed class StampedeState<TState, T> : StampedeState
    {
        private readonly TaskCompletionSource<CacheItem<T>>? _result;
        private TState? _state;
        private Func<TState, CancellationToken, ValueTask<T>>? _underlying; // main data factory
        private HybridCacheEntryOptions? _options;
        private Task<T>? _sharedUnwrap; // allows multiple non-cancellable callers to share a single task (when no defensive copy needed)

        public StampedeState(DefaultHybridCache cache, in StampedeKey key, bool canBeCanceled)
            : base(cache, key, CacheItem<T>.Create(), canBeCanceled)
        {
            _result = new(TaskCreationOptions.RunContinuationsAsynchronously);
        }

        public override Type Type => typeof(T);

        public StampedeState(DefaultHybridCache cache, in StampedeKey key, CancellationToken token)
            : base(cache, key, CacheItem<T>.Create(), token) { } // no TCS in this case - this is for SetValue only

        // start the fetch on the thread-pool; used when the initiating caller may
        // cancel independently of the shared operation
        public void QueueUserWorkItem(in TState state, Func<TState, CancellationToken, ValueTask<T>> underlying, HybridCacheEntryOptions? options)
        {
            Debug.Assert(_underlying is null);
            Debug.Assert(underlying is not null);

            // initialize the callback state
            _state = state;
            _underlying = underlying;
            _options = options;

#if NETCOREAPP3_0_OR_GREATER
            ThreadPool.UnsafeQueueUserWorkItem(this, false);
#else
            ThreadPool.UnsafeQueueUserWorkItem(SharedWaitCallback, this);
#endif
        }

        // run the fetch inline; used when the caller runs to completion regardless
        public Task ExecuteDirectAsync(in TState state, Func<TState, CancellationToken, ValueTask<T>> underlying, HybridCacheEntryOptions? options)
        {
            Debug.Assert(_underlying is null);
            Debug.Assert(underlying is not null);

            // initialize the callback state
            _state = state;
            _underlying = underlying;
            _options = options;

            return BackgroundFetchAsync();
        }

        public override void Execute() => _ = BackgroundFetchAsync();

        // core pipeline: L2 read -> underlying fetch -> publish -> L2 write-back;
        // all failures funnel into SetException so waiting callers always complete
        private async Task BackgroundFetchAsync()
        {
            try
            {
                // read from L2 if appropriate
                if ((Key.Flags & HybridCacheEntryFlags.DisableDistributedCacheRead) == 0)
                {
                    var result = await Cache.GetFromL2Async(Key.Key, SharedToken).ConfigureAwait(false);

                    if (result.Array is not null)
                    {
                        SetResultAndRecycleIfAppropriate(ref result);
                        return;
                    }
                }

                // nothing from L2; invoke the underlying data store
                if ((Key.Flags & HybridCacheEntryFlags.DisableUnderlyingData) == 0)
                {
                    var cacheItem = SetResult(await _underlying!(_state!, SharedToken).ConfigureAwait(false));

                    // note that at this point we've already released most or all of the waiting callers; everything
                    // else here is background

                    // write to L2 if appropriate
                    if ((Key.Flags & HybridCacheEntryFlags.DisableDistributedCacheWrite) == 0)
                    {
                        if (cacheItem.TryReserveBuffer(out var buffer))
                        {
                            // mutable: we've already serialized it for the shared cache item
                            await Cache.SetL2Async(Key.Key, in buffer, _options, SharedToken).ConfigureAwait(false);
                            cacheItem.Release(); // because we reserved
                        }
                        else if (cacheItem.TryGetValue(out var value))
                        {
                            // immutable: we'll need to do the serialize ourselves
                            var writer = RecyclableArrayBufferWriter<byte>.Create(MaximumPayloadBytes); // note this lifetime spans the SetL2Async
                            Cache.GetSerializer<T>().Serialize(value, writer);
                            buffer = new(writer.GetBuffer(out var length), length, returnToPool: false); // writer still owns the buffer
                            await Cache.SetL2Async(Key.Key, in buffer, _options, SharedToken).ConfigureAwait(false);
                            writer.Dispose(); // recycle on success
                        }
                    }
                }
                else
                {
                    // can't read from data store; implies we shouldn't write
                    // back to anywhere else, either
                    SetDefaultResult();
                }
            }
            catch (Exception ex)
            {
                SetException(ex);
            }
        }

        // the shared completion task; only valid for shared (TCS-backed) instances
        public Task<CacheItem<T>> Task
        {
            get
            {
                Debug.Assert(_result is not null);
                return _result is null ? Invalid() : _result.Task;

                static Task<CacheItem<T>> Invalid() => System.Threading.Tasks.Task.FromException<CacheItem<T>>(new InvalidOperationException("Task should not be accessed for non-shared instances"));
            }
        }

        private void SetException(Exception ex)
        {
            if (_result is not null)
            {
                // de-register first so a retry can start a fresh operation
                Cache.RemoveStampedeState(in Key);
                _result.TrySetException(ex);
            }
        }

        // ONLY set the result, without any other side-effects
        internal void SetResultDirect(CacheItem<T> value)
            => _result?.TrySetResult(value);

        private void SetResult(CacheItem<T> value)
        {
            if ((Key.Flags & HybridCacheEntryFlags.DisableLocalCacheWrite) == 0)
            {
                Cache.SetL1(Key.Key, value, _options); // we can do this without a TCS, for SetValue
            }

            if (_result is not null)
            {
                Cache.RemoveStampedeState(in Key);
                _result.TrySetResult(value);
            }
        }

        private void SetDefaultResult()
        {
            // note we don't store this dummy result in L1 or L2
            if (_result is not null)
            {
                Cache.RemoveStampedeState(in Key);
                _result.TrySetResult(ImmutableCacheItem<T>.GetReservedShared());
            }
        }

        // publish a result obtained from L2; the buffer is either handed to the
        // cache-item (mutable path) or deserialized and recycled (immutable path)
        private void SetResultAndRecycleIfAppropriate(ref BufferChunk value)
        {
            // set a result from L2 cache
            Debug.Assert(value.Array is not null, "expected buffer");

            var serializer = Cache.GetSerializer<T>();
            CacheItem<T> cacheItem;
            switch (CacheItem)
            {
                case ImmutableCacheItem<T> immutable:
                    // deserialize; and store object; buffer can be recycled now
                    immutable.SetValue(serializer.Deserialize(new(value.Array!, 0, value.Length)));
                    value.RecycleIfAppropriate();
                    cacheItem = immutable;
                    break;
                case MutableCacheItem<T> mutable:
                    // use the buffer directly as the backing in the cache-item; do *not* recycle now
                    mutable.SetValue(ref value, serializer);
                    mutable.DebugOnlyTrackBuffer(Cache);
                    cacheItem = mutable;
                    break;
                default:
                    cacheItem = ThrowUnexpectedCacheItem();
                    break;
            }

            SetResult(cacheItem);
        }

        [DoesNotReturn]
        private static CacheItem<T> ThrowUnexpectedCacheItem() => throw new InvalidOperationException("Unexpected cache item");

        // publish a result obtained from the underlying data callback
        private CacheItem<T> SetResult(T value)
        {
            // set a result from a value we calculated directly
            CacheItem<T> cacheItem;
            switch (CacheItem)
            {
                case ImmutableCacheItem<T> immutable:
                    // no serialize needed
                    immutable.SetValue(value);
                    cacheItem = immutable;
                    break;
                case MutableCacheItem<T> mutable:
                    // serialization happens here
                    mutable.SetValue(value, Cache.GetSerializer<T>(), MaximumPayloadBytes);
                    mutable.DebugOnlyTrackBuffer(Cache);
                    cacheItem = mutable;
                    break;
                default:
                    cacheItem = ThrowUnexpectedCacheItem();
                    break;
            }

            SetResult(cacheItem);
            return cacheItem;
        }

        public override void SetCanceled() => _result?.TrySetCanceled(SharedToken);

        // resolve the shared task into the caller-visible T, taking the synchronous
        // fast path when the operation has already completed successfully
        internal ValueTask<T> UnwrapReservedAsync()
        {
            var task = Task;
#if NETCOREAPP2_0_OR_GREATER || NETSTANDARD2_1_OR_GREATER
            if (task.IsCompletedSuccessfully)
#else
            if (task.Status == TaskStatus.RanToCompletion)
#endif
            {
                return new(task.Result.GetReservedValue());
            }

            // if the type is immutable, callers can share the final step too (this may leave dangling
            // reservation counters, but that's OK)
            var result = ImmutableTypeCache<T>.IsImmutable ? (_sharedUnwrap ??= Awaited(Task)) : Awaited(Task);
            return new(result);

            static async Task<T> Awaited(Task<CacheItem<T>> task)
                => (await task.ConfigureAwait(false)).GetReservedValue();
        }

        // join this operation from a (possibly cancellable) caller
        public ValueTask<T> JoinAsync(CancellationToken token)
        {
            // if the underlying has already completed, and/or our local token can't cancel: we
            // can simply wrap the shared task; otherwise, we need our own cancellation state
            return token.CanBeCanceled && !Task.IsCompleted ? WithCancellation(this, token) : UnwrapReservedAsync();

            static async ValueTask<T> WithCancellation(StampedeState<TState, T> stampede, CancellationToken token)
            {
                var cancelStub = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
                using var reg = token.Register(static obj =>
                {
                    ((TaskCompletionSource<bool>)obj!).TrySetResult(true);
                }, cancelStub);

                CacheItem<T> result;
                try
                {
                    var first = await System.Threading.Tasks.Task.WhenAny(stampede.Task, cancelStub.Task).ConfigureAwait(false);
                    if (ReferenceEquals(first, cancelStub.Task))
                    {
                        // we expect this to throw, because otherwise we wouldn't have gotten here
                        token.ThrowIfCancellationRequested(); // get an appropriate exception
                    }
                    Debug.Assert(ReferenceEquals(first, stampede.Task));

                    // this has already completed, but we'll get the stack nicely
                    result = await stampede.Task.ConfigureAwait(false);
                }
                catch
                {
                    stampede.CancelCaller();
                    throw;
                }

                // outside the catch, so we know we only decrement one way or the other
                return result.GetReservedValue();
            }
        }
    }
}

Просмотреть файл

@ -1,34 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
partial class DefaultHybridCache
{
    // Stampede protection needs the "add new" and "remove" steps to share a lock
    // (otherwise remove could race with a concurrent add on the same key), but a
    // single lock would serialize all executions. To keep contention low, the
    // lock token is partitioned 8 ways using the low 3 bits of the key's
    // eagerly-computed hash-code. The eight monitors are inlined as individual
    // fields rather than an array: no bounds-check, no extra dereference, and no
    // array allocation (tiny wins, but free to take).
    private readonly object _syncLock0 = new(), _syncLock1 = new(), _syncLock2 = new(), _syncLock3 = new(),
        _syncLock4 = new(), _syncLock5 = new(), _syncLock6 = new(), _syncLock7 = new();

    // Select the monitor for this key; constant-time (the hash is pre-computed).
    internal object GetPartitionedSyncLock(in StampedeKey key)
    {
        switch (key.HashCode & 0b111) // 8 partitions from the low 3 bits
        {
            case 0: return _syncLock0;
            case 1: return _syncLock1;
            case 2: return _syncLock2;
            case 3: return _syncLock3;
            case 4: return _syncLock4;
            case 5: return _syncLock5;
            case 6: return _syncLock6;
            default: return _syncLock7;
        }
    }
}

Просмотреть файл

@ -1,164 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Caching.Memory;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Logging.Abstractions;
using Microsoft.Extensions.Options;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
/// <summary>
/// The inbuilt ASP.NET implementation of <see cref="HybridCache"/>.
/// </summary>
internal sealed partial class DefaultHybridCache : HybridCache
{
    private readonly IDistributedCache? _backendCache;
    private readonly IMemoryCache _localCache;
    private readonly IServiceProvider _services; // we can't resolve per-type serializers until we see each T
    private readonly IHybridCacheSerializerFactory[] _serializerFactories;
    private readonly HybridCacheOptions _options;
    private readonly ILogger _logger;
    private readonly CacheFeatures _features; // used to avoid constant type-testing
    private readonly HybridCacheEntryFlags _hardFlags; // *always* present (for example, because no L2)
    private readonly HybridCacheEntryFlags _defaultFlags; // note this already includes hardFlags
    private readonly TimeSpan _defaultExpiration;
    private readonly TimeSpan _defaultLocalCacheExpiration;
    private readonly DistributedCacheEntryOptions _defaultDistributedCacheExpiration;

    // capabilities discovered from the configured backend (L2), computed once in the ctor
    [Flags]
    internal enum CacheFeatures
    {
        None = 0,
        BackendCache = 1 << 0,
        BackendBuffers = 1 << 1,
    }

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private CacheFeatures GetFeatures(CacheFeatures mask) => _features & mask;

    internal CacheFeatures GetFeatures() => _features;

    // used to restrict features in test suite
    internal void DebugRemoveFeatures(CacheFeatures features) => Unsafe.AsRef(in _features) &= ~features;

    public DefaultHybridCache(IOptions<HybridCacheOptions> options, IServiceProvider services)
    {
        _services = services ?? throw new ArgumentNullException(nameof(services));
        _localCache = services.GetRequiredService<IMemoryCache>();
        _options = options.Value;
        _logger = services.GetService<ILoggerFactory>()?.CreateLogger(typeof(HybridCache)) ?? NullLogger.Instance;
        _backendCache = services.GetService<IDistributedCache>(); // note optional

        // ignore L2 if it is really just the same L1, wrapped
        // (note not just an "is" test; if someone has a custom subclass, who knows what it does?)
        if (_backendCache is not null
            && _backendCache.GetType() == typeof(MemoryDistributedCache)
            && _localCache.GetType() == typeof(MemoryCache))
        {
            _backendCache = null;
        }

        // perform type-tests on the backend once only
        _features |= _backendCache switch
        {
            IBufferDistributedCache => CacheFeatures.BackendCache | CacheFeatures.BackendBuffers,
            not null => CacheFeatures.BackendCache,
            _ => CacheFeatures.None
        };

        // When resolving serializers via the factory API, we will want the *last* instance,
        // i.e. "last added wins"; we can optimize by reversing the array ahead of time, and
        // taking the first match
        var factories = services.GetServices<IHybridCacheSerializerFactory>().ToArray();
        Array.Reverse(factories);
        _serializerFactories = factories;

        MaximumPayloadBytes = checked((int)_options.MaximumPayloadBytes); // for now hard-limit to 2GiB

        var defaultEntryOptions = _options.DefaultEntryOptions;

        if (_backendCache is null)
        {
            // no L2: permanently disable all distributed-cache behaviour
            _hardFlags |= HybridCacheEntryFlags.DisableDistributedCache;
        }

        _defaultFlags = (defaultEntryOptions?.Flags ?? HybridCacheEntryFlags.None) | _hardFlags;
        _defaultExpiration = defaultEntryOptions?.Expiration ?? TimeSpan.FromMinutes(5);
        _defaultLocalCacheExpiration = defaultEntryOptions?.LocalCacheExpiration ?? TimeSpan.FromMinutes(1);
        _defaultDistributedCacheExpiration = new DistributedCacheEntryOptions { AbsoluteExpirationRelativeToNow = _defaultExpiration };
    }

    internal IDistributedCache? BackendCache => _backendCache;
    internal IMemoryCache LocalCache => _localCache;

    internal HybridCacheOptions Options => _options;

    // per-call flags: explicit options override the defaults, but the hard flags always apply
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private HybridCacheEntryFlags GetEffectiveFlags(HybridCacheEntryOptions? options)
        => (options?.Flags | _hardFlags) ?? _defaultFlags;

    /// <summary>
    /// Gets the cached value for <paramref name="key"/>, or produces and caches it
    /// via <paramref name="underlyingDataCallback"/> (with stampede protection).
    /// </summary>
    public override ValueTask<T> GetOrCreateAsync<TState, T>(string key, TState state, Func<TState, CancellationToken, ValueTask<T>> underlyingDataCallback, HybridCacheEntryOptions? options = null, IEnumerable<string>? tags = null, CancellationToken cancellationToken = default)
    {
        var canBeCanceled = cancellationToken.CanBeCanceled;
        if (canBeCanceled)
        {
            cancellationToken.ThrowIfCancellationRequested();
        }

        var flags = GetEffectiveFlags(options);
        if ((flags & HybridCacheEntryFlags.DisableLocalCacheRead) == 0 && _localCache.TryGetValue(key, out var untyped)
            && untyped is CacheItem<T> typed && typed.TryGetValue(out var value))
        {
            // short-circuit
            return new(value);
        }

        if (GetOrCreateStampedeState<TState, T>(key, flags, out var stampede, canBeCanceled))
        {
            // new query; we're responsible for making it happen
            if (canBeCanceled)
            {
                // *we* might cancel, but someone else might be depending on the result; start the
                // work independently, then we'll join the outcome
                stampede.QueueUserWorkItem(in state, underlyingDataCallback, options);
            }
            else
            {
                // we're going to run to completion; no need to get complicated
                _ = stampede.ExecuteDirectAsync(in state, underlyingDataCallback, options); // this larger task includes L2 write etc
                return stampede.UnwrapReservedAsync();
            }
        }

        return stampede.JoinAsync(cancellationToken);
    }

    /// <summary>
    /// Removes <paramref name="key"/> from L1 and (when configured) L2.
    /// </summary>
    public override ValueTask RemoveAsync(string key, CancellationToken token = default)
    {
        _localCache.Remove(key);
        return _backendCache is null ? default : new(_backendCache.RemoveAsync(key, token));
    }

    public override ValueTask RemoveByTagAsync(string tag, CancellationToken token = default)
        => default; // tags not yet implemented

    /// <summary>
    /// Stores <paramref name="value"/> unconditionally, reusing the stampede
    /// machinery as a direct pass-through.
    /// </summary>
    public override ValueTask SetAsync<T>(string key, T value, HybridCacheEntryOptions? options = null, IEnumerable<string>? tags = null, CancellationToken token = default)
    {
        // since we're forcing a write: disable L1+L2 read; we'll use a direct pass-thru of the value as the callback, to reuse all the code;
        // note also that stampede token is not shared with anyone else
        var flags = GetEffectiveFlags(options) | (HybridCacheEntryFlags.DisableLocalCacheRead | HybridCacheEntryFlags.DisableDistributedCacheRead);
        var state = new StampedeState<T, T>(this, new StampedeKey(key, flags), token);
        return new(state.ExecuteDirectAsync(value, static (state, _) => new(state), options)); // note this spans L2 write etc
    }
}

Просмотреть файл

@ -1,42 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Buffers;
using System.Diagnostics.CodeAnalysis;
using System.Text.Json;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
// Catch-all serializer factory: serializes any T as JSON via System.Text.Json.
internal sealed class DefaultJsonSerializerFactory : IHybridCacheSerializerFactory
{
    public bool TryCreateSerializer<T>([NotNullWhen(true)] out IHybridCacheSerializer<T>? serializer)
    {
        // no restriction
        serializer = new DefaultJsonSerializer<T>();
        return true;
    }

    internal sealed class DefaultJsonSerializer<T> : IHybridCacheSerializer<T>
    {
        // NOTE(review): serialization is pinned to JsonSerializerOptions.Default (and
        // deserialization to the reader defaults) - custom converters/options cannot be
        // supplied through this factory; confirm that is the intended contract
        T IHybridCacheSerializer<T>.Deserialize(ReadOnlySequence<byte> source)
        {
            var reader = new Utf8JsonReader(source);
#pragma warning disable IDE0079 // unnecessary suppression: TFM-dependent
#pragma warning disable IL2026, IL3050 // AOT bits
            return JsonSerializer.Deserialize<T>(ref reader)!;
#pragma warning restore IL2026, IL3050
#pragma warning restore IDE0079
        }

        void IHybridCacheSerializer<T>.Serialize(T value, IBufferWriter<byte> target)
        {
            using var writer = new Utf8JsonWriter(target);
#pragma warning disable IDE0079 // unnecessary suppression: TFM-dependent
#pragma warning disable IL2026, IL3050 // AOT bits
            JsonSerializer.Serialize<T>(writer, value, JsonSerializerOptions.Default);
#pragma warning restore IL2026, IL3050
#pragma warning restore IDE0079
        }
    }
}

Просмотреть файл

@ -1,56 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Buffers;
using System.Diagnostics;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
// Serializer for the two types handled without JSON: string (UTF-8) and byte[] (raw copy).
internal sealed class InbuiltTypeSerializer : IHybridCacheSerializer<string>, IHybridCacheSerializer<byte[]>
{
    public static InbuiltTypeSerializer Instance { get; } = new();

    string IHybridCacheSerializer<string>.Deserialize(ReadOnlySequence<byte> source)
    {
#if NET5_0_OR_GREATER
        // modern API decodes straight from the sequence
        return Encoding.UTF8.GetString(source);
#else
        if (source.IsSingleSegment && MemoryMarshal.TryGetArray(source.First, out var segment))
        {
            // we can use the existing single chunk as-is
            return Encoding.UTF8.GetString(segment.Array, segment.Offset, segment.Count);
        }

        // multi-segment (or no array view): linearize into a pooled scratch buffer first
        var length = checked((int)source.Length);
        var oversized = ArrayPool<byte>.Shared.Rent(length);
        source.CopyTo(oversized);
        var s = Encoding.UTF8.GetString(oversized, 0, length);
        ArrayPool<byte>.Shared.Return(oversized);
        return s;
#endif
    }

    void IHybridCacheSerializer<string>.Serialize(string value, IBufferWriter<byte> target)
    {
#if NET5_0_OR_GREATER
        // modern API encodes directly into the IBufferWriter
        Encoding.UTF8.GetBytes(value, target);
#else
        // down-level: encode into a pooled scratch buffer, then copy to the writer
        var length = Encoding.UTF8.GetByteCount(value);
        var oversized = ArrayPool<byte>.Shared.Rent(length);
        var actual = Encoding.UTF8.GetBytes(value, 0, value.Length, oversized, 0);
        Debug.Assert(actual == length);
        target.Write(new(oversized, 0, length));
        ArrayPool<byte>.Shared.Return(oversized);
#endif
    }

    // byte[] payloads are stored verbatim
    byte[] IHybridCacheSerializer<byte[]>.Deserialize(ReadOnlySequence<byte> source)
        => source.ToArray();

    void IHybridCacheSerializer<byte[]>.Serialize(byte[] value, IBufferWriter<byte> target)
        => target.Write(value);
}

Просмотреть файл

@ -1,200 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Buffers;
using System.Diagnostics;
using System.Threading;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
// this is effectively a cut-down re-implementation of ArrayBufferWriter
// from https://github.com/dotnet/runtime/blob/6cd9bf1937c3b4d2f7304a6c534aacde58a202b6/src/libraries/Common/src/System/Buffers/ArrayBufferWriter.cs
// except it uses the array pool for allocations
// An IBufferWriter<T> whose backing array comes from ArrayPool<T>.Shared, with a
// single-instance "spare" cache so the most recently disposed writer can be reused
// without reallocating. An optional max-length quota is enforced in Advance.
internal sealed class RecyclableArrayBufferWriter<T> : IBufferWriter<T>, IDisposable
{
    // Usage note: *normally* you might want to use "using" for this, and that is fine;
    // however, caution should be exercised in exception scenarios where we don't 100%
    // know that the caller has stopped touching the buffer; in particular, this means
    // scenarios involving a combination of external code and (for example) "async".
    // In those cases, it may be preferable to manually dispose in the success case,
    // and just drop the buffers in the failure case, i.e. instead of:
    //
    // using (writer)
    // { DoStuff(); }
    //
    // simply:
    //
    // DoStuff();
    // writer.Dispose();
    //
    // This does not represent a problem, and is consistent with many ArrayPool use-cases.
    // Copy of Array.MaxLength.
    // Used by projects targeting .NET Framework.
    private const int ArrayMaxLength = 0x7FFFFFC7;
    private const int DefaultInitialBufferSize = 256;
    // pooled backing store; empty array sentinel when nothing is rented
    private T[] _buffer;
    // number of elements committed via Advance
    private int _index;
    // quota applied in Advance; set per-use by Create
    private int _maxLength;
    public int CommittedBytes => _index;
    public int FreeCapacity => _buffer.Length - _index;
    // single cached instance shared across all callers (per T); see Create/Dispose
    private static RecyclableArrayBufferWriter<T>? _spare;
    // Obtain a writer, preferring the cached "spare" instance; maxLength caps
    // the total committed size (enforced in Advance).
    public static RecyclableArrayBufferWriter<T> Create(int maxLength)
    {
        var obj = Interlocked.Exchange(ref _spare, null) ?? new();
        Debug.Assert(obj._index == 0);
        obj._maxLength = maxLength;
        return obj;
    }
    private RecyclableArrayBufferWriter()
    {
        _buffer = [];
        _index = 0;
        _maxLength = int.MaxValue;
    }
    public void Dispose()
    {
        // attempt to reuse everything via "spare"; if that isn't possible,
        // recycle the buffers instead
        _index = 0;
        if (Interlocked.CompareExchange(ref _spare, this, null) != null)
        {
            var tmp = _buffer;
            _buffer = [];
            if (tmp.Length != 0)
            {
                ArrayPool<T>.Shared.Return(tmp);
            }
        }
    }
    // Commit count elements previously written into GetSpan/GetMemory; throws if
    // count exceeds the available capacity or would breach the _maxLength quota.
    public void Advance(int count)
    {
        if (count < 0)
        {
            throw new ArgumentException(null, nameof(count));
        }
        if (_index > _buffer.Length - count)
        {
            ThrowCount();
        }
        if (_index + count > _maxLength)
        {
            ThrowQuota();
        }
        _index += count;
        static void ThrowCount()
            => throw new ArgumentOutOfRangeException(nameof(count));
        static void ThrowQuota()
            => throw new InvalidOperationException("Max length exceeded");
    }
    /// <summary>
    /// Disconnect the current buffer so that we can store it without it being recycled
    /// </summary>
    internal T[] DetachCommitted(out int length)
    {
        // ownership of the (pooled) array transfers to the caller; this instance
        // reverts to the empty sentinel
        var tmp = _index == 0 ? [] : _buffer;
        length = _index;
        _buffer = [];
        _index = 0;
        return tmp;
    }
    public void ResetInPlace()
    {
        // resets the writer *without* resetting the buffer;
        // the existing memory should be considered "gone"
        // (to claim the buffer instead, use DetachCommitted)
        _index = 0;
    }
    // Expose the raw backing array plus committed length; the array remains
    // owned (and may later be recycled) by this writer.
    internal T[] GetBuffer(out int length)
    {
        length = _index;
        return _index == 0 ? [] : _buffer;
    }
    public ReadOnlyMemory<T> GetCommittedMemory() => new(_buffer, 0, _index); // could also directly expose a ReadOnlySpan<byte> if useful
    public Memory<T> GetMemory(int sizeHint = 0)
    {
        CheckAndResizeBuffer(sizeHint);
        Debug.Assert(_buffer.Length > _index);
        return _buffer.AsMemory(_index);
    }
    public Span<T> GetSpan(int sizeHint = 0)
    {
        CheckAndResizeBuffer(sizeHint);
        Debug.Assert(_buffer.Length > _index);
        return _buffer.AsSpan(_index);
    }
    // create a standalone isolated copy of the buffer
    public T[] ToArray() => _buffer.AsSpan(0, _index).ToArray();
    // Grow the pooled backing array (doubling, at minimum DefaultInitialBufferSize)
    // so that at least sizeHint elements are available past _index; logic mirrors
    // ArrayBufferWriter from dotnet/runtime, but rents/returns via ArrayPool.
    private void CheckAndResizeBuffer(int sizeHint)
    {
        if (sizeHint <= 0)
        {
            sizeHint = 1;
        }
        if (sizeHint > FreeCapacity)
        {
            var currentLength = _buffer.Length;
            // Attempt to grow by the larger of the sizeHint and double the current size.
            var growBy = Math.Max(sizeHint, currentLength);
            if (currentLength == 0)
            {
                growBy = Math.Max(growBy, DefaultInitialBufferSize);
            }
            var newSize = currentLength + growBy;
            if ((uint)newSize > int.MaxValue)
            {
                // int overflow in the doubling math; fall back to the minimum viable size
                // Attempt to grow to ArrayMaxLength.
                var needed = (uint)(currentLength - FreeCapacity + sizeHint);
                Debug.Assert(needed > currentLength);
                if (needed > ArrayMaxLength)
                {
                    ThrowOutOfMemoryException();
                }
                newSize = ArrayMaxLength;
            }
            // resize the backing buffer
            var oldArray = _buffer;
            _buffer = ArrayPool<T>.Shared.Rent(newSize);
            oldArray.AsSpan(0, _index).CopyTo(_buffer);
            if (oldArray.Length != 0)
            {
                ArrayPool<T>.Shared.Return(oldArray);
            }
        }
        Debug.Assert(FreeCapacity > 0 && FreeCapacity >= sizeHint);
        static void ThrowOutOfMemoryException() => throw new InvalidOperationException("Unable to grow buffer as requested");
    }
}

Просмотреть файл

@ -1,27 +0,0 @@
# HybridCache internal design
`HybridCache` encapsulates serialization, caching and stampede protection.
The `DefaultHybridCache` implementation keeps a collection of `StampedeState` entries
that represent the current in-flight operations (keyed by `StampedeKey`); if a duplicate
operation occurs during the execution, the second operation will be joined with that
same flow, rather than executing independently. When attempting to merge with an
existing flow, interlocked counting is used: we can only join if we can successfully
increment the value from a non-zero value (zero meaning all existing consumers have
canceled, and the shared token is therefore canceled).
The `StampedeState<>` performs back-end fetch operations, resulting not in a `T` (of the final
value), but instead a `CacheItem<T>`; this is the object that gets put into L1 cache,
and can describe both mutable and immutable types; the significance here is that for
mutable types, we need a defensive copy per-call to prevent callers impacting each-other.
`StampedeState<>` combines cancellation (so that operations proceed as long as *a* caller
is still active); this covers all L2 access and serialization operations, releasing all pending
shared callers for the same operation. Note that L2 storage can occur *after* callers
have been released.
To ensure correct buffer recycling, when dealing with cache entries that need defensive copies
we use more ref-counting while reading the buffer, combined with an eviction callback which
decrements that counter. This means that we recycle things when evicted, without impacting
in-progress deserialize operations. To simplify tracking, `BufferChunk` acts like a `byte[]`+`int`
(we don't need a non-zero offset), while also tracking "should this be returned to the pool?".

Просмотреть файл

@ -1,39 +0,0 @@
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<Description>Multi-level caching implementation building on and extending IDistributedCache</Description>
<!--
relevant TFM feature summary (all "nice to have", with reasonable workarounds):
nca 2.0 / ns 2.1: RuntimeHelpers.IsReferenceOrContainsReferences; Task.IsCompletedSuccessfully; HashCode
nca 3.0 : IThreadPoolWorkItem
net 5.0 : [DynamicallyAccessedMembers], EncodingExtensions.GetString/GetBytes, IsExternalInit
net 7.0 : ArgumentNullException.ThrowIfNull
-->
<TargetFrameworks>$(DefaultNetCoreTargetFramework);$(DefaultNetFxTargetFramework);netstandard2.0;netstandard2.1;$(CurrentLtsTargetFramework)</TargetFrameworks>
<GenerateDocumentationFile>true</GenerateDocumentationFile>
<PackageTags>cache;distributedcache;hybrid</PackageTags>
<ExcludeFromSourceOnlyBuild>true</ExcludeFromSourceOnlyBuild>
<IsAspNetCoreApp>false</IsAspNetCoreApp>
<IsPackable>true</IsPackable>
<IsShipping>true</IsShipping>
</PropertyGroup>
<ItemGroup>
<Compile Include="$(SharedSourceRoot)IsExternalInit.cs" LinkBase="Shared" />
</ItemGroup>
<ItemGroup>
<Reference Include="Microsoft.Extensions.Caching.Abstractions" />
<Reference Include="Microsoft.Extensions.Logging.Abstractions" />
<Reference Include="Microsoft.Extensions.Options" />
<InternalsVisibleTo Include="Microsoft.Extensions.Caching.Hybrid.Tests" />
<!-- this provides the default L1 implementation; L2 is optional -->
<Reference Include="Microsoft.Extensions.Caching.Memory" />
</ItemGroup>
<ItemGroup Condition="'$(TargetFramework)' == '$(DefaultNetFxTargetFramework)' or '$(TargetFramework)' == 'netstandard2.0' or '$(TargetFramework)' == 'netstandard2.1'">
<Reference Include="System.Text.Json" />
<Reference Include="Microsoft.Bcl.TimeProvider" />
</ItemGroup>
</Project>

Просмотреть файл

@ -1 +0,0 @@
#nullable enable

Просмотреть файл

@ -1,23 +0,0 @@
#nullable enable
Microsoft.Extensions.Caching.Hybrid.HybridCacheOptions
Microsoft.Extensions.Caching.Hybrid.HybridCacheOptions.DefaultEntryOptions.get -> Microsoft.Extensions.Caching.Hybrid.HybridCacheEntryOptions?
Microsoft.Extensions.Caching.Hybrid.HybridCacheOptions.DefaultEntryOptions.set -> void
Microsoft.Extensions.Caching.Hybrid.HybridCacheOptions.DisableCompression.get -> bool
Microsoft.Extensions.Caching.Hybrid.HybridCacheOptions.DisableCompression.set -> void
Microsoft.Extensions.Caching.Hybrid.HybridCacheOptions.HybridCacheOptions() -> void
Microsoft.Extensions.Caching.Hybrid.HybridCacheOptions.MaximumKeyLength.get -> int
Microsoft.Extensions.Caching.Hybrid.HybridCacheOptions.MaximumKeyLength.set -> void
Microsoft.Extensions.Caching.Hybrid.HybridCacheOptions.MaximumPayloadBytes.get -> long
Microsoft.Extensions.Caching.Hybrid.HybridCacheOptions.MaximumPayloadBytes.set -> void
Microsoft.Extensions.Caching.Hybrid.HybridCacheOptions.ReportTagMetrics.get -> bool
Microsoft.Extensions.Caching.Hybrid.HybridCacheOptions.ReportTagMetrics.set -> void
Microsoft.Extensions.Caching.Hybrid.IHybridCacheBuilder
Microsoft.Extensions.Caching.Hybrid.IHybridCacheBuilder.Services.get -> Microsoft.Extensions.DependencyInjection.IServiceCollection!
Microsoft.Extensions.DependencyInjection.HybridCacheBuilderExtensions
Microsoft.Extensions.DependencyInjection.HybridCacheServiceExtensions
static Microsoft.Extensions.DependencyInjection.HybridCacheBuilderExtensions.AddSerializer<T, TImplementation>(this Microsoft.Extensions.Caching.Hybrid.IHybridCacheBuilder! builder) -> Microsoft.Extensions.Caching.Hybrid.IHybridCacheBuilder!
static Microsoft.Extensions.DependencyInjection.HybridCacheBuilderExtensions.AddSerializer<T>(this Microsoft.Extensions.Caching.Hybrid.IHybridCacheBuilder! builder, Microsoft.Extensions.Caching.Hybrid.IHybridCacheSerializer<T>! serializer) -> Microsoft.Extensions.Caching.Hybrid.IHybridCacheBuilder!
static Microsoft.Extensions.DependencyInjection.HybridCacheBuilderExtensions.AddSerializerFactory(this Microsoft.Extensions.Caching.Hybrid.IHybridCacheBuilder! builder, Microsoft.Extensions.Caching.Hybrid.IHybridCacheSerializerFactory! factory) -> Microsoft.Extensions.Caching.Hybrid.IHybridCacheBuilder!
static Microsoft.Extensions.DependencyInjection.HybridCacheBuilderExtensions.AddSerializerFactory<TImplementation>(this Microsoft.Extensions.Caching.Hybrid.IHybridCacheBuilder! builder) -> Microsoft.Extensions.Caching.Hybrid.IHybridCacheBuilder!
static Microsoft.Extensions.DependencyInjection.HybridCacheServiceExtensions.AddHybridCache(this Microsoft.Extensions.DependencyInjection.IServiceCollection! services) -> Microsoft.Extensions.Caching.Hybrid.IHybridCacheBuilder!
static Microsoft.Extensions.DependencyInjection.HybridCacheServiceExtensions.AddHybridCache(this Microsoft.Extensions.DependencyInjection.IServiceCollection! services, System.Action<Microsoft.Extensions.Caching.Hybrid.HybridCacheOptions!>! setupAction) -> Microsoft.Extensions.Caching.Hybrid.IHybridCacheBuilder!

Просмотреть файл

@ -1,12 +0,0 @@
{
"no_entry_options": {
"MaximumKeyLength": 937
},
"with_entry_options": {
"MaximumKeyLength": 937,
"DefaultEntryOptions": {
"LocalCacheExpiration": "00:02:00",
"Flags": "DisableCompression,DisableLocalCacheRead"
}
}
}

Просмотреть файл

@ -1,227 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Buffers;
using System.Runtime.CompilerServices;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Caching.Hybrid.Internal;
using Microsoft.Extensions.Caching.Memory;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Options;
using static Microsoft.Extensions.Caching.Hybrid.Internal.DefaultHybridCache;
namespace Microsoft.Extensions.Caching.Hybrid.Tests;
// Exercises buffer lifetime/recycling behaviour of DefaultHybridCache across the
// three storage shapes: no L2, legacy byte[]-based L2, and IBufferDistributedCache L2.
public class BufferReleaseTests // note that buffer ref-counting is only enabled for DEBUG builds; can only verify general behaviour without that
{
    // Builds a service provider with HybridCache registered (plus caller-supplied
    // extra registrations) and returns the concrete DefaultHybridCache for
    // internal-state assertions.
    static ServiceProvider GetDefaultCache(out DefaultHybridCache cache, Action<ServiceCollection>? config = null)
    {
        var services = new ServiceCollection();
        config?.Invoke(services);
        services.AddHybridCache();
        var provider = services.BuildServiceProvider();
        cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
        return provider;
    }
    [Fact]
    public async Task BufferGetsReleased_NoL2()
    {
        using var provider = GetDefaultCache(out var cache);
#if DEBUG
        // flush any buffers left over from other tests so the counters below start at zero
        cache.DebugOnlyGetOutstandingBuffers(flush: true);
#endif
        var key = Me();
#if DEBUG
        Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
#endif
        var first = await cache.GetOrCreateAsync(key, _ => GetAsync());
        Assert.NotNull(first);
#if DEBUG
        // the cached entry holds one pooled buffer
        Assert.Equal(1, cache.DebugOnlyGetOutstandingBuffers());
#endif
        Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
        // assert that we can reserve the buffer *now* (mostly to see that it behaves differently later)
        Assert.True(cacheItem.NeedsEvictionCallback, "should be pooled memory");
        Assert.True(cacheItem.TryReserveBuffer(out _));
        cacheItem.Release(); // for the above reserve
        var second = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
        Assert.NotNull(second);
        // mutable type: each read must produce a distinct defensive copy
        Assert.NotSame(first, second);
        Assert.Equal(1, cacheItem.RefCount);
        await cache.RemoveAsync(key);
        var third = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
        Assert.Null(third);
        // give it a moment for the eviction callback to kick in
        for (var i = 0; i < 10 && cacheItem.NeedsEvictionCallback; i++)
        {
            await Task.Delay(250);
        }
#if DEBUG
        Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
#endif
        // assert that we can *no longer* reserve this buffer, because we've already recycled it
        Assert.False(cacheItem.TryReserveBuffer(out _));
        Assert.Equal(0, cacheItem.RefCount);
        Assert.False(cacheItem.NeedsEvictionCallback, "should be recycled now");
        static ValueTask<Customer> GetAsync() => new(new Customer { Id = 42, Name = "Fred" });
    }
    // skip the underlying data source so reads can only be satisfied from cache
    private static readonly HybridCacheEntryOptions _noUnderlying = new() { Flags = HybridCacheEntryFlags.DisableUnderlyingData };
    // MemoryDistributedCache extended with IBufferDistributedCache, so tests can
    // toggle between the legacy byte[] path and the buffer-based path.
    class TestCache : MemoryDistributedCache, IBufferDistributedCache
    {
        public TestCache(IOptions<MemoryDistributedCacheOptions> options) : base(options) { }
        void IBufferDistributedCache.Set(string key, ReadOnlySequence<byte> value, DistributedCacheEntryOptions options)
            => Set(key, value.ToArray(), options); // efficiency not important for this
        ValueTask IBufferDistributedCache.SetAsync(string key, ReadOnlySequence<byte> value, DistributedCacheEntryOptions options, CancellationToken token)
            => new(SetAsync(key, value.ToArray(), options, token)); // efficiency not important for this
        bool IBufferDistributedCache.TryGet(string key, IBufferWriter<byte> destination)
            => Write(destination, Get(key));
        async ValueTask<bool> IBufferDistributedCache.TryGetAsync(string key, IBufferWriter<byte> destination, CancellationToken token)
            => Write(destination, await GetAsync(key, token));
        // copy a fetched payload into the destination writer; false when missing
        static bool Write(IBufferWriter<byte> destination, byte[]? buffer)
        {
            if (buffer is null)
            {
                return false;
            }
            destination.Write(buffer);
            return true;
        }
    }
    [Fact]
    public async Task BufferDoesNotNeedRelease_LegacyL2() // byte[] API; not pooled
    {
        using var provider = GetDefaultCache(out var cache,
            services => services.AddSingleton<IDistributedCache, TestCache>());
        // force the legacy byte[] L2 path even though TestCache supports buffers
        cache.DebugRemoveFeatures(CacheFeatures.BackendBuffers);
        // prep the backend with our data
        var key = Me();
        Assert.NotNull(cache.BackendCache);
        var serializer = cache.GetSerializer<Customer>();
        using (var writer = RecyclableArrayBufferWriter<byte>.Create(int.MaxValue))
        {
            serializer.Serialize(await GetAsync(), writer);
            cache.BackendCache.Set(key, writer.ToArray());
        }
#if DEBUG
        cache.DebugOnlyGetOutstandingBuffers(flush: true);
        Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
#endif
        var first = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying); // we expect this to come from L2, hence NoUnderlying
        Assert.NotNull(first);
#if DEBUG
        // legacy byte[] payloads are not pooled, so nothing is outstanding
        Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
#endif
        Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
        // assert that we can reserve the buffer *now* (mostly to see that it behaves differently later)
        Assert.False(cacheItem.NeedsEvictionCallback, "should NOT be pooled memory");
        Assert.True(cacheItem.TryReserveBuffer(out _));
        cacheItem.Release(); // for the above reserve
        var second = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
        Assert.NotNull(second);
        Assert.NotSame(first, second);
        Assert.Equal(1, cacheItem.RefCount);
        await cache.RemoveAsync(key);
        var third = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
        Assert.Null(third);
        Assert.Null(await cache.BackendCache.GetAsync(key)); // should be gone from L2 too
        // give it a moment for the eviction callback to kick in
        for (var i = 0; i < 10 && cacheItem.NeedsEvictionCallback; i++)
        {
            await Task.Delay(250);
        }
#if DEBUG
        Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
#endif
        // assert that we can *no longer* reserve this buffer, because we've already recycled it
        Assert.True(cacheItem.TryReserveBuffer(out _)); // always readable
        cacheItem.Release();
        Assert.Equal(1, cacheItem.RefCount); // not decremented because there was no need to add the hook
        Assert.False(cacheItem.NeedsEvictionCallback, "should still not need recycling");
        static ValueTask<Customer> GetAsync() => new(new Customer { Id = 42, Name = "Fred" });
    }
    [Fact]
    public async Task BufferGetsReleased_BufferL2() // IBufferWriter<byte> API; pooled
    {
        using var provider = GetDefaultCache(out var cache,
            services => services.AddSingleton<IDistributedCache, TestCache>());
        // prep the backend with our data
        var key = Me();
        Assert.NotNull(cache.BackendCache);
        var serializer = cache.GetSerializer<Customer>();
        using (var writer = RecyclableArrayBufferWriter<byte>.Create(int.MaxValue))
        {
            serializer.Serialize(await GetAsync(), writer);
            cache.BackendCache.Set(key, writer.ToArray());
        }
#if DEBUG
        cache.DebugOnlyGetOutstandingBuffers(flush: true);
        Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
#endif
        var first = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying); // we expect this to come from L2, hence NoUnderlying
        Assert.NotNull(first);
#if DEBUG
        // buffer-based L2 path uses pooled memory, so one buffer is outstanding
        Assert.Equal(1, cache.DebugOnlyGetOutstandingBuffers());
#endif
        Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
        // assert that we can reserve the buffer *now* (mostly to see that it behaves differently later)
        Assert.True(cacheItem.NeedsEvictionCallback, "should be pooled memory");
        Assert.True(cacheItem.TryReserveBuffer(out _));
        cacheItem.Release(); // for the above reserve
        var second = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
        Assert.NotNull(second);
        Assert.NotSame(first, second);
        Assert.Equal(1, cacheItem.RefCount);
        await cache.RemoveAsync(key);
        var third = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
        Assert.Null(third);
        Assert.Null(await cache.BackendCache.GetAsync(key)); // should be gone from L2 too
        // give it a moment for the eviction callback to kick in
        for (var i = 0; i < 10 && cacheItem.NeedsEvictionCallback; i++)
        {
            await Task.Delay(250);
        }
#if DEBUG
        Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
#endif
        // assert that we can *no longer* reserve this buffer, because we've already recycled it
        Assert.False(cacheItem.TryReserveBuffer(out _)); // released now
        Assert.Equal(0, cacheItem.RefCount);
        Assert.False(cacheItem.NeedsEvictionCallback, "should be recycled by now");
        static ValueTask<Customer> GetAsync() => new(new Customer { Id = 42, Name = "Fred" });
    }
    // mutable payload type - forces the defensive-copy path in the cache
    public class Customer
    {
        public int Id { get; set; }
        public string Name { get; set; } = "";
    }
    // per-test cache key derived from the calling test method's name
    private static string Me([CallerMemberName] string caller = "") => caller;
}

Просмотреть файл

@ -1,381 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Buffers;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Caching.Hybrid.Internal;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Internal;
using Xunit.Abstractions;
namespace Microsoft.Extensions.Caching.Hybrid.Tests;
/// <summary>
/// Validate over-arching expectations of DC implementations, in particular behaviour re IBufferDistributedCache added for HybridCache
/// </summary>
// Base class for conformance tests over IDistributedCache implementations;
// concrete subclasses supply the cache registration via ConfigureAsync.
// Covers byte[] round-trips, IBufferDistributedCache round-trips across several
// ReadOnlySequence shapes, and (where supported) clock-driven expiration.
public abstract class DistributedCacheTests
{
    public DistributedCacheTests(ITestOutputHelper log) => Log = log;
    protected ITestOutputHelper Log { get; }
    // Register the cache under test (and any supporting services).
    protected abstract ValueTask ConfigureAsync(IServiceCollection services);
    // Whether the implementation honours an injected clock (enables expiration checks).
    protected abstract bool CustomClockSupported { get; }
    protected FakeTime Clock { get; } = new();
    // Manually-advanced clock exposed as both TimeProvider and the legacy ISystemClock.
    protected class FakeTime : TimeProvider, ISystemClock
    {
        private DateTimeOffset _now = DateTimeOffset.UtcNow;
        public void Reset() => _now = DateTimeOffset.UtcNow;
        DateTimeOffset ISystemClock.UtcNow => _now;
        public override DateTimeOffset GetUtcNow() => _now;
        public void Add(TimeSpan delta) => _now += delta;
    }
    // Fresh service collection per test, with the fake clock registered before
    // the subclass configures the cache under test.
    private async ValueTask<IServiceCollection> InitAsync()
    {
        Clock.Reset();
        var services = new ServiceCollection();
        services.AddSingleton<TimeProvider>(Clock);
        services.AddSingleton<ISystemClock>(Clock);
        await ConfigureAsync(services);
        return services;
    }
    [Theory]
    [InlineData(0)]
    [InlineData(128)]
    [InlineData(1024)]
    [InlineData(16 * 1024)]
    public async Task SimpleBufferRoundtrip(int size)
    {
        var cache = (await InitAsync()).BuildServiceProvider().GetService<IDistributedCache>();
        if (cache is null)
        {
            Log.WriteLine("Cache is not available");
            return; // inconclusive
        }
        var key = $"{Me()}:{size}";
        cache.Remove(key);
        Assert.Null(cache.Get(key));
        var expected = new byte[size];
        new Random().NextBytes(expected);
        cache.Set(key, expected, _fiveMinutes);
        var actual = cache.Get(key);
        Assert.NotNull(actual);
        Assert.True(expected.SequenceEqual(actual));
        Log.WriteLine("Data validated");
        if (CustomClockSupported)
        {
            // 4 minutes in: still inside the 5-minute window, value should survive
            Clock.Add(TimeSpan.FromMinutes(4));
            actual = cache.Get(key);
            Assert.NotNull(actual);
            Assert.True(expected.SequenceEqual(actual));
            // 6 minutes in: past expiry, value should be gone
            Clock.Add(TimeSpan.FromMinutes(2));
            actual = cache.Get(key);
            Assert.Null(actual);
            Log.WriteLine("Expiration validated");
        }
        else
        {
            Log.WriteLine("Expiration not validated - TimeProvider not supported");
        }
    }
    [Theory]
    [InlineData(0)]
    [InlineData(128)]
    [InlineData(1024)]
    [InlineData(16 * 1024)]
    public async Task SimpleBufferRoundtripAsync(int size)
    {
        var cache = (await InitAsync()).BuildServiceProvider().GetService<IDistributedCache>();
        if (cache is null)
        {
            Log.WriteLine("Cache is not available");
            return; // inconclusive
        }
        var key = $"{Me()}:{size}";
        await cache.RemoveAsync(key);
        Assert.Null(cache.Get(key));
        var expected = new byte[size];
        new Random().NextBytes(expected);
        await cache.SetAsync(key, expected, _fiveMinutes);
        var actual = await cache.GetAsync(key);
        Assert.NotNull(actual);
        Assert.True(expected.SequenceEqual(actual));
        Log.WriteLine("Data validated");
        if (CustomClockSupported)
        {
            // 4 minutes in: still inside the 5-minute window, value should survive
            Clock.Add(TimeSpan.FromMinutes(4));
            actual = await cache.GetAsync(key);
            Assert.NotNull(actual);
            Assert.True(expected.SequenceEqual(actual));
            // 6 minutes in: past expiry, value should be gone
            Clock.Add(TimeSpan.FromMinutes(2));
            actual = await cache.GetAsync(key);
            Assert.Null(actual);
            Log.WriteLine("Expiration validated");
        }
        else
        {
            Log.WriteLine("Expiration not validated - TimeProvider not supported");
        }
    }
    // The distinct ReadOnlySequence<byte> layouts exercised by the buffer tests.
    public enum SequenceKind
    {
        FullArray,
        PaddedArray,
        CustomMemory,
        MultiSegment,
    }
    [Theory]
    [InlineData(0, SequenceKind.FullArray)]
    [InlineData(128, SequenceKind.FullArray)]
    [InlineData(1024, SequenceKind.FullArray)]
    [InlineData(16 * 1024, SequenceKind.FullArray)]
    [InlineData(0, SequenceKind.PaddedArray)]
    [InlineData(128, SequenceKind.PaddedArray)]
    [InlineData(1024, SequenceKind.PaddedArray)]
    [InlineData(16 * 1024, SequenceKind.PaddedArray)]
    [InlineData(0, SequenceKind.CustomMemory)]
    [InlineData(128, SequenceKind.CustomMemory)]
    [InlineData(1024, SequenceKind.CustomMemory)]
    [InlineData(16 * 1024, SequenceKind.CustomMemory)]
    [InlineData(0, SequenceKind.MultiSegment)]
    [InlineData(128, SequenceKind.MultiSegment)]
    [InlineData(1024, SequenceKind.MultiSegment)]
    [InlineData(16 * 1024, SequenceKind.MultiSegment)]
    public async Task ReadOnlySequenceBufferRoundtrip(int size, SequenceKind kind)
    {
        var cache = (await InitAsync()).BuildServiceProvider().GetService<IDistributedCache>() as IBufferDistributedCache;
        if (cache is null)
        {
            Log.WriteLine("Cache is not available or does not support IBufferDistributedCache");
            return; // inconclusive
        }
        var key = $"{Me()}:{size}/{kind}";
        cache.Remove(key);
        Assert.Null(cache.Get(key));
        var payload = Invent(size, kind);
        ReadOnlyMemory<byte> expected = payload.ToArray(); // simplify for testing
        Assert.Equal(size, expected.Length);
        cache.Set(key, payload, _fiveMinutes);
        var writer = RecyclableArrayBufferWriter<byte>.Create(int.MaxValue);
        Assert.True(cache.TryGet(key, writer));
        Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
        writer.ResetInPlace();
        Log.WriteLine("Data validated");
        if (CustomClockSupported)
        {
            // 4 minutes in: still inside the 5-minute window, value should survive
            Clock.Add(TimeSpan.FromMinutes(4));
            Assert.True(cache.TryGet(key, writer));
            Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
            writer.ResetInPlace();
            // 6 minutes in: past expiry; a miss must also leave the writer untouched
            Clock.Add(TimeSpan.FromMinutes(2));
            Assert.False(cache.TryGet(key, writer));
            Assert.Equal(0, writer.CommittedBytes);
            Log.WriteLine("Expiration validated");
        }
        else
        {
            Log.WriteLine("Expiration not validated - TimeProvider not supported");
        }
    }
    [Theory]
    [InlineData(0, SequenceKind.FullArray)]
    [InlineData(128, SequenceKind.FullArray)]
    [InlineData(1024, SequenceKind.FullArray)]
    [InlineData(16 * 1024, SequenceKind.FullArray)]
    [InlineData(0, SequenceKind.PaddedArray)]
    [InlineData(128, SequenceKind.PaddedArray)]
    [InlineData(1024, SequenceKind.PaddedArray)]
    [InlineData(16 * 1024, SequenceKind.PaddedArray)]
    [InlineData(0, SequenceKind.CustomMemory)]
    [InlineData(128, SequenceKind.CustomMemory)]
    [InlineData(1024, SequenceKind.CustomMemory)]
    [InlineData(16 * 1024, SequenceKind.CustomMemory)]
    [InlineData(0, SequenceKind.MultiSegment)]
    [InlineData(128, SequenceKind.MultiSegment)]
    [InlineData(1024, SequenceKind.MultiSegment)]
    [InlineData(16 * 1024, SequenceKind.MultiSegment)]
    public async Task ReadOnlySequenceBufferRoundtripAsync(int size, SequenceKind kind)
    {
        var cache = (await InitAsync()).BuildServiceProvider().GetService<IDistributedCache>() as IBufferDistributedCache;
        if (cache is null)
        {
            Log.WriteLine("Cache is not available or does not support IBufferDistributedCache");
            return; // inconclusive
        }
        var key = $"{Me()}:{size}/{kind}";
        await cache.RemoveAsync(key);
        Assert.Null(await cache.GetAsync(key));
        var payload = Invent(size, kind);
        ReadOnlyMemory<byte> expected = payload.ToArray(); // simplify for testing
        Assert.Equal(size, expected.Length);
        await cache.SetAsync(key, payload, _fiveMinutes);
        var writer = RecyclableArrayBufferWriter<byte>.Create(int.MaxValue);
        Assert.True(await cache.TryGetAsync(key, writer));
        Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
        writer.ResetInPlace();
        Log.WriteLine("Data validated");
        if (CustomClockSupported)
        {
            // 4 minutes in: still inside the 5-minute window, value should survive
            Clock.Add(TimeSpan.FromMinutes(4));
            Assert.True(await cache.TryGetAsync(key, writer));
            Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
            writer.ResetInPlace();
            // 6 minutes in: past expiry; a miss must also leave the writer untouched
            Clock.Add(TimeSpan.FromMinutes(2));
            Assert.False(await cache.TryGetAsync(key, writer));
            Assert.Equal(0, writer.CommittedBytes);
            Log.WriteLine("Expiration validated");
        }
        else
        {
            Log.WriteLine("Expiration not validated - TimeProvider not supported");
        }
    }
    // Construct a random payload of the requested size in the requested sequence
    // shape, then self-verify that the shape really has the expected properties
    // (single/multi segment, array-backed or not, padded or not).
    static ReadOnlySequence<byte> Invent(int size, SequenceKind kind)
    {
        var rand = new Random();
        ReadOnlySequence<byte> payload;
        switch (kind)
        {
            case SequenceKind.FullArray:
                var arr = new byte[size];
                rand.NextBytes(arr);
                payload = new(arr);
                break;
            case SequenceKind.PaddedArray:
                // 5 bytes of padding either side, so the segment has a non-zero offset
                arr = new byte[size + 10];
                rand.NextBytes(arr);
                payload = new(arr, 5, arr.Length - 10);
                break;
            case SequenceKind.CustomMemory:
                // MemoryManager-backed memory that deliberately refuses TryGetArray
                var mem = new CustomMemory(size, rand).Memory;
                payload = new(mem);
                break;
            case SequenceKind.MultiSegment:
                if (size == 0)
                {
                    payload = default;
                    break;
                }
                if (size < 10)
                {
                    throw new ArgumentException("small segments not considered"); // a pain to construct
                }
                CustomSegment first = new(10, rand, null), // we'll take the last 3 of this 10
                    second = new(size - 7, rand, first), // we'll take all of this one
                    third = new(10, rand, second); // we'll take the first 4 of this 10
                payload = new(first, 7, third, 4);
                break;
            default:
                throw new ArgumentOutOfRangeException(nameof(kind));
        }
        // now validate what we expect of that payload
        Assert.Equal(size, payload.Length);
        switch (kind)
        {
            case SequenceKind.CustomMemory or SequenceKind.MultiSegment when size == 0:
                // empty sequences collapse to a single array-backed segment
                Assert.True(payload.IsSingleSegment);
                Assert.True(MemoryMarshal.TryGetArray(payload.First, out _));
                break;
            case SequenceKind.MultiSegment:
                Assert.False(payload.IsSingleSegment);
                break;
            case SequenceKind.CustomMemory:
                Assert.True(payload.IsSingleSegment);
                Assert.False(MemoryMarshal.TryGetArray(payload.First, out _));
                break;
            case SequenceKind.FullArray:
                Assert.True(payload.IsSingleSegment);
                Assert.True(MemoryMarshal.TryGetArray(payload.First, out var segment));
                Assert.Equal(0, segment.Offset);
                Assert.NotNull(segment.Array);
                Assert.Equal(size, segment.Count);
                Assert.Equal(size, segment.Array.Length);
                break;
            case SequenceKind.PaddedArray:
                Assert.True(payload.IsSingleSegment);
                Assert.True(MemoryMarshal.TryGetArray(payload.First, out segment));
                Assert.NotEqual(0, segment.Offset);
                Assert.NotNull(segment.Array);
                Assert.Equal(size, segment.Count);
                Assert.NotEqual(size, segment.Array.Length);
                break;
        }
        return payload;
    }
    // Linked-list segment for building multi-segment sequences; each segment
    // carries 5 bytes of padding either side of its usable memory.
    class CustomSegment : ReadOnlySequenceSegment<byte>
    {
        public CustomSegment(int size, Random? rand, CustomSegment? previous)
        {
            var arr = new byte[size + 10];
            rand?.NextBytes(arr);
            Memory = new(arr, 5, arr.Length - 10);
            if (previous is not null)
            {
                RunningIndex = previous.RunningIndex + previous.Memory.Length;
                previous.Next = this;
            }
        }
    }
    // MemoryManager whose memory is intentionally NOT exposable as an array
    // (TryGetArray returns false), to exercise the non-array code paths.
    class CustomMemory : MemoryManager<byte>
    {
        private readonly byte[] _data;
        public CustomMemory(int size, Random? rand = null)
        {
            _data = new byte[size + 10];
            rand?.NextBytes(_data);
        }
        public override Span<byte> GetSpan() => new(_data, 5, _data.Length - 10);
        public override MemoryHandle Pin(int elementIndex = 0) => throw new NotSupportedException();
        public override void Unpin() => throw new NotSupportedException();
        protected override void Dispose(bool disposing) { }
        protected override bool TryGetArray(out ArraySegment<byte> segment)
        {
            segment = default;
            return false;
        }
    }
    private static readonly DistributedCacheEntryOptions _fiveMinutes
        = new() { AbsoluteExpirationRelativeToNow = TimeSpan.FromMinutes(5) };
    // per-test cache key derived from the calling test method's name
    protected static string Me([CallerMemberName] string caller = "") => caller;
}

Просмотреть файл

@ -1,82 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Runtime.CompilerServices;
using Microsoft.Extensions.Caching.Hybrid.Internal;
using Microsoft.Extensions.DependencyInjection;
namespace Microsoft.Extensions.Caching.Hybrid.Tests;
// End-to-end behaviour of HybridCache removal APIs (single key, key arrays).
public class FunctionalTests
{
    // Builds a service provider with HybridCache registered (plus any extra
    // registrations supplied by the caller) and hands back the concrete
    // DefaultHybridCache instance.
    static ServiceProvider GetDefaultCache(out DefaultHybridCache cache, Action<ServiceCollection>? config = null)
    {
        ServiceCollection collection = new();
        config?.Invoke(collection);
        collection.AddHybridCache();
        ServiceProvider built = collection.BuildServiceProvider();
        cache = Assert.IsType<DefaultHybridCache>(built.GetRequiredService<HybridCache>());
        return built;
    }

    [Fact]
    public async Task RemoveSingleKey()
    {
        using var provider = GetDefaultCache(out var cache);
        var key = Me();
        Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask<int>(42)));

        // removing an unrelated key must not disturb our entry; the factory now
        // yields 96, so seeing 42 proves the cached value was served
        await cache.RemoveAsync("unrelated");
        Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask<int>(96)));

        // removing the actual key forces the factory to run again
        await cache.RemoveAsync(key);
        Assert.Equal(96, await cache.GetOrCreateAsync(key, _ => new ValueTask<int>(96)));
    }

    [Fact]
    public async Task RemoveNoKeyViaArray()
    {
        using var provider = GetDefaultCache(out var cache);
        var key = Me();
        Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask<int>(42)));

        // an empty key array is a no-op; the original value must still be served
        await cache.RemoveAsync([]);
        Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask<int>(96)));
    }

    [Fact]
    public async Task RemoveSingleKeyViaArray()
    {
        using var provider = GetDefaultCache(out var cache);
        var key = Me();
        Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask<int>(42)));

        // an array containing only unrelated keys must leave our entry intact
        await cache.RemoveAsync(["unrelated"]);
        Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask<int>(96)));

        // an array containing our key must evict it, so the factory re-runs
        await cache.RemoveAsync([key]);
        Assert.Equal(96, await cache.GetOrCreateAsync(key, _ => new ValueTask<int>(96)));
    }

    [Fact]
    public async Task RemoveMultipleKeysViaArray()
    {
        using var provider = GetDefaultCache(out var cache);
        var key = Me();
        Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask<int>(42)));

        // the second read still hits the cache (factory result 96 is ignored)
        Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask<int>(96)));

        // a mixed array (our key + an unrelated one) must evict our entry
        await cache.RemoveAsync([key, "unrelated"]);
        Assert.Equal(96, await cache.GetOrCreateAsync(key, _ => new ValueTask<int>(96)));
    }

    // per-test cache key derived from the calling test method's name
    private static string Me([CallerMemberName] string caller = "") => caller;
}

Просмотреть файл

@@ -1,261 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Buffers;
using System.Collections;
using System.Runtime.CompilerServices;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Caching.Hybrid.Internal;
using Microsoft.Extensions.Caching.Memory;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using Xunit.Abstractions;
namespace Microsoft.Extensions.Caching.Hybrid.Tests;
// Exercises HybridCache's interaction with the distributed (L2) backend by
// wrapping it in a logging/counting decorator, then asserting exactly how many
// backend operations each scenario performs. Runs against both the classic
// byte[] IDistributedCache surface and the IBufferDistributedCache surface,
// selected by the `buffers` theory parameter.
public class L2Tests(ITestOutputHelper Log)
{
// Minimal IOptions<T> wrapper so a MemoryDistributedCache can be constructed directly.
class Options<T>(T Value) : IOptions<T> where T : class
{
T IOptions<T>.Value => Value;
}
// Builds a provider whose L2 backend is a MemoryDistributedCache wrapped in an
// operation-counting decorator; `buffers` picks the buffer-aware decorator.
ServiceProvider GetDefaultCache(bool buffers, out DefaultHybridCache cache)
{
var services = new ServiceCollection();
var localCacheOptions = new Options<MemoryDistributedCacheOptions>(new());
var localCache = new MemoryDistributedCache(localCacheOptions);
services.AddSingleton<IDistributedCache>(buffers ? new BufferLoggingCache(Log, localCache) : new LoggingCache(Log, localCache));
services.AddHybridCache();
var provider = services.BuildServiceProvider();
cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
return provider;
}
// Guard against unexpected factory invocations: callers pass work=true only
// when the value is legitimately expected to be (re)created.
static string CreateString(bool work = false)
{
Assert.True(work, "we didn't expect this to be invoked");
return Guid.NewGuid().ToString();
}
private static readonly HybridCacheEntryOptions Expiry = new() { Expiration = TimeSpan.FromMinutes(3.5) };
// Same expiry, but L1 disabled so every read must consult the L2 backend.
private static readonly HybridCacheEntryOptions ExpiryNoL1 = new() { Flags = HybridCacheEntryFlags.DisableLocalCache, Expiration = TimeSpan.FromMinutes(3.5) };
// Immutable payload (string): L1 hits may hand back the same instance.
[Theory]
[InlineData(true)]
[InlineData(false)]
public async Task AssertL2Operations_Immutable(bool buffers)
{
using var provider = GetDefaultCache(buffers, out var cache);
var backend = Assert.IsAssignableFrom<LoggingCache>(cache.BackendCache);
Log.WriteLine("Inventing key...");
var s = await cache.GetOrCreateAsync(Me(), ct => new ValueTask<string>(CreateString(true)));
Assert.Equal(2, backend.OpCount); // GET, SET
Log.WriteLine("Reading with L1...");
for (var i = 0; i < 5; i++)
{
var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask<string>(CreateString()));
Assert.Equal(s, x);
Assert.Same(s, x);
}
Assert.Equal(2, backend.OpCount); // shouldn't be hit
Log.WriteLine("Reading without L1...");
for (var i = 0; i < 5; i++)
{
var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask<string>(CreateString()), ExpiryNoL1);
Assert.Equal(s, x);
Assert.NotSame(s, x);
}
Assert.Equal(7, backend.OpCount); // should be read every time
Log.WriteLine("Setting value directly");
s = CreateString(true);
await cache.SetAsync(Me(), s);
for (var i = 0; i < 5; i++)
{
var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask<string>(CreateString()));
Assert.Equal(s, x);
Assert.Same(s, x);
}
Assert.Equal(8, backend.OpCount); // SET
Log.WriteLine("Removing key...");
await cache.RemoveAsync(Me());
Assert.Equal(9, backend.OpCount); // DEL
Log.WriteLine("Fetching new...");
var t = await cache.GetOrCreateAsync(Me(), ct => new ValueTask<string>(CreateString(true)));
Assert.NotEqual(s, t);
Assert.Equal(11, backend.OpCount); // GET, SET
}
// Mutable payload used to verify per-call deserialization (no instance sharing).
public sealed class Foo
{
public string Value { get; set; } = "";
}
// Mutable payload (Foo): every read must produce a fresh instance, even on L1 hits.
[Theory]
[InlineData(true)]
[InlineData(false)]
public async Task AssertL2Operations_Mutable(bool buffers)
{
using var provider = GetDefaultCache(buffers, out var cache);
var backend = Assert.IsAssignableFrom<LoggingCache>(cache.BackendCache);
Log.WriteLine("Inventing key...");
var s = await cache.GetOrCreateAsync(Me(), ct => new ValueTask<Foo>(new Foo { Value = CreateString(true) }), Expiry);
Assert.Equal(2, backend.OpCount); // GET, SET
Log.WriteLine("Reading with L1...");
for (var i = 0; i < 5; i++)
{
var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask<Foo>(new Foo { Value = CreateString() }), Expiry);
Assert.Equal(s.Value, x.Value);
Assert.NotSame(s, x);
}
Assert.Equal(2, backend.OpCount); // shouldn't be hit
Log.WriteLine("Reading without L1...");
for (var i = 0; i < 5; i++)
{
var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask<Foo>(new Foo { Value = CreateString() }), ExpiryNoL1);
Assert.Equal(s.Value, x.Value);
Assert.NotSame(s, x);
}
Assert.Equal(7, backend.OpCount); // should be read every time
Log.WriteLine("Setting value directly");
s = new Foo { Value = CreateString(true) };
await cache.SetAsync(Me(), s);
for (var i = 0; i < 5; i++)
{
var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask<Foo>(new Foo { Value = CreateString() }), Expiry);
Assert.Equal(s.Value, x.Value);
Assert.NotSame(s, x);
}
Assert.Equal(8, backend.OpCount); // SET
Log.WriteLine("Removing key...");
await cache.RemoveAsync(Me());
Assert.Equal(9, backend.OpCount); // DEL
Log.WriteLine("Fetching new...");
var t = await cache.GetOrCreateAsync(Me(), ct => new ValueTask<Foo>(new Foo { Value = CreateString(true) }), Expiry);
Assert.NotEqual(s.Value, t.Value);
Assert.Equal(11, backend.OpCount); // GET, SET
}
// Buffer-aware decorator: adds the IBufferDistributedCache surface on top of
// LoggingCache, still counting one op per call.
class BufferLoggingCache : LoggingCache, IBufferDistributedCache
{
public BufferLoggingCache(ITestOutputHelper log, IDistributedCache tail) : base(log, tail) { }
void IBufferDistributedCache.Set(string key, ReadOnlySequence<byte> value, DistributedCacheEntryOptions options)
{
Interlocked.Increment(ref opcount);
Log.WriteLine($"Set (ROS-byte): {key}");
Tail.Set(key, value.ToArray(), options);
}
ValueTask IBufferDistributedCache.SetAsync(string key, ReadOnlySequence<byte> value, DistributedCacheEntryOptions options, CancellationToken token)
{
Interlocked.Increment(ref opcount);
Log.WriteLine($"SetAsync (ROS-byte): {key}");
return new(Tail.SetAsync(key, value.ToArray(), options, token));
}
bool IBufferDistributedCache.TryGet(string key, IBufferWriter<byte> destination)
{
Interlocked.Increment(ref opcount);
Log.WriteLine($"TryGet: {key}");
var buffer = Tail.Get(key);
if (buffer is null)
{
return false;
}
destination.Write(buffer);
return true;
}
async ValueTask<bool> IBufferDistributedCache.TryGetAsync(string key, IBufferWriter<byte> destination, CancellationToken token)
{
Interlocked.Increment(ref opcount);
Log.WriteLine($"TryGetAsync: {key}");
var buffer = await Tail.GetAsync(key, token);
if (buffer is null)
{
return false;
}
destination.Write(buffer);
return true;
}
}
// Pass-through IDistributedCache decorator that logs each call and maintains a
// thread-safe operation counter the tests assert against.
class LoggingCache(ITestOutputHelper log, IDistributedCache tail) : IDistributedCache
{
protected ITestOutputHelper Log => log;
protected IDistributedCache Tail => tail;
// incremented via Interlocked from any thread; read via Volatile in OpCount
protected int opcount;
public int OpCount => Volatile.Read(ref opcount);
byte[]? IDistributedCache.Get(string key)
{
Interlocked.Increment(ref opcount);
Log.WriteLine($"Get: {key}");
return Tail.Get(key);
}
Task<byte[]?> IDistributedCache.GetAsync(string key, CancellationToken token)
{
Interlocked.Increment(ref opcount);
Log.WriteLine($"GetAsync: {key}");
return Tail.GetAsync(key, token);
}
void IDistributedCache.Refresh(string key)
{
Interlocked.Increment(ref opcount);
Log.WriteLine($"Refresh: {key}");
Tail.Refresh(key);
}
Task IDistributedCache.RefreshAsync(string key, CancellationToken token)
{
Interlocked.Increment(ref opcount);
Log.WriteLine($"RefreshAsync: {key}");
return Tail.RefreshAsync(key, token);
}
void IDistributedCache.Remove(string key)
{
Interlocked.Increment(ref opcount);
Log.WriteLine($"Remove: {key}");
Tail.Remove(key);
}
Task IDistributedCache.RemoveAsync(string key, CancellationToken token)
{
Interlocked.Increment(ref opcount);
Log.WriteLine($"RemoveAsync: {key}");
return Tail.RemoveAsync(key, token);
}
void IDistributedCache.Set(string key, byte[] value, DistributedCacheEntryOptions options)
{
Interlocked.Increment(ref opcount);
Log.WriteLine($"Set (byte[]): {key}");
Tail.Set(key, value, options);
}
Task IDistributedCache.SetAsync(string key, byte[] value, DistributedCacheEntryOptions options, CancellationToken token)
{
Interlocked.Increment(ref opcount);
Log.WriteLine($"SetAsync (byte[]): {key}");
return Tail.SetAsync(key, value, options, token);
}
}
// Per-test unique cache key derived from the test method's own name.
private static string Me([CallerMemberName] string caller = "") => caller;
}

Просмотреть файл

@@ -1,23 +0,0 @@
<!-- Test project for Microsoft.Extensions.Caching.Hybrid; multi-targets the
     default .NET Core and .NET Framework TFMs to cover both runtimes. -->
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFrameworks>$(DefaultNetCoreTargetFramework);$(DefaultNetFxTargetFramework)</TargetFrameworks>
<ImplicitUsings>enable</ImplicitUsings>
<Nullable>enable</Nullable>
</PropertyGroup>
<ItemGroup>
<!-- repo-style references resolved by the shared build infrastructure -->
<Reference Include="Microsoft.Extensions.Caching.Abstractions" />
<Reference Include="Microsoft.Extensions.Caching.Hybrid" />
<Reference Include="Microsoft.Extensions.Configuration.Json" />
<Reference Include="Microsoft.Extensions.Caching.StackExchangeRedis" />
<Reference Include="Microsoft.Extensions.Caching.SqlServer" />
</ItemGroup>
<ItemGroup>
<!-- fixture consumed by the configuration-binding tests -->
<None Update="BasicConfig.json">
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</None>
</ItemGroup>
</Project>

Просмотреть файл

@@ -1,103 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Microsoft.Extensions.Caching.Hybrid.Internal;
using Microsoft.Extensions.Caching.StackExchangeRedis;
using Microsoft.Extensions.DependencyInjection;
using StackExchange.Redis;
using Xunit.Abstractions;
namespace Microsoft.Extensions.Caching.Hybrid.Tests;
// Shared xUnit fixture that makes at most one connection attempt to a local
// Redis server; a null result from ConnectAsync means "Redis not available"
// and the dependent tests treat themselves as inconclusive.
public sealed class RedisFixture : IDisposable
{
    private ConnectionMultiplexer? _connection;
    private Task<IConnectionMultiplexer?>? _pendingConnect;

    // Lazily kicks off (exactly once) the shared connection attempt.
    public Task<IConnectionMultiplexer?> ConnectAsync()
    {
        return _pendingConnect ??= ConnectCoreAsync();
    }

    public void Dispose()
    {
        _connection?.Dispose();
    }

    private async Task<IConnectionMultiplexer?> ConnectCoreAsync()
    {
        try
        {
            var muxer = await ConnectionMultiplexer.ConnectAsync("127.0.0.1:6379");
            // store before the ping so the handle is disposed even if the ping fails
            _connection = muxer;
            await muxer.GetDatabase().PingAsync();
            return muxer;
        }
        catch
        {
            // no server listening (or ping failed): report "unavailable" rather than fail
            return null;
        }
    }
}
// Runs the shared DistributedCacheTests suite against a real Redis backend
// (via the RedisFixture), plus a Redis-specific smoke test. All tests degrade
// to "inconclusive" (early return) when no local Redis server is reachable.
public class RedisTests : DistributedCacheTests, IClassFixture<RedisFixture>
{
private readonly RedisFixture _fixture;
public RedisTests(RedisFixture fixture, ITestOutputHelper log) : base(log) => _fixture = fixture;
// Redis expiry is enforced server-side, so the suite cannot inject a fake clock.
protected override bool CustomClockSupported => false;
// Registers the shared Redis connection and the Redis IDistributedCache;
// leaves services untouched (inconclusive) when Redis is unreachable.
protected override async ValueTask ConfigureAsync(IServiceCollection services)
{
var redis = await _fixture.ConnectAsync();
if (redis is null)
{
Log.WriteLine("Redis is not available");
return; // inconclusive
}
Log.WriteLine("Redis is available");
services.AddSingleton<IConnectionMultiplexer>(redis);
services.AddStackExchangeRedisCache(options =>
{
options.ConnectionMultiplexerFactory = () => Task.FromResult(redis);
});
}
// Smoke test: factory runs once despite 10 fetches, and the backend write
// lands in Redis with a TTL; `useBuffers` toggles the buffer-based L2 API.
[Theory]
[InlineData(false)]
[InlineData(true)]
public async Task BasicUsage(bool useBuffers)
{
var services = new ServiceCollection();
await ConfigureAsync(services);
services.AddHybridCache();
var provider = services.BuildServiceProvider(); // not "using" - that will tear down our redis; use the fixture for that
var cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
if (cache.BackendCache is null)
{
Log.WriteLine("Backend cache not available; inconclusive");
return;
}
Assert.IsAssignableFrom<RedisCache>(cache.BackendCache);
if (!useBuffers) // force byte[] mode
{
cache.DebugRemoveFeatures(DefaultHybridCache.CacheFeatures.BackendBuffers);
}
Log.WriteLine($"features: {cache.GetFeatures()}");
var key = Me();
var redis = provider.GetRequiredService<IConnectionMultiplexer>();
await redis.GetDatabase().KeyDeleteAsync(key); // start from known state
Assert.False(await redis.GetDatabase().KeyExistsAsync(key));
var count = 0;
for (var i = 0; i < 10; i++)
{
await cache.GetOrCreateAsync<Guid>(key, _ =>
{
Interlocked.Increment(ref count);
return new(Guid.NewGuid());
});
}
Assert.Equal(1, count);
await Task.Delay(500); // the L2 write continues in the background; give it a chance
var ttl = await redis.GetDatabase().KeyTimeToLiveAsync(key);
Log.WriteLine($"ttl: {ttl}");
Assert.NotNull(ttl);
}
}

Просмотреть файл

@@ -1,195 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.ComponentModel;
using System.Text.Json;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.DependencyInjection;
namespace Microsoft.Extensions.Caching.Hybrid.Tests;
// Documentation-style samples contrasting raw IDistributedCache usage with the
// equivalent HybridCache patterns: capturing lambda, allocation-free static
// lambda with explicit state, and immutable-object instance reuse.
public class SampleUsage
{
    [Fact]
    public async Task DistributedCacheWorks()
    {
        var services = new ServiceCollection();
        services.AddDistributedMemoryCache();
        services.AddTransient<SomeDCService>();
        using var provider = services.BuildServiceProvider();
        var obj = provider.GetRequiredService<SomeDCService>();
        string name = "abc";
        int id = 42;
        var x = await obj.GetSomeInformationAsync(name, id);
        var y = await obj.GetSomeInformationAsync(name, id);
        // deserialized per call: distinct instances, identical payload
        Assert.NotSame(x, y);
        Assert.Equal(id, x.Id);
        Assert.Equal(name, x.Name);
        Assert.Equal(id, y.Id);
        Assert.Equal(name, y.Name);
    }

    [Fact]
    public async Task HybridCacheWorks()
    {
        var services = new ServiceCollection();
        services.AddHybridCache();
        services.AddTransient<SomeHCService>();
        using var provider = services.BuildServiceProvider();
        var obj = provider.GetRequiredService<SomeHCService>();
        string name = "abc";
        int id = 42;
        var x = await obj.GetSomeInformationAsync(name, id);
        var y = await obj.GetSomeInformationAsync(name, id);
        // mutable type: HybridCache hands each caller a fresh copy
        Assert.NotSame(x, y);
        Assert.Equal(id, x.Id);
        Assert.Equal(name, x.Name);
        Assert.Equal(id, y.Id);
        Assert.Equal(name, y.Name);
    }

    [Fact]
    public async Task HybridCacheNoCaptureWorks()
    {
        var services = new ServiceCollection();
        services.AddHybridCache();
        services.AddTransient<SomeHCServiceNoCapture>();
        using var provider = services.BuildServiceProvider();
        var obj = provider.GetRequiredService<SomeHCServiceNoCapture>();
        string name = "abc";
        int id = 42;
        var x = await obj.GetSomeInformationAsync(name, id);
        var y = await obj.GetSomeInformationAsync(name, id);
        Assert.NotSame(x, y);
        Assert.Equal(id, x.Id);
        Assert.Equal(name, x.Name);
        Assert.Equal(id, y.Id);
        Assert.Equal(name, y.Name);
    }

    [Fact]
    public async Task HybridCacheNoCaptureObjReuseWorks()
    {
        var services = new ServiceCollection();
        services.AddHybridCache();
        services.AddTransient<SomeHCServiceNoCaptureObjReuse>();
        using var provider = services.BuildServiceProvider();
        var obj = provider.GetRequiredService<SomeHCServiceNoCaptureObjReuse>();
        string name = "abc";
        int id = 42;
        var x = await obj.GetSomeInformationAsync(name, id);
        var y = await obj.GetSomeInformationAsync(name, id);
        // [ImmutableObject(true)] payload: the same instance may be handed out twice
        Assert.Same(x, y);
        Assert.Equal(id, x.Id);
        Assert.Equal(name, x.Name);
    }

    // Baseline: manual get/deserialize-or-create/serialize/set against IDistributedCache.
    public class SomeDCService(IDistributedCache cache)
    {
        public async Task<SomeInformation> GetSomeInformationAsync(string name, int id, CancellationToken token = default)
        {
            var key = $"someinfo:{name}:{id}"; // unique key for this combination
            var bytes = await cache.GetAsync(key, token); // try to get from cache
            SomeInformation info;
            if (bytes is null)
            {
                // cache miss; get the data from the real source
                info = await SomeExpensiveOperationAsync(name, id, token);
                // serialize and cache it
                bytes = SomeSerializer.Serialize(info);
                await cache.SetAsync(key, bytes, token);
            }
            else
            {
                // cache hit; deserialize it
                info = SomeSerializer.Deserialize<SomeInformation>(bytes);
            }
            return info;
        }
    }

    // HybridCache equivalent: one GetOrCreateAsync call with a capturing lambda.
    public class SomeHCService(HybridCache cache)
    {
        public async Task<SomeInformation> GetSomeInformationAsync(string name, int id, CancellationToken token = default)
        {
            return await cache.GetOrCreateAsync(
                $"someinfo:{name}:{id}", // unique key for this combination
                async ct => await SomeExpensiveOperationAsync(name, id, ct),
                cancellationToken: token
            );
        }
    }

    // this is the work we're trying to cache
    private static Task<SomeInformation> SomeExpensiveOperationAsync(string name, int id,
        CancellationToken token = default)
    {
        return Task.FromResult(new SomeInformation { Id = id, Name = name });
    }

    private static Task<SomeInformationReuse> SomeExpensiveOperationReuseAsync(string name, int id,
        CancellationToken token = default)
    {
        return Task.FromResult(new SomeInformationReuse { Id = id, Name = name });
    }

    // Allocation-friendly variant: static lambda with explicit state tuple (no capture).
    public class SomeHCServiceNoCapture(HybridCache cache)
    {
        public async Task<SomeInformation> GetSomeInformationAsync(string name, int id, CancellationToken token = default)
        {
            return await cache.GetOrCreateAsync(
                $"someinfo:{name}:{id}", // unique key for this combination
                (name, id), // all of the state we need for the final call, if needed
                static async (state, token) =>
                    await SomeExpensiveOperationAsync(state.name, state.id, token),
                cancellationToken: token
            );
        }
    }

    // FIX: the CancellationToken was previously a primary-constructor parameter,
    // which froze a single token for the service's lifetime and could not be
    // supplied meaningfully by DI; it now flows per call (optional, so existing
    // callers that omit it still compile), matching SomeHCServiceNoCapture.
    public class SomeHCServiceNoCaptureObjReuse(HybridCache cache)
    {
        public async Task<SomeInformationReuse> GetSomeInformationAsync(string name, int id, CancellationToken token = default)
        {
            return await cache.GetOrCreateAsync(
                $"someinfo:{name}:{id}", // unique key for this combination
                (name, id), // all of the state we need for the final call, if needed
                static async (state, token) =>
                    await SomeExpensiveOperationReuseAsync(state.name, state.id, token),
                cancellationToken: token
            );
        }
    }

    // Minimal JSON (de)serializer used by the IDistributedCache sample.
    static class SomeSerializer
    {
        internal static T Deserialize<T>(byte[] bytes)
        {
            return JsonSerializer.Deserialize<T>(bytes)!;
        }

        internal static byte[] Serialize<T>(T info)
        {
            using var ms = new MemoryStream();
            JsonSerializer.Serialize(ms, info);
            return ms.ToArray();
        }
    }

    // Mutable payload: consumers always get a fresh copy from the cache.
    public class SomeInformation
    {
        public int Id { get; set; }
        public string? Name { get; set; }
    }

    // Marked immutable, so the cache is allowed to hand the same instance to
    // multiple callers (see HybridCacheNoCaptureObjReuseWorks).
    [ImmutableObject(true)]
    public sealed class SomeInformationReuse
    {
        public int Id { get; set; }
        public string? Name { get; set; }
    }
}

Просмотреть файл

@@ -1,234 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Buffers;
using System.Runtime.CompilerServices;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Caching.Hybrid.Internal;
using Microsoft.Extensions.Caching.Memory;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Configuration.Json;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
#pragma warning disable CS1998 // Async method lacks 'await' operators and will run synchronously
#pragma warning disable CS8769 // Nullability of reference types in type of parameter doesn't match implemented member (possibly because of nullability attributes).
namespace Microsoft.Extensions.Caching.Hybrid.Tests;
// Covers HybridCache service registration and construction: default/manual
// options, binding HybridCacheOptions from JSON configuration (BasicConfig.json),
// serializer selection and customization, and the rules for when a registered
// IDistributedCache/IMemoryCache is adopted as the L2 backend.
public class ServiceConstructionTests
{
[Fact]
public void CanCreateDefaultService()
{
var services = new ServiceCollection();
services.AddHybridCache();
using var provider = services.BuildServiceProvider();
Assert.IsType<DefaultHybridCache>(provider.GetService<HybridCache>());
}
[Fact]
public void CanCreateServiceWithManualOptions()
{
var services = new ServiceCollection();
services.AddHybridCache(options =>
{
options.MaximumKeyLength = 937;
options.DefaultEntryOptions = new() { Expiration = TimeSpan.FromSeconds(120), Flags = HybridCacheEntryFlags.DisableLocalCacheRead };
});
using var provider = services.BuildServiceProvider();
var obj = Assert.IsType<DefaultHybridCache>(provider.GetService<HybridCache>());
var options = obj.Options;
Assert.Equal(937, options.MaximumKeyLength);
var defaults = options.DefaultEntryOptions;
Assert.NotNull(defaults);
Assert.Equal(TimeSpan.FromSeconds(120), defaults.Expiration);
Assert.Equal(HybridCacheEntryFlags.DisableLocalCacheRead, defaults.Flags);
Assert.Null(defaults.LocalCacheExpiration); // wasn't specified
}
// Binds the "no_entry_options" section of BasicConfig.json; DefaultEntryOptions stays null.
[Fact]
public void CanParseOptions_NoEntryOptions()
{
var source = new JsonConfigurationSource { Path = "BasicConfig.json" };
var configBuilder = new ConfigurationBuilder { Sources = { source } };
var config = configBuilder.Build();
var options = new HybridCacheOptions();
ConfigurationBinder.Bind(config, "no_entry_options", options);
Assert.Equal(937, options.MaximumKeyLength);
Assert.Null(options.DefaultEntryOptions);
}
[Fact]
public void CanParseOptions_WithEntryOptions() // in particular, check we can parse the timespan and [Flags] enums
{
var source = new JsonConfigurationSource { Path = "BasicConfig.json" };
var configBuilder = new ConfigurationBuilder { Sources = { source } };
var config = configBuilder.Build();
var options = new HybridCacheOptions();
ConfigurationBinder.Bind(config, "with_entry_options", options);
Assert.Equal(937, options.MaximumKeyLength);
var defaults = options.DefaultEntryOptions;
Assert.NotNull(defaults);
Assert.Equal(HybridCacheEntryFlags.DisableCompression | HybridCacheEntryFlags.DisableLocalCacheRead, defaults.Flags);
Assert.Equal(TimeSpan.FromSeconds(120), defaults.LocalCacheExpiration);
Assert.Null(defaults.Expiration); // wasn't specified
}
[Fact]
public async Task BasicStatelessUsage()
{
var services = new ServiceCollection();
services.AddHybridCache();
using var provider = services.BuildServiceProvider();
var cache = provider.GetRequiredService<HybridCache>();
var expected = Guid.NewGuid().ToString();
var actual = await cache.GetOrCreateAsync(Me(), async _ => expected);
Assert.Equal(expected, actual);
}
[Fact]
public async Task BasicStatefulUsage()
{
var services = new ServiceCollection();
services.AddHybridCache();
using var provider = services.BuildServiceProvider();
var cache = provider.GetRequiredService<HybridCache>();
var expected = Guid.NewGuid().ToString();
var actual = await cache.GetOrCreateAsync(Me(), expected, async (state, _) => state);
Assert.Equal(expected, actual);
}
// string/byte[] use the inbuilt serializer; everything else defaults to JSON.
[Fact]
public void DefaultSerializerConfiguration()
{
var services = new ServiceCollection();
services.AddHybridCache();
using var provider = services.BuildServiceProvider();
var cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
Assert.IsType<InbuiltTypeSerializer>(cache.GetSerializer<string>());
Assert.IsType<InbuiltTypeSerializer>(cache.GetSerializer<byte[]>());
Assert.IsType<DefaultJsonSerializerFactory.DefaultJsonSerializer<Customer>>(cache.GetSerializer<Customer>());
Assert.IsType<DefaultJsonSerializerFactory.DefaultJsonSerializer<Order>>(cache.GetSerializer<Order>());
}
// A per-type serializer registration overrides JSON for that type only.
[Fact]
public void CustomSerializerConfiguration()
{
var services = new ServiceCollection();
services.AddHybridCache().AddSerializer<Customer, CustomerSerializer>();
using var provider = services.BuildServiceProvider();
var cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
Assert.IsType<CustomerSerializer>(cache.GetSerializer<Customer>());
Assert.IsType<DefaultJsonSerializerFactory.DefaultJsonSerializer<Order>>(cache.GetSerializer<Order>());
}
// A serializer factory is consulted per type; unsupported types fall back to JSON.
[Fact]
public void CustomSerializerFactoryConfiguration()
{
var services = new ServiceCollection();
services.AddHybridCache().AddSerializerFactory<CustomFactory>();
using var provider = services.BuildServiceProvider();
var cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
Assert.IsType<CustomerSerializer>(cache.GetSerializer<Customer>());
Assert.IsType<DefaultJsonSerializerFactory.DefaultJsonSerializer<Order>>(cache.GetSerializer<Order>());
}
// The stock MemoryDistributedCache adds no value as L2 (it is the same memory), so it is ignored.
[Theory]
[InlineData(true)]
[InlineData(false)]
public void DefaultMemoryDistributedCacheIsIgnored(bool manual)
{
var services = new ServiceCollection();
if (manual)
{
services.AddSingleton<IDistributedCache, MemoryDistributedCache>();
}
else
{
services.AddDistributedMemoryCache();
}
services.AddHybridCache();
using var provider = services.BuildServiceProvider();
var cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
Assert.Null(cache.BackendCache);
}
// ...but a *derived* MemoryDistributedCache is assumed intentional and is used.
[Fact]
public void SubclassMemoryDistributedCacheIsNotIgnored()
{
var services = new ServiceCollection();
services.AddSingleton<IDistributedCache, CustomMemoryDistributedCache>();
services.AddHybridCache();
using var provider = services.BuildServiceProvider();
var cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
Assert.NotNull(cache.BackendCache);
}
// Likewise, a custom IMemoryCache means the "same memory" shortcut no longer applies.
[Theory]
[InlineData(true)]
[InlineData(false)]
public void SubclassMemoryCacheIsNotIgnored(bool manual)
{
var services = new ServiceCollection();
if (manual)
{
services.AddSingleton<IDistributedCache, MemoryDistributedCache>();
}
else
{
services.AddDistributedMemoryCache();
}
services.AddSingleton<IMemoryCache, CustomMemoryCache>();
services.AddHybridCache();
using var provider = services.BuildServiceProvider();
var cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
Assert.NotNull(cache.BackendCache);
}
// Trivial subclasses used to trigger the "not the stock implementation" code paths above.
class CustomMemoryCache : MemoryCache
{
public CustomMemoryCache(IOptions<MemoryCacheOptions> options) : base(options) { }
public CustomMemoryCache(IOptions<MemoryCacheOptions> options, ILoggerFactory loggerFactory) : base(options, loggerFactory) { }
}
class CustomMemoryDistributedCache : MemoryDistributedCache
{
public CustomMemoryDistributedCache(IOptions<MemoryDistributedCacheOptions> options) : base(options) { }
public CustomMemoryDistributedCache(IOptions<MemoryDistributedCacheOptions> options, ILoggerFactory loggerFactory) : base(options, loggerFactory) { }
}
// Marker payload types for serializer-selection assertions.
class Customer { }
class Order { }
// Only type identity matters in these tests, so the members simply throw.
class CustomerSerializer : IHybridCacheSerializer<Customer>
{
Customer IHybridCacheSerializer<Customer>.Deserialize(ReadOnlySequence<byte> source) => throw new NotImplementedException();
void IHybridCacheSerializer<Customer>.Serialize(Customer value, IBufferWriter<byte> target) => throw new NotImplementedException();
}
// Factory that claims only Customer; all other types fall through to the defaults.
class CustomFactory : IHybridCacheSerializerFactory
{
bool IHybridCacheSerializerFactory.TryCreateSerializer<T>(out IHybridCacheSerializer<T>? serializer)
{
if (typeof(T) == typeof(Customer))
{
serializer = (IHybridCacheSerializer<T>)new CustomerSerializer();
return true;
}
serializer = null;
return false;
}
}
// Per-test unique cache key derived from the test method's own name.
private static string Me([CallerMemberName] string caller = "") => caller;
}

Просмотреть файл

@@ -1,46 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using Microsoft.Data.SqlClient;
using Microsoft.Extensions.DependencyInjection;
using Xunit.Abstractions;
namespace Microsoft.Extensions.Caching.Hybrid.Tests;
// Runs the shared DistributedCacheTests suite against the SQL Server
// distributed cache. Requires a pre-provisioned local database (see the
// setup comments below); if it is unreachable, configuration is skipped
// (best-effort catch) and the suite treats itself as inconclusive.
public class SqlServerTests : DistributedCacheTests
{
public SqlServerTests(ITestOutputHelper log) : base(log) { }
// The SQL cache honors an injected SystemClock, so expiry tests can fake time.
protected override bool CustomClockSupported => true;
protected override async ValueTask ConfigureAsync(IServiceCollection services)
{
// create a local DB named CacheBench, then
// dotnet tool install --global dotnet-sql-cache
// dotnet sql-cache create "Data Source=.;Initial Catalog=CacheBench;Integrated Security=True;Trust Server Certificate=True" dbo BenchmarkCache
const string ConnectionString = "Data Source=.;Initial Catalog=CacheBench;Integrated Security=True;Trust Server Certificate=True";
try
{
// probe connectivity and reset the table to a known-empty state in one step
using var conn = new SqlConnection(ConnectionString);
using var cmd = conn.CreateCommand();
cmd.CommandText = "truncate table dbo.BenchmarkCache";
await conn.OpenAsync();
await cmd.ExecuteNonQueryAsync();
// if that worked: we should be fine
services.AddDistributedSqlServerCache(options =>
{
options.SchemaName = "dbo";
options.TableName = "BenchmarkCache";
options.ConnectionString = ConnectionString;
options.SystemClock = Clock;
});
}
catch (Exception ex)
{
// server/database unavailable: log and leave services unconfigured (inconclusive)
Log.WriteLine(ex.Message);
}
}
}

Просмотреть файл

@@ -1,414 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Text;
using System.Threading.Tasks;
using Microsoft.AspNetCore.InternalTesting;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Caching.Hybrid.Internal;
using Microsoft.Extensions.Caching.Memory;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.Extensions;
namespace Microsoft.Extensions.Caching.Hybrid.Tests;
public class StampedeTests
{
// Builds a provider whose L1/L2 caches throw on any use: the stampede tests
// must be satisfied purely by merging concurrent in-flight operations, so any
// accidental cache traffic fails loudly via InvalidCache.
static ServiceProvider GetDefaultCache(out DefaultHybridCache cache)
{
    ServiceCollection services = new();
    services.AddSingleton<IDistributedCache, InvalidCache>();
    services.AddSingleton<IMemoryCache, InvalidCache>();
    services.AddHybridCache(options => options.DefaultEntryOptions = new()
    {
        Flags = HybridCacheEntryFlags.DisableDistributedCache | HybridCacheEntryFlags.DisableLocalCache
    });
    ServiceProvider provider = services.BuildServiceProvider();
    cache = Assert.IsType<DefaultHybridCache>(provider.GetRequiredService<HybridCache>());
    return provider;
}
// Cache stand-in for the stampede tests: every member (other than Dispose)
// fails loudly, so any accidental L1/L2 traffic surfaces immediately.
public sealed class InvalidCache : IDistributedCache, IMemoryCache
{
    private static NotSupportedException Fail() => new("Intentionally not provided");

    void IDisposable.Dispose()
    {
        // nothing to release
    }

    ICacheEntry IMemoryCache.CreateEntry(object key) => throw Fail();
    byte[]? IDistributedCache.Get(string key) => throw Fail();
    Task<byte[]?> IDistributedCache.GetAsync(string key, CancellationToken token) => throw Fail();
    void IDistributedCache.Refresh(string key) => throw Fail();
    Task IDistributedCache.RefreshAsync(string key, CancellationToken token) => throw Fail();
    void IDistributedCache.Remove(string key) => throw Fail();
    void IMemoryCache.Remove(object key) => throw Fail();
    Task IDistributedCache.RemoveAsync(string key, CancellationToken token) => throw Fail();
    void IDistributedCache.Set(string key, byte[] value, DistributedCacheEntryOptions options) => throw Fail();
    Task IDistributedCache.SetAsync(string key, byte[] value, DistributedCacheEntryOptions options, CancellationToken token) => throw Fail();
    bool IMemoryCache.TryGetValue(object key, out object? value) => throw Fail();
}
// Stampede test: `callerCount` concurrent callers request the same key; exactly one
// factory execution should occur and every caller must observe the same value.
// `canBeCanceled` controls whether the callers' token is *capable* of cancellation
// (exercising the token-aware sharing path) - nothing is ever actually cancelled.
[Theory]
[InlineData(1, false)]
[InlineData(1, true)]
[InlineData(10, false)]
[InlineData(10, true)]
public async Task MultipleCallsShareExecution_NoCancellation(int callerCount, bool canBeCanceled)
{
using var scope = GetDefaultCache(out var cache);
using var semaphore = new SemaphoreSlim(0);
var token = canBeCanceled ? new CancellationTokenSource().Token : CancellationToken.None;
int executeCount = 0, cancelCount = 0;
var results = new Task<Guid>[callerCount];
for (var i = 0; i < callerCount; i++)
{
results[i] = cache.GetOrCreateAsync(Me(), async ct =>
{
// count cancellations and executions so sharing can be asserted below
using var reg = ct.Register(() => Interlocked.Increment(ref cancelCount));
// the factory parks on a non-cancellable wait until the test releases it
if (!await semaphore.WaitAsync(5_000, CancellationToken.None))
{
throw new TimeoutException("Failed to activate");
}
Interlocked.Increment(ref executeCount);
ct.ThrowIfCancellationRequested(); // assert not cancelled
return Guid.NewGuid();
}, cancellationToken: token).AsTask();
}
Assert.Equal(callerCount, cache.DebugGetCallerCount(Me()));
// everyone is queued up; release the hounds and check
// that we all got the same result
Assert.Equal(0, Volatile.Read(ref executeCount));
Assert.Equal(0, Volatile.Read(ref cancelCount));
semaphore.Release();
var first = await results[0];
Assert.Equal(1, Volatile.Read(ref executeCount));
Assert.Equal(0, Volatile.Read(ref cancelCount));
foreach (var result in results)
{
Assert.Equal(first, await result);
}
Assert.Equal(1, Volatile.Read(ref executeCount));
Assert.Equal(0, Volatile.Read(ref cancelCount));
// and do it a second time; we expect different results
// NOTE(review): this assumes the second pass triggers a *fresh* factory execution
// rather than a cache hit - presumably GetDefaultCache configures the cache so the
// first value is not retained; confirm against that helper's setup.
Volatile.Write(ref executeCount, 0);
for (var i = 0; i < callerCount; i++)
{
results[i] = cache.GetOrCreateAsync(Me(), async ct =>
{
using var reg = ct.Register(() => Interlocked.Increment(ref cancelCount));
if (!await semaphore.WaitAsync(5_000, CancellationToken.None))
{
throw new TimeoutException("Failed to activate");
}
Interlocked.Increment(ref executeCount);
ct.ThrowIfCancellationRequested(); // assert not cancelled
return Guid.NewGuid();
}, cancellationToken: token).AsTask();
}
Assert.Equal(callerCount, cache.DebugGetCallerCount(Me()));
// everyone is queued up; release the hounds and check
// that we all got the same result
Assert.Equal(0, Volatile.Read(ref executeCount))
;
Assert.Equal(0, Volatile.Read(ref cancelCount));
semaphore.Release();
var second = await results[0];
Assert.NotEqual(first, second);
Assert.Equal(1, Volatile.Read(ref executeCount));
Assert.Equal(0, Volatile.Read(ref cancelCount));
foreach (var result in results)
{
Assert.Equal(second, await result);
}
Assert.Equal(1, Volatile.Read(ref executeCount));
Assert.Equal(0, Volatile.Read(ref cancelCount));
}
// Stampede + cancellation: when *every* caller cancels, each caller's task must
// transition to Canceled promptly, blaming that caller's *own* token, and the
// shared underlying execution must itself observe cancellation (possibly later).
[Theory]
[InlineData(1)]
[InlineData(10)]
public async Task MultipleCallsShareExecution_EveryoneCancels(int callerCount)
{
// what we want to prove here is that everyone ends up cancelling promptly by
// *their own* cancellation (not dependent on the shared task), and that
// the shared task becomes cancelled (which can be later)
using var scope = GetDefaultCache(out var cache);
using var semaphore = new SemaphoreSlim(0);
int executeCount = 0, cancelCount = 0;
var results = new Task<Guid>[callerCount];
var cancels = new CancellationTokenSource[callerCount];
for (var i = 0; i < callerCount; i++)
{
// each caller gets an independent token source so blame can be checked per-caller
cancels[i] = new CancellationTokenSource();
results[i] = cache.GetOrCreateAsync(Me(), async ct =>
{
using var reg = ct.Register(() => Interlocked.Increment(ref cancelCount));
// park on a non-cancellable wait until the test releases the factory
if (!await semaphore.WaitAsync(5_000, CancellationToken.None))
{
throw new TimeoutException("Failed to activate");
}
try
{
Interlocked.Increment(ref executeCount);
ct.ThrowIfCancellationRequested();
return Guid.NewGuid();
}
finally
{
semaphore.Release(); // handshake so we can check when available again
}
}, cancellationToken: cancels[i].Token).AsTask();
}
Assert.Equal(callerCount, cache.DebugGetCallerCount(Me()));
// everyone is queued up; cancel every caller and verify each observes its own
// prompt cancellation, independent of the still-parked shared task
foreach (var cancel in cancels)
{
cancel.Cancel();
}
await Task.Delay(500); // cancellation happens on a worker; need to allow a moment
for (var i = 0; i < callerCount; i++)
{
var result = results[i];
// should have already cancelled, even though underlying task hasn't finished yet
Assert.Equal(TaskStatus.Canceled, result.Status);
var ex = Assert.Throws<OperationCanceledException>(() => result.GetAwaiter().GetResult());
Assert.Equal(cancels[i].Token, ex.CancellationToken); // each gets the correct blame
}
Assert.Equal(0, Volatile.Read(ref executeCount));
semaphore.Release();
if (!await semaphore.WaitAsync(5_000)) // wait for underlying task to hand back to us
{
throw new TimeoutException("Didn't get handshake back from task");
}
// the shared execution ran exactly once and observed cancellation exactly once
Assert.Equal(1, Volatile.Read(ref executeCount));
Assert.Equal(1, Volatile.Read(ref cancelCount));
}
// Stampede + partial cancellation: all callers except index `remaining` cancel.
// The cancelled callers must fault promptly with their own tokens, while the shared
// execution must survive (kept alive by the remaining caller) and run to completion
// without ever observing cancellation.
[Theory]
[InlineData(2, 0)]
[InlineData(2, 1)]
[InlineData(10, 0)]
[InlineData(10, 1)]
[InlineData(10, 7)]
[QuarantinedTest("https://github.com/dotnet/aspnetcore/issues/55474")]
public async Task MultipleCallsShareExecution_MostCancel(int callerCount, int remaining)
{
Assert.True(callerCount >= 2); // "most" is not "one"
// what we want to prove here is that everyone ends up cancelling promptly by
// *their own* cancellation (not dependent on the shared task), and that
// the shared task becomes cancelled (which can be later)
using var scope = GetDefaultCache(out var cache);
using var semaphore = new SemaphoreSlim(0);
int executeCount = 0, cancelCount = 0;
var results = new Task<Guid>[callerCount];
var cancels = new CancellationTokenSource[callerCount];
for (var i = 0; i < callerCount; i++)
{
// independent token source per caller, so blame can be verified per-caller
cancels[i] = new CancellationTokenSource();
results[i] = cache.GetOrCreateAsync(Me(), async ct =>
{
using var reg = ct.Register(() => Interlocked.Increment(ref cancelCount));
// park on a non-cancellable wait until the test releases the factory
if (!await semaphore.WaitAsync(5_000, CancellationToken.None))
{
throw new TimeoutException("Failed to activate");
}
try
{
Interlocked.Increment(ref executeCount);
ct.ThrowIfCancellationRequested();
return Guid.NewGuid();
}
finally
{
semaphore.Release(); // handshake so we can check when available again
}
}, cancellationToken: cancels[i].Token).AsTask();
}
Assert.Equal(callerCount, cache.DebugGetCallerCount(Me()));
// everyone is queued up; cancel every caller except `remaining`
for (var i = 0; i < callerCount; i++)
{
if (i != remaining)
{
cancels[i].Cancel();
}
}
await Task.Delay(500); // cancellation happens on a worker; need to allow a moment
for (var i = 0; i < callerCount; i++)
{
if (i != remaining)
{
var result = results[i];
// should have already cancelled, even though underlying task hasn't finished yet
Assert.Equal(TaskStatus.Canceled, result.Status);
var ex = Assert.Throws<OperationCanceledException>(() => result.GetAwaiter().GetResult());
Assert.Equal(cancels[i].Token, ex.CancellationToken); // each gets the correct blame
}
}
Assert.Equal(0, Volatile.Read(ref executeCount));
semaphore.Release();
if (!await semaphore.WaitAsync(5_000)) // wait for underlying task to hand back to us
{
throw new TimeoutException("Didn't get handshake back from task");
}
Assert.Equal(1, Volatile.Read(ref executeCount));
Assert.Equal(0, Volatile.Read(ref cancelCount)); // ran to completion
await results[remaining];
}
// Immutable payloads (here: Guid) allow concurrent callers to be handed the *same*
// underlying Task instance; a cancellation-capable caller token forces a per-caller
// wrapper, in which case only the awaited values (not the task objects) match.
[Theory]
[InlineData(true)]
[InlineData(false)]
public async Task ImmutableTypesShareFinalTask(bool withCancelation)
{
    var callerToken = withCancelation ? new CancellationTokenSource().Token : CancellationToken.None;
    using var scope = GetDefaultCache(out var cache);
    using var gate = new SemaphoreSlim(0);

    // shared factory: parks until the test opens the gate, then yields a fresh Guid
    async ValueTask<Guid> CreateWhenReleased(CancellationToken ct)
    {
        await gate.WaitAsync(CancellationToken.None);
        gate.Release();
        return Guid.NewGuid();
    }

    // note AsTask *in this scenario* fetches the underlying incomplete task
    var first = cache.GetOrCreateAsync(Me(), CreateWhenReleased, cancellationToken: callerToken).AsTask();
    var second = cache.GetOrCreateAsync(Me(), CreateWhenReleased, cancellationToken: callerToken).AsTask();

    if (withCancelation)
    {
        Assert.NotSame(first, second);
    }
    else
    {
        Assert.Same(first, second);
    }

    gate.Release();
    Assert.Equal(await first, await second);
}
// Like ImmutableTypesShareFinalTask, but for a user-defined type marked
// [ImmutableObject(true)]: even when a cancelable token forces distinct task
// wrappers, the cached *instance* handed to both callers must be the same.
[Theory]
[InlineData(true)]
[InlineData(false)]
public async Task ImmutableCustomTypesShareFinalTask(bool withCancelation)
{
    var callerToken = withCancelation ? new CancellationTokenSource().Token : CancellationToken.None;
    using var scope = GetDefaultCache(out var cache);
    using var gate = new SemaphoreSlim(0);

    // shared factory: parks until the test opens the gate, then yields a fresh Immutable
    async ValueTask<Immutable> CreateWhenReleased(CancellationToken ct)
    {
        await gate.WaitAsync(CancellationToken.None);
        gate.Release();
        return new Immutable(Guid.NewGuid());
    }

    // AsTask *in this scenario* fetches the underlying incomplete task
    var first = cache.GetOrCreateAsync(Me(), CreateWhenReleased, cancellationToken: callerToken).AsTask();
    var second = cache.GetOrCreateAsync(Me(), CreateWhenReleased, cancellationToken: callerToken).AsTask();

    if (withCancelation)
    {
        Assert.NotSame(first, second);
    }
    else
    {
        Assert.Same(first, second);
    }

    gate.Release();
    var x = await first;
    var y = await second;
    Assert.Equal(x.Value, y.Value);
    Assert.Same(x, y); // same instance regardless of whether the tasks were shared
}
// Mutable payloads must never be shared: the task objects differ, and each caller
// receives a distinct (but value-equal) instance - with or without a cancelable token.
[Theory]
[InlineData(true)]
[InlineData(false)]
public async Task MutableTypesNeverShareFinalTask(bool withCancelation)
{
    var callerToken = withCancelation ? new CancellationTokenSource().Token : CancellationToken.None;
    using var scope = GetDefaultCache(out var cache);
    using var gate = new SemaphoreSlim(0);

    // shared factory: parks until the test opens the gate, then yields a fresh Mutable
    async ValueTask<Mutable> CreateWhenReleased(CancellationToken ct)
    {
        await gate.WaitAsync(CancellationToken.None);
        gate.Release();
        return new Mutable(Guid.NewGuid());
    }

    // AsTask *in this scenario* fetches the underlying incomplete task
    var first = cache.GetOrCreateAsync(Me(), CreateWhenReleased, cancellationToken: callerToken).AsTask();
    var second = cache.GetOrCreateAsync(Me(), CreateWhenReleased, cancellationToken: callerToken).AsTask();
    Assert.NotSame(first, second);

    gate.Release();
    var x = await first;
    var y = await second;
    Assert.Equal(x.Value, y.Value);
    Assert.NotSame(x, y);
}
[Fact]
public void ValidatePartitioning()
{
    // Sanity-check that the stampede sync-locks are spread across multiple
    // partitions rather than all collapsing onto a single lock object.
    using var scope = GetDefaultCache(out var cache);
    var histogram = new Dictionary<object, int>();
    for (var attempt = 0; attempt < 1024; attempt++)
    {
        var stampedeKey = new DefaultHybridCache.StampedeKey(Guid.NewGuid().ToString(), default);
        var partitionLock = cache.GetPartitionedSyncLock(in stampedeKey);
        histogram[partitionLock] = histogram.GetValueOrDefault(partitionLock) + 1;
    }

    // Expect every one of the 8 partitions to be hit. Technically probabilistic,
    // but with 1024 random keys the odds of missing a partition are astronomically
    // small - if this starts failing, maybe buy a lottery ticket?
    Assert.Equal(8, histogram.Count);
    foreach (var (_, hits) in histogram)
    {
        // the median would be 128; 16 is a deliberately generous lower bound
        Assert.True(hits > 16);
    }
}
// No [ImmutableObject] marker: the cache must treat instances of this type as
// mutable and therefore must not hand the same instance to multiple callers.
class Mutable(Guid value)
{
    public Guid Value
    {
        get { return value; }
    }
}
// Marked [ImmutableObject(true)] and sealed, so the cache is free to hand the
// very same instance to every caller.
[ImmutableObject(true)]
public sealed class Immutable(Guid value)
{
    public Guid Value
    {
        get { return value; }
    }
}
// Captures the calling member's name (via [CallerMemberName]); used throughout
// these tests as a per-test-method cache key so tests don't collide.
private static string Me([CallerMemberName] string caller = "")
{
    return caller;
}
}

Просмотреть файл

@ -1,62 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Reflection;
using System.Text;
using System.Threading.Tasks;
using Microsoft.Extensions.Caching.Hybrid.Internal;
namespace Microsoft.Extensions.Caching.Hybrid.Tests;
// Verifies DefaultHybridCache's immutability classification: which payload types
// may be safely shared between callers versus copied/serialized per caller.
public class TypeTests
{
    // Reflectively reads DefaultHybridCache.ImmutableTypeCache<T>.IsImmutable for the
    // supplied T. Extracted so both theories share one copy of the reflection plumbing
    // (previously duplicated verbatim in each test).
    private static bool IsImmutable(Type type)
        => (bool)typeof(DefaultHybridCache.ImmutableTypeCache<>).MakeGenericType(type)
            .GetField(nameof(DefaultHybridCache.ImmutableTypeCache<string>.IsImmutable), BindingFlags.Static | BindingFlags.Public)!
            .GetValue(null)!;

    [Theory]
    [InlineData(typeof(string))]
    [InlineData(typeof(int))] // primitive
    [InlineData(typeof(int?))]
    [InlineData(typeof(Guid))] // non-primitive but blittable
    [InlineData(typeof(Guid?))]
    [InlineData(typeof(SealedCustomClassAttribTrue))] // attrib says explicitly true, and sealed
    [InlineData(typeof(CustomBlittableStruct))] // blittable, and we're copying each time
    [InlineData(typeof(CustomNonBlittableStructAttribTrue))] // non-blittable, attrib says explicitly true
    public void ImmutableTypes(Type type)
    {
        Assert.True(IsImmutable(type));
    }

    [Theory]
    [InlineData(typeof(byte[]))]
    [InlineData(typeof(string[]))]
    [InlineData(typeof(object))]
    [InlineData(typeof(CustomClassNoAttrib))] // no attrib, who knows?
    [InlineData(typeof(CustomClassAttribFalse))] // attrib says explicitly no
    [InlineData(typeof(CustomClassAttribTrue))] // attrib says explicitly true, but not sealed: we might have a sub-class
    [InlineData(typeof(CustomNonBlittableStructNoAttrib))] // no attrib, who knows?
    [InlineData(typeof(CustomNonBlittableStructAttribFalse))] // attrib says explicitly no
    public void MutableTypes(Type type)
    {
        Assert.False(IsImmutable(type));
    }

    // Fixture types covering the attribute/sealed/blittable matrix probed above.
    class CustomClassNoAttrib { }

    [ImmutableObject(false)]
    class CustomClassAttribFalse { }

    [ImmutableObject(true)]
    class CustomClassAttribTrue { }

    [ImmutableObject(true)]
    sealed class SealedCustomClassAttribTrue { }

    struct CustomBlittableStruct(int x) { public int X => x; }

    struct CustomNonBlittableStructNoAttrib(string x) { public string X => x; }

    [ImmutableObject(false)]
    struct CustomNonBlittableStructAttribFalse(string x) { public string X => x; }

    [ImmutableObject(true)]
    struct CustomNonBlittableStructAttribTrue(string x) { public string X => x; }
}

Просмотреть файл

@ -7,7 +7,6 @@ using System.Threading.Tasks;
using BenchmarkDotNet.Attributes;
using Microsoft.Data.SqlClient;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Caching.Hybrid.Internal;
using Microsoft.Extensions.DependencyInjection;
using StackExchange.Redis;
@ -125,23 +124,6 @@ public class DistributedCacheBenchmarks : IDisposable
return total;
}
[Benchmark(OperationsPerInvoke = OperationsPerInvoke)]
public int GetSingleRandomBuffer()
{
    // Sequentially fetches random keys into a pooled buffer-writer, totalling the
    // committed bytes. `using` guarantees the pooled arrays are returned even if
    // the backend throws (previously the writer leaked on the exception path,
    // since Dispose was only reached on success).
    using var writer = RecyclableArrayBufferWriter<byte>.Create(int.MaxValue);
    int total = 0;
    for (int i = 0; i < OperationsPerInvoke; i++)
    {
        if (_backend.TryGet(RandomKey(), writer))
        {
            total += writer.CommittedBytes;
        }
        writer.ResetInPlace(); // reuse the same buffer for the next read
    }
    return total;
}
[Benchmark(OperationsPerInvoke = OperationsPerInvoke)]
public int GetConcurrentRandom()
{
@ -170,23 +152,6 @@ public class DistributedCacheBenchmarks : IDisposable
return total;
}
[Benchmark(OperationsPerInvoke = OperationsPerInvoke)]
public async Task<int> GetSingleRandomBufferAsync()
{
    // Async sequential fetch of random keys into a pooled buffer-writer, totalling
    // the committed bytes. `using` guarantees the pooled arrays are returned even
    // if the backend faults (previously the writer leaked on the exception path).
    using var writer = RecyclableArrayBufferWriter<byte>.Create(int.MaxValue);
    int total = 0;
    for (int i = 0; i < OperationsPerInvoke; i++)
    {
        if (await _backend.TryGetAsync(RandomKey(), writer))
        {
            total += writer.CommittedBytes;
        }
        writer.ResetInPlace(); // reuse the same buffer for the next read
    }
    return total;
}
[Benchmark(OperationsPerInvoke = OperationsPerInvoke)]
public async Task<int> GetConcurrentRandomAsync()
{
@ -214,23 +179,6 @@ public class DistributedCacheBenchmarks : IDisposable
return total;
}
[Benchmark(OperationsPerInvoke = OperationsPerInvoke)]
public int GetSingleFixedBuffer()
{
    // Sequentially fetches a fixed key into a pooled buffer-writer, totalling the
    // committed bytes. `using` guarantees the pooled arrays are returned even if
    // the backend throws (previously the writer leaked on the exception path).
    using var writer = RecyclableArrayBufferWriter<byte>.Create(int.MaxValue);
    int total = 0;
    for (int i = 0; i < OperationsPerInvoke; i++)
    {
        if (_backend.TryGet(FixedKey(), writer))
        {
            total += writer.CommittedBytes;
        }
        writer.ResetInPlace(); // reuse the same buffer for the next read
    }
    return total;
}
[Benchmark(OperationsPerInvoke = OperationsPerInvoke)]
public int GetConcurrentFixed()
{
@ -258,23 +206,6 @@ public class DistributedCacheBenchmarks : IDisposable
return total;
}
[Benchmark(OperationsPerInvoke = OperationsPerInvoke)]
public async Task<int> GetSingleFixedBufferAsync()
{
    // Async sequential fetch of a fixed key into a pooled buffer-writer, totalling
    // the committed bytes. `using` guarantees the pooled arrays are returned even
    // if the backend faults (previously the writer leaked on the exception path).
    using var writer = RecyclableArrayBufferWriter<byte>.Create(int.MaxValue);
    int total = 0;
    for (int i = 0; i < OperationsPerInvoke; i++)
    {
        if (await _backend.TryGetAsync(FixedKey(), writer))
        {
            total += writer.CommittedBytes;
        }
        writer.ResetInPlace(); // reuse the same buffer for the next read
    }
    return total;
}
[Benchmark(OperationsPerInvoke = OperationsPerInvoke)]
public async Task<int> GetConcurrentFixedAsync()
{

Просмотреть файл

@ -1,146 +0,0 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.ComponentModel;
using System.IO;
using System.Text.Json;
using System.Threading;
using System.Threading.Tasks;
using BenchmarkDotNet.Attributes;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Caching.Hybrid;
using Microsoft.Extensions.DependencyInjection;
using StackExchange.Redis;
namespace Microsoft.Extensions.Caching.Benchmarks;
// Compares a hand-rolled IDistributedCache read-through against HybridCache's
// GetOrCreateAsync variants, all at a ~100% hit rate. Requires a local Redis server.
[MemoryDiagnoser]
public class HybridCacheBenchmarks : IDisposable
{
    // AllowAdmin permits server-level commands (e.g. flush) from tooling if needed.
    private const string RedisConfigurationString = "127.0.0.1,AllowAdmin=true";

    private readonly ConnectionMultiplexer _multiplexer;
    private readonly IDistributedCache _distributed;
    private readonly HybridCache _hybrid;

    // Wires a Redis-backed IDistributedCache plus HybridCache through DI, and removes
    // any leftover keys so each benchmark session starts from a known state.
    public HybridCacheBenchmarks()
    {
        _multiplexer = ConnectionMultiplexer.Connect(RedisConfigurationString);
        var services = new ServiceCollection();
        services.AddStackExchangeRedisCache(options =>
        {
            options.ConnectionMultiplexerFactory = () => Task.FromResult<IConnectionMultiplexer>(_multiplexer);
        });
        services.AddHybridCache();
        var provider = services.BuildServiceProvider();
        _distributed = provider.GetRequiredService<IDistributedCache>();
        _distributed.Remove(KeyDirect);
        _distributed.Remove(KeyHybrid);
        _distributed.Remove(KeyHybridImmutable);
        _hybrid = provider.GetRequiredService<HybridCache>();
    }

    private const string KeyDirect = "direct";
    private const string KeyHybrid = "hybrid";
    private const string KeyHybridImmutable = "I_brid"; // want 6 chars

    public void Dispose() => _multiplexer.Dispose();

    private const int CustomerId = 42;

    private static readonly DistributedCacheEntryOptions OneHour = new DistributedCacheEntryOptions()
    {
        AbsoluteExpirationRelativeToNow = TimeSpan.FromHours(1)
    };

    // Baseline: manual read-through against IDistributedCache with JSON (de)serialization.
    // scenario: 100% (or as-near-as) cache hit rate
    [Benchmark(Baseline = true)]
    public async ValueTask<Customer> HitDistributedCache()
    {
        var bytes = await _distributed.GetAsync(KeyDirect);
        if (bytes is null)
        {
            var cust = await Customer.GetAsync(CustomerId);
            await _distributed.SetAsync(KeyDirect, Serialize(cust), OneHour);
            return cust;
        }
        else
        {
            return Deserialize<Customer>(bytes)!;
        }
    }

    // HybridCache with a capturing lambda (allocates a closure per call).
    // scenario: 100% (or as-near-as) cache hit rate
    [Benchmark]
    public ValueTask<Customer> HitCaptureHybridCache()
        => _hybrid.GetOrCreateAsync(KeyHybrid,
            ct => Customer.GetAsync(CustomerId, ct));

    // HybridCache with explicit state and a static lambda (no closure allocation).
    // scenario: 100% (or as-near-as) cache hit rate
    [Benchmark]
    public ValueTask<Customer> HitHybridCache()
        => _hybrid.GetOrCreateAsync(KeyHybrid, CustomerId,
            static (id, ct) => Customer.GetAsync(id, ct));

    [Benchmark]
    public ValueTask<ImmutableCustomer> HitHybridCacheImmutable() // scenario: 100% (or as-near-as) cache hit rate
        => _hybrid.GetOrCreateAsync(KeyHybridImmutable, CustomerId, static (id, ct) => ImmutableCustomer.GetAsync(id, ct));

    private static byte[] Serialize<T>(T obj)
    {
        using var ms = new MemoryStream();
        JsonSerializer.Serialize(ms, obj);
        return ms.ToArray();
    }

    private static T? Deserialize<T>(byte[] bytes)
    {
        // fix: the previous version allocated a MemoryStream that was never used,
        // skewing allocation numbers in a [MemoryDiagnoser] benchmark; deserialize
        // straight from the byte[]
        return JsonSerializer.Deserialize<T>(bytes);
    }

    // Mutable DTO used by the non-immutable benchmarks; GetAsync fabricates a fixed
    // instance synchronously (no real backend lookup).
    public class Customer
    {
        public static ValueTask<Customer> GetAsync(int id, CancellationToken token = default)
            => new(new Customer
            {
                Id = id,
                Name = "Random customer",
                Region = 2,
                Description = "Good for testing",
                CreationDate = new DateTime(2024, 04, 17),
                OrderValue = 123_456.789M
            });

        public int Id { get; set; }
        public string? Name { get; set; }
        public int Region { get; set; }
        public string? Description { get; set; }
        public DateTime CreationDate { get; set; }
        public decimal OrderValue { get; set; }
    }

    // Immutable counterpart ([ImmutableObject(true)] + sealed + init-only setters),
    // letting HybridCache hand back the same instance on every hit.
    [ImmutableObject(true)]
    public sealed class ImmutableCustomer
    {
        public static ValueTask<ImmutableCustomer> GetAsync(int id, CancellationToken token = default)
            => new(new ImmutableCustomer
            {
                Id = id,
                Name = "Random customer",
                Region = 2,
                Description = "Good for testing",
                CreationDate = new DateTime(2024, 04, 17),
                OrderValue = 123_456.789M
            });

        public int Id { get; init; }
        public string? Name { get; init; }
        public int Region { get; init; }
        public string? Description { get; init; }
        public DateTime CreationDate { get; init; }
        public decimal OrderValue { get; init; }
    }
}

Просмотреть файл

@ -12,13 +12,11 @@
<ItemGroup>
<Reference Include="BenchmarkDotNet" />
<Reference Include="Microsoft.Extensions.Caching.Hybrid" />
<Reference Include="Microsoft.Extensions.Caching.SqlServer" />
<Reference Include="Microsoft.Extensions.Caching.StackExchangeRedis" />
<Reference Include="Microsoft.Extensions.DependencyInjection.Abstractions" />
<Reference Include="Microsoft.Extensions.DependencyInjection" />
<Compile Include="..\..\..\Hybrid\src\Internal\RecyclableArrayBufferWriter.cs" LinkBase="Microsoft.Extensions.Caching.Hybrid" />
<!--<Compile Include="$(SharedSourceRoot)BenchmarkRunner\*.cs" />-->
<Compile Include="$(SharedSourceRoot)IsExternalInit.cs" LinkBase="Shared" />

Просмотреть файл

@ -6,17 +6,7 @@ using BenchmarkDotNet.Running;
using Microsoft.Extensions.Caching.Benchmarks;
#if DEBUG
// validation
using (var hc = new HybridCacheBenchmarks())
{
for (int i = 0; i < 10; i++)
{
Console.WriteLine((await hc.HitDistributedCache()).Name);
Console.WriteLine((await hc.HitHybridCache()).Name);
Console.WriteLine((await hc.HitHybridCacheImmutable()).Name);
}
}
Console.WriteLine("Release mode only");
/*
using (var obj = new DistributedCacheBenchmarks { PayloadSize = 11512, Sliding = true })
{