Merge pull request #101 from nayato/bufferpool

PooledByteBufferAllocator port, Byte Buffers improvements
This commit is contained in:
Max Gortman 2016-05-05 19:11:46 -07:00
Родитель 152b0375ae cef1295c3c
Коммит 45c8c70649
55 изменённых файлов: 7037 добавлений и 379 удалений

Просмотреть файл

@ -1,7 +1,7 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 14
VisualStudioVersion = 14.0.24720.0
VisualStudioVersion = 14.0.25123.0
MinimumVisualStudioVersion = 10.0.40219.1
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "DotNetty.Common", "src\DotNetty.Common\DotNetty.Common.csproj", "{DE58FE41-5E99-44E5-86BC-FC9ED8761DAF}"
EndProject
@ -74,8 +74,8 @@ Global
{5DE3C557-48BF-4CDB-9F47-474D343DD841}.Debug|Any CPU.Build.0 = Debug|Any CPU
{5DE3C557-48BF-4CDB-9F47-474D343DD841}.Release|Any CPU.ActiveCfg = Release|Any CPU
{5DE3C557-48BF-4CDB-9F47-474D343DD841}.Release|Any CPU.Build.0 = Release|Any CPU
{5DE3C557-48BF-4CDB-9F47-474D343DD841}.Signed|Any CPU.ActiveCfg = Release|Any CPU
{5DE3C557-48BF-4CDB-9F47-474D343DD841}.Signed|Any CPU.Build.0 = Release|Any CPU
{5DE3C557-48BF-4CDB-9F47-474D343DD841}.Signed|Any CPU.ActiveCfg = Signed|Any CPU
{5DE3C557-48BF-4CDB-9F47-474D343DD841}.Signed|Any CPU.Build.0 = Signed|Any CPU
{8218C9EE-0A4A-432F-A12A-B54202F97B05}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{8218C9EE-0A4A-432F-A12A-B54202F97B05}.Debug|Any CPU.Build.0 = Debug|Any CPU
{8218C9EE-0A4A-432F-A12A-B54202F97B05}.Release|Any CPU.ActiveCfg = Release|Any CPU

Просмотреть файл

@ -2,8 +2,10 @@
<s:Boolean x:Key="/Default/CodeInspection/ExcludedFiles/FileMasksToSkip/=_002A_002Emin_002Ejs/@EntryIndexedValue">True</s:Boolean>
<s:Boolean x:Key="/Default/CodeInspection/Highlighting/IdentifierHighlightingEnabled/@EntryValue">True</s:Boolean>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=BuiltInTypeReferenceStyle/@EntryIndexedValue">WARNING</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=ConvertClosureToMethodGroup/@EntryIndexedValue">HINT</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=ConvertClosureToMethodGroup/@EntryIndexedValue">DO_NOT_SHOW</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=ConvertIfStatementToConditionalTernaryExpression/@EntryIndexedValue">HINT</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=ConvertMethodToExpressionBody/@EntryIndexedValue">SUGGESTION</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=ConvertToExpressionBodyWhenPossible/@EntryIndexedValue">SUGGESTION</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=IntroduceOptionalParameters_002ELocal/@EntryIndexedValue">DO_NOT_SHOW</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=LoopCanBeConvertedToQuery/@EntryIndexedValue">DO_NOT_SHOW</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=MemberCanBeMadeStatic_002ELocal/@EntryIndexedValue">HINT</s:String>
@ -19,6 +21,7 @@
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=SuggestVarOrType_005FBuiltInTypes/@EntryIndexedValue">WARNING</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=SuggestVarOrType_005FElsewhere/@EntryIndexedValue">WARNING</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=SuggestVarOrType_005FSimpleTypes/@EntryIndexedValue">WARNING</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=ThreadStaticAtInstanceField/@EntryIndexedValue">ERROR</s:String>
<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=UseObjectOrCollectionInitializer/@EntryIndexedValue">HINT</s:String>
<s:String x:Key="/Default/CodeStyle/CodeCleanup/Profiles/=Simple/@EntryIndexedValue">&lt;?xml version="1.0" encoding="utf-16"?&gt;&lt;Profile name="Simple"&gt;&lt;CSArrangeThisQualifier&gt;True&lt;/CSArrangeThisQualifier&gt;&lt;CSUseVar&gt;&lt;BehavourStyle&gt;CAN_CHANGE_TO_IMPLICIT&lt;/BehavourStyle&gt;&lt;LocalVariableStyle&gt;IMPLICIT_WHEN_INITIALIZER_HAS_TYPE&lt;/LocalVariableStyle&gt;&lt;ForeachVariableStyle&gt;IMPLICIT_EXCEPT_PRIMITIVE_TYPES&lt;/ForeachVariableStyle&gt;&lt;/CSUseVar&gt;&lt;CSUpdateFileHeader&gt;True&lt;/CSUpdateFileHeader&gt;&lt;CSOptimizeUsings&gt;&lt;OptimizeUsings&gt;True&lt;/OptimizeUsings&gt;&lt;EmbraceInRegion&gt;False&lt;/EmbraceInRegion&gt;&lt;RegionName&gt;&lt;/RegionName&gt;&lt;/CSOptimizeUsings&gt;&lt;CSReformatCode&gt;True&lt;/CSReformatCode&gt;&lt;StyleCop.Documentation&gt;&lt;SA1600ElementsMustBeDocumented&gt;False&lt;/SA1600ElementsMustBeDocumented&gt;&lt;SA1604ElementDocumentationMustHaveSummary&gt;False&lt;/SA1604ElementDocumentationMustHaveSummary&gt;&lt;SA1609PropertyDocumentationMustHaveValueDocumented&gt;False&lt;/SA1609PropertyDocumentationMustHaveValueDocumented&gt;&lt;SA1611ElementParametersMustBeDocumented&gt;False&lt;/SA1611ElementParametersMustBeDocumented&gt;&lt;SA1615ElementReturnValueMustBeDocumented&gt;False&lt;/SA1615ElementReturnValueMustBeDocumented&gt;&lt;SA1617VoidReturnValueMustNotBeDocumented&gt;False&lt;/SA1617VoidReturnValueMustNotBeDocumented&gt;&lt;SA1618GenericTypeParametersMustBeDocumented&gt;False&lt;/SA1618GenericTypeParametersMustBeDocumented&gt;&lt;SA1626SingleLineCommentsMustNotUseDocumentationStyleSlashes&gt;False&lt;/SA1626SingleLineCommentsMustNotUseDocumentationStyleSlashes&gt;&lt;SA1628DocumentationTextMustBeginWithACapitalLetter&gt;False&lt;/SA1628DocumentationTextMustBeginWithACapitalLetter&gt;&lt;SA1629DocumentationTextMustEndWithAPeriod&gt;False&lt;/SA1629DocumentationTextMustEndWithAPeriod&gt;&lt;SA1633SA1641UpdateFileHeader&gt;ReplaceAll&lt;/SA1633SA1641UpdateFileHeader&gt;&lt;SA1639F
ileHeaderMustHaveSummary&gt;False&lt;/SA1639FileHeaderMustHaveSummary&gt;&lt;SA1642ConstructorSummaryDocumentationMustBeginWithStandardText&gt;False&lt;/SA1642ConstructorSummaryDocumentationMustBeginWithStandardText&gt;&lt;SA1643DestructorSummaryDocumentationMustBeginWithStandardText&gt;False&lt;/SA1643DestructorSummaryDocumentationMustBeginWithStandardText&gt;&lt;SA1644DocumentationHeadersMustNotContainBlankLines&gt;False&lt;/SA1644DocumentationHeadersMustNotContainBlankLines&gt;&lt;/StyleCop.Documentation&gt;&lt;CSShortenReferences&gt;True&lt;/CSShortenReferences&gt;&lt;CSFixBuiltinTypeReferences&gt;True&lt;/CSFixBuiltinTypeReferences&gt;&lt;CSArrangeQualifiers&gt;True&lt;/CSArrangeQualifiers&gt;&lt;CSEnforceVarKeywordUsageSettings&gt;True&lt;/CSEnforceVarKeywordUsageSettings&gt;&lt;CSMakeFieldReadonly&gt;True&lt;/CSMakeFieldReadonly&gt;&lt;CSharpFormatDocComments&gt;True&lt;/CSharpFormatDocComments&gt;&lt;CSArrangeTypeModifiers&gt;True&lt;/CSArrangeTypeModifiers&gt;&lt;CSArrangeTypeMemberModifiers&gt;True&lt;/CSArrangeTypeMemberModifiers&gt;&lt;CSSortModifiers&gt;True&lt;/CSSortModifiers&gt;&lt;CSCodeStyleAttributes ArrangeTypeAccessModifier="True" ArrangeTypeMemberAccessModifier="True" SortModifiers="True" RemoveRedundantParentheses="False" AddMissingParentheses="True" ArrangeAttributes="False" /&gt;&lt;/Profile&gt;</s:String>
<s:String x:Key="/Default/CodeStyle/CodeCleanup/RecentlyUsedProfile/@EntryValue">Simple</s:String>

Просмотреть файл

@ -79,25 +79,13 @@ namespace DotNetty.Buffers
public virtual int MaxWritableBytes => this.MaxCapacity - this.WriterIndex;
public bool IsReadable()
{
return this.IsReadable(1);
}
public bool IsReadable() => this.IsReadable(1);
public bool IsReadable(int size)
{
return this.ReadableBytes >= size;
}
public bool IsReadable(int size) => this.ReadableBytes >= size;
public bool IsWritable()
{
return this.IsWritable(1);
}
public bool IsWritable() => this.IsWritable(1);
public bool IsWritable(int size)
{
return this.WritableBytes >= size;
}
public bool IsWritable(int size) => this.WritableBytes >= size;
public virtual IByteBuffer Clear()
{
@ -262,10 +250,7 @@ namespace DotNetty.Buffers
return Math.Min(newCapacity, maxCapacity);
}
public virtual bool GetBoolean(int index)
{
return this.GetByte(index) != 0;
}
public virtual bool GetBoolean(int index) => this.GetByte(index) != 0;
public virtual byte GetByte(int index)
{
@ -315,15 +300,9 @@ namespace DotNetty.Buffers
protected abstract long _GetLong(int index);
public virtual char GetChar(int index)
{
return Convert.ToChar(this.GetShort(index));
}
public virtual char GetChar(int index) => Convert.ToChar(this.GetShort(index));
public virtual double GetDouble(int index)
{
return BitConverter.Int64BitsToDouble(this.GetLong(index));
}
public virtual double GetDouble(int index) => BitConverter.Int64BitsToDouble(this.GetLong(index));
public virtual IByteBuffer GetBytes(int index, IByteBuffer destination)
{
@ -334,6 +313,7 @@ namespace DotNetty.Buffers
public virtual IByteBuffer GetBytes(int index, IByteBuffer destination, int length)
{
this.GetBytes(index, destination, destination.WriterIndex, length);
destination.SetWriterIndex(destination.WriterIndex + length);
return this;
}
@ -452,10 +432,7 @@ namespace DotNetty.Buffers
public abstract Task<int> SetBytesAsync(int index, Stream src, int length, CancellationToken cancellationToken);
public virtual bool ReadBoolean()
{
return this.ReadByte() != 0;
}
public virtual bool ReadBoolean() => this.ReadByte() != 0;
public virtual byte ReadByte()
{
@ -506,15 +483,9 @@ namespace DotNetty.Buffers
return v;
}
public virtual char ReadChar()
{
return (char)this.ReadShort();
}
public virtual char ReadChar() => (char)this.ReadShort();
public virtual double ReadDouble()
{
return BitConverter.Int64BitsToDouble(this.ReadLong());
}
public virtual double ReadDouble() => BitConverter.Int64BitsToDouble(this.ReadLong());
public IByteBuffer ReadBytes(int length)
{
@ -524,7 +495,7 @@ namespace DotNetty.Buffers
return Unpooled.Empty;
}
IByteBuffer buf = Unpooled.Buffer(length, this.MaxCapacity);
IByteBuffer buf = this.Allocator.Buffer(length, this.MaxCapacity);
buf.WriteBytes(this, this.ReaderIndex, length);
this.ReaderIndex += length;
return buf;
@ -592,6 +563,7 @@ namespace DotNetty.Buffers
public virtual IByteBuffer WriteByte(int value)
{
this.EnsureAccessible();
this.EnsureWritable(1);
this.SetByte(this.WriterIndex, value);
this.WriterIndex += 1;
@ -600,6 +572,7 @@ namespace DotNetty.Buffers
public virtual IByteBuffer WriteShort(int value)
{
this.EnsureAccessible();
this.EnsureWritable(2);
this._SetShort(this.WriterIndex, value);
this.WriterIndex += 2;
@ -617,6 +590,7 @@ namespace DotNetty.Buffers
public virtual IByteBuffer WriteInt(int value)
{
this.EnsureAccessible();
this.EnsureWritable(4);
this._SetInt(this.WriterIndex, value);
this.WriterIndex += 4;
@ -634,6 +608,7 @@ namespace DotNetty.Buffers
public virtual IByteBuffer WriteLong(long value)
{
this.EnsureAccessible();
this.EnsureWritable(8);
this._SetLong(this.WriterIndex, value);
this.WriterIndex += 8;
@ -710,10 +685,7 @@ namespace DotNetty.Buffers
this.SetWriterIndex(writerIndex + wrote);
}
public Task WriteBytesAsync(Stream stream, int length)
{
return this.WriteBytesAsync(stream, length, CancellationToken.None);
}
public Task WriteBytesAsync(Stream stream, int length) => this.WriteBytesAsync(stream, length, CancellationToken.None);
public abstract bool HasArray { get; }
@ -726,7 +698,7 @@ namespace DotNetty.Buffers
int readableBytes = this.ReadableBytes;
if (readableBytes == 0)
{
return ByteArrayExtensions.Empty;
return ArrayExtensions.ZeroBytes;
}
if (this.HasArray)
@ -739,10 +711,7 @@ namespace DotNetty.Buffers
return bytes;
}
public virtual IByteBuffer Duplicate()
{
return new DuplicatedByteBuffer(this);
}
public virtual IByteBuffer Duplicate() => new DuplicatedByteBuffer(this);
public abstract IByteBuffer Unwrap();
@ -767,10 +736,7 @@ namespace DotNetty.Buffers
/// Creates a new <see cref="SwappedByteBuffer" /> for this <see cref="IByteBuffer" /> instance.
/// </summary>
/// <returns>A <see cref="SwappedByteBuffer" /> for this buffer.</returns>
protected SwappedByteBuffer NewSwappedByteBuffer()
{
return new SwappedByteBuffer(this);
}
protected SwappedByteBuffer NewSwappedByteBuffer() => new SwappedByteBuffer(this);
protected void AdjustMarkers(int decrement)
{
@ -795,6 +761,51 @@ namespace DotNetty.Buffers
}
}
public override int GetHashCode() => ByteBufferUtil.HashCode(this);
public override bool Equals(object o) => this.Equals(o as IByteBuffer);
public bool Equals(IByteBuffer buffer)
{
if (ReferenceEquals(this, buffer))
{
return true;
}
if (buffer != null)
{
return ByteBufferUtil.Equals(this, buffer);
}
return false;
}
public int CompareTo(IByteBuffer that) => ByteBufferUtil.Compare(this, that);
public override string ToString()
{
if (this.ReferenceCount == 0)
{
return StringUtil.SimpleClassName(this) + "(freed)";
}
StringBuilder buf = new StringBuilder()
.Append(StringUtil.SimpleClassName(this))
.Append("(ridx: ").Append(this.ReaderIndex)
.Append(", widx: ").Append(this.WriterIndex)
.Append(", cap: ").Append(this.Capacity);
if (this.MaxCapacity != int.MaxValue)
{
buf.Append('/').Append(this.MaxCapacity);
}
IByteBuffer unwrapped = this.Unwrap();
if (unwrapped != null)
{
buf.Append(", unwrapped: ").Append(unwrapped);
}
buf.Append(')');
return buf.ToString();
}
protected void CheckIndex(int index)
{
this.EnsureAccessible();
@ -862,22 +873,13 @@ namespace DotNetty.Buffers
}
}
public IByteBuffer Copy()
{
return this.Copy(this.ReaderIndex, this.ReadableBytes);
}
public IByteBuffer Copy() => this.Copy(this.ReaderIndex, this.ReadableBytes);
public abstract IByteBuffer Copy(int index, int length);
public IByteBuffer Slice()
{
return this.Slice(this.ReaderIndex, this.ReadableBytes);
}
public IByteBuffer Slice() => this.Slice(this.ReaderIndex, this.ReadableBytes);
public virtual IByteBuffer Slice(int index, int length)
{
return new SlicedByteBuffer(this, index, length);
}
public virtual IByteBuffer Slice(int index, int length) => new SlicedByteBuffer(this, index, length);
public IByteBuffer ReadSlice(int length)
{
@ -905,15 +907,9 @@ namespace DotNetty.Buffers
this.markedReaderIndex = this.markedWriterIndex = 0;
}
public string ToString(Encoding encoding)
{
return this.ToString(this.ReaderIndex, this.ReadableBytes, encoding);
}
public string ToString(Encoding encoding) => this.ToString(this.ReaderIndex, this.ReadableBytes, encoding);
public string ToString(int index, int length, Encoding encoding)
{
return ByteBufferUtil.DecodeString(this, index, length, encoding);
}
public string ToString(int index, int length, Encoding encoding) => ByteBufferUtil.DecodeString(this, index, length, encoding);
public int ForEachByte(ByteProcessor processor)
{
@ -935,7 +931,7 @@ namespace DotNetty.Buffers
{
if (processor == null)
{
throw new NullReferenceException("processor");
throw new ArgumentNullException("processor");
}
if (length == 0)
@ -945,25 +941,18 @@ namespace DotNetty.Buffers
int endIndex = index + length;
int i = index;
try
do
{
do
if (processor.Process(this._GetByte(i)))
{
if (processor.Process(this._GetByte(i)))
{
i++;
}
else
{
return i;
}
i++;
}
else
{
return i;
}
while (i < endIndex);
}
catch
{
throw;
}
while (i < endIndex);
return -1;
}
@ -997,25 +986,18 @@ namespace DotNetty.Buffers
}
int i = index + length - 1;
try
do
{
do
if (processor.Process(this._GetByte(i)))
{
if (processor.Process(this._GetByte(i)))
{
i--;
}
else
{
return i;
}
i--;
}
else
{
return i;
}
while (i >= index);
}
catch
{
throw;
}
while (i >= index);
return -1;
}

Просмотреть файл

@ -23,7 +23,7 @@ namespace DotNetty.Buffers
leak = AbstractByteBuffer.LeakDetector.Open(buf);
if (leak != null)
{
buf = new SimpleLeakAwareByteBuf(buf, leak);
buf = new SimpleLeakAwareByteBuffer(buf, leak);
}
break;
case ResourceLeakDetector.DetectionLevel.Advanced:
@ -31,7 +31,7 @@ namespace DotNetty.Buffers
leak = AbstractByteBuffer.LeakDetector.Open(buf);
if (leak != null)
{
buf = new AdvancedLeakAwareByteBuf(buf, leak);
buf = new AdvancedLeakAwareByteBuffer(buf, leak);
}
break;
case ResourceLeakDetector.DetectionLevel.Disabled:
@ -71,15 +71,9 @@ namespace DotNetty.Buffers
return this.NewBuffer(initialCapacity, maxCapacity);
}
public CompositeByteBuffer CompositeBuffer()
{
return this.CompositeBuffer(DefaultMaxComponents);
}
public CompositeByteBuffer CompositeBuffer() => this.CompositeBuffer(DefaultMaxComponents);
public CompositeByteBuffer CompositeBuffer(int maxComponents)
{
return new CompositeByteBuffer(this, maxComponents);
}
public CompositeByteBuffer CompositeBuffer(int maxComponents) => new CompositeByteBuffer(this, maxComponents);
protected abstract IByteBuffer NewBuffer(int initialCapacity, int maxCapacity);

Просмотреть файл

@ -42,14 +42,8 @@ namespace DotNetty.Buffers
return this;
}
public sealed override bool Release()
{
return this.Unwrap().Release();
}
public sealed override bool Release() => this.Unwrap().Release();
public sealed override bool Release(int decrement)
{
return this.Unwrap().Release(decrement);
}
public sealed override bool Release(int decrement) => this.Unwrap().Release(decrement);
}
}

Просмотреть файл

@ -9,26 +9,22 @@ namespace DotNetty.Buffers
public abstract class AbstractReferenceCountedByteBuffer : AbstractByteBuffer
{
#pragma warning disable 420
volatile int referenceCount = 1;
int referenceCount = 1;
protected AbstractReferenceCountedByteBuffer(int maxCapacity)
: base(maxCapacity)
{
}
public override int ReferenceCount => this.referenceCount;
public override int ReferenceCount => Volatile.Read(ref this.referenceCount);
protected void SetReferenceCount(int value)
{
this.referenceCount = value;
}
protected void SetReferenceCount(int value) => Volatile.Write(ref this.referenceCount, value);
public override IReferenceCounted Retain()
{
while (true)
{
int refCnt = this.referenceCount;
int refCnt = this.ReferenceCount;
if (refCnt == 0)
{
throw new IllegalReferenceCountException(0, 1);
@ -55,7 +51,7 @@ namespace DotNetty.Buffers
while (true)
{
int refCnt = this.referenceCount;
int refCnt = this.ReferenceCount;
if (refCnt == 0)
{
throw new IllegalReferenceCountException(0, increment);
@ -77,7 +73,7 @@ namespace DotNetty.Buffers
{
while (true)
{
int refCnt = this.referenceCount;
int refCnt = this.ReferenceCount;
if (refCnt == 0)
{
throw new IllegalReferenceCountException(0, -1);
@ -104,7 +100,7 @@ namespace DotNetty.Buffers
while (true)
{
int refCnt = this.referenceCount;
int refCnt = this.ReferenceCount;
if (refCnt < decrement)
{
throw new IllegalReferenceCountException(refCnt, -decrement);
@ -122,15 +118,9 @@ namespace DotNetty.Buffers
}
}
public override IReferenceCounted Touch()
{
return this;
}
public override IReferenceCounted Touch() => this;
public override IReferenceCounted Touch(object hint)
{
return this;
}
public override IReferenceCounted Touch(object hint) => this;
protected abstract void Deallocate();
}

Просмотреть файл

@ -11,14 +11,14 @@ namespace DotNetty.Buffers
using DotNetty.Common.Internal;
using DotNetty.Common.Internal.Logging;
class AdvancedLeakAwareByteBuf : WrappedByteBuffer
class AdvancedLeakAwareByteBuffer : WrappedByteBuffer
{
static readonly string PropAcquireAndReleaseOnly = "io.netty.leakDetection.acquireAndReleaseOnly";
static readonly bool AcquireAndReleaseOnly;
static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance<AdvancedLeakAwareByteBuf>();
static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance<AdvancedLeakAwareByteBuffer>();
static AdvancedLeakAwareByteBuf()
static AdvancedLeakAwareByteBuffer()
{
AcquireAndReleaseOnly = SystemPropertyUtil.GetBoolean(PropAcquireAndReleaseOnly, false);
@ -30,7 +30,7 @@ namespace DotNetty.Buffers
readonly IResourceLeak leak;
internal AdvancedLeakAwareByteBuf(IByteBuffer buf, IResourceLeak leak)
internal AdvancedLeakAwareByteBuffer(IByteBuffer buf, IResourceLeak leak)
: base(buf)
{
this.leak = leak;
@ -53,32 +53,32 @@ namespace DotNetty.Buffers
}
else
{
return new AdvancedLeakAwareByteBuf(base.WithOrder(endianness), this.leak);
return new AdvancedLeakAwareByteBuffer(base.WithOrder(endianness), this.leak);
}
}
public override IByteBuffer Slice()
{
this.RecordLeakNonRefCountingOperation();
return new AdvancedLeakAwareByteBuf(base.Slice(), this.leak);
return new AdvancedLeakAwareByteBuffer(base.Slice(), this.leak);
}
public override IByteBuffer Slice(int index, int length)
{
this.RecordLeakNonRefCountingOperation();
return new AdvancedLeakAwareByteBuf(base.Slice(index, length), this.leak);
return new AdvancedLeakAwareByteBuffer(base.Slice(index, length), this.leak);
}
public override IByteBuffer Duplicate()
{
this.RecordLeakNonRefCountingOperation();
return new AdvancedLeakAwareByteBuf(base.Duplicate(), this.leak);
return new AdvancedLeakAwareByteBuffer(base.Duplicate(), this.leak);
}
public override IByteBuffer ReadSlice(int length)
{
this.RecordLeakNonRefCountingOperation();
return new AdvancedLeakAwareByteBuf(base.ReadSlice(length), this.leak);
return new AdvancedLeakAwareByteBuffer(base.ReadSlice(length), this.leak);
}
public override IByteBuffer DiscardReadBytes()

Просмотреть файл

@ -6,10 +6,14 @@ namespace DotNetty.Buffers
using System;
using System.Diagnostics.Contracts;
using System.Text;
using DotNetty.Common.Internal;
using DotNetty.Common.Internal.Logging;
using DotNetty.Common.Utilities;
public static class ByteBufferUtil
{
static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance(typeof(ByteBufferUtil));
static readonly char[] HexdumpTable = new char[256 * 4];
static readonly string Newline = StringUtil.Newline;
static readonly string[] Byte2Hex = new string[256];
@ -18,6 +22,8 @@ namespace DotNetty.Buffers
static readonly char[] Byte2Char = new char[256];
static readonly string[] HexDumpRowPrefixes = new string[(int)((uint)65536 >> 4)];
public static readonly IByteBufferAllocator DefaultAllocator;
static ByteBufferUtil()
{
char[] digits = "0123456789abcdef".ToCharArray();
@ -81,42 +87,35 @@ namespace DotNetty.Buffers
HexDumpRowPrefixes[i] = buf.ToString();
}
//todo: port
//String allocType = SystemPropertyUtil.get(
// "io.netty.allocator.type", PlatformDependent.isAndroid() ? "unpooled" : "pooled");
//allocType = allocType.toLowerCase(Locale.US).trim();
string allocType = SystemPropertyUtil.Get(
"io.netty.allocator.type", "pooled");
allocType = allocType.Trim();
//ByteBufAllocator alloc;
//if ("unpooled".equals(allocType))
//{
// alloc = UnpooledByteBufAllocator.DEFAULT;
// logger.debug("-Dio.netty.allocator.type: {}", allocType);
//}
//else if ("pooled".equals(allocType))
//{
// alloc = PooledByteBufAllocator.DEFAULT;
// logger.debug("-Dio.netty.allocator.type: {}", allocType);
//}
//else
//{
// alloc = PooledByteBufAllocator.DEFAULT;
// logger.debug("-Dio.netty.allocator.type: pooled (unknown: {})", allocType);
//}
IByteBufferAllocator alloc;
if ("unpooled".Equals(allocType, StringComparison.OrdinalIgnoreCase))
{
alloc = UnpooledByteBufferAllocator.Default;
Logger.Debug("-Dio.netty.allocator.type: {}", allocType);
}
else if ("pooled".Equals(allocType, StringComparison.OrdinalIgnoreCase))
{
alloc = PooledByteBufferAllocator.Default;
Logger.Debug("-Dio.netty.allocator.type: {}", allocType);
}
else
{
alloc = PooledByteBufferAllocator.Default;
Logger.Debug("-Dio.netty.allocator.type: pooled (unknown: {})", allocType);
}
//DEFAULT_ALLOCATOR = alloc;
//THREAD_LOCAL_BUFFER_SIZE = SystemPropertyUtil.getInt("io.netty.threadLocalDirectBufferSize", 64 * 1024);
//logger.debug("-Dio.netty.threadLocalDirectBufferSize: {}", THREAD_LOCAL_BUFFER_SIZE);
DefaultAllocator = alloc;
}
/// <summary>
/// Returns a <a href="http://en.wikipedia.org/wiki/Hex_dump">hex dump</a>
/// of the specified buffer's sub-region.
/// </summary>
public static string HexDump(IByteBuffer buffer)
{
return HexDump(buffer, buffer.ReaderIndex, buffer.ReadableBytes);
}
public static string HexDump(IByteBuffer buffer) => HexDump(buffer, buffer.ReaderIndex, buffer.ReadableBytes);
/// <summary>
/// Returns a <a href="http://en.wikipedia.org/wiki/Hex_dump">hex dump</a>
@ -148,10 +147,7 @@ namespace DotNetty.Buffers
/// Returns a <a href="http://en.wikipedia.org/wiki/Hex_dump">hex dump</a>
/// of the specified buffer's sub-region.
/// </summary>
public static string HexDump(byte[] array)
{
return HexDump(array, 0, array.Length);
}
public static string HexDump(byte[] array) => HexDump(array, 0, array.Length);
/// <summary>
/// Returns a <a href="http://en.wikipedia.org/wiki/Hex_dump">hex dump</a>
@ -371,27 +367,20 @@ namespace DotNetty.Buffers
/// Toggles the endianness of the specified 64-bit long integer.
/// </summary>
public static long SwapLong(long value)
{
return (((long)SwapInt((int)value) & 0xFFFFFFFF) << 32)
| ((long)SwapInt((int)(value >> 32)) & 0xFFFFFFFF);
}
=> (((long)SwapInt((int)value) & 0xFFFFFFFF) << 32)
| ((long)SwapInt((int)(value >> 32)) & 0xFFFFFFFF);
/// <summary>
/// Toggles the endianness of the specified 32-bit integer.
/// </summary>
public static int SwapInt(int value)
{
return ((SwapShort((short)value) & 0xFFFF) << 16)
| (SwapShort((short)(value >> 16)) & 0xFFFF);
}
=> ((SwapShort((short)value) & 0xFFFF) << 16)
| (SwapShort((short)(value >> 16)) & 0xFFFF);
/// <summary>
/// Toggles the endianness of the specified 16-bit integer.
/// </summary>
public static short SwapShort(short value)
{
return (short)(((value & 0xFF) << 8) | (value >> 8) & 0xFF);
}
public static short SwapShort(short value) => (short)(((value & 0xFF) << 8) | (value >> 8) & 0xFF);
/// <summary>
/// Read the given amount of bytes into a new {@link ByteBuf} that is allocated from the {@link ByteBufAllocator}.
@ -431,10 +420,7 @@ namespace DotNetty.Buffers
/// <summary>
/// Returns a multi-line hexadecimal dump of the specified {@link ByteBuf} that is easy to read by humans.
/// </summary>
public static string PrettyHexDump(IByteBuffer buffer)
{
return PrettyHexDump(buffer, buffer.ReaderIndex, buffer.ReadableBytes);
}
public static string PrettyHexDump(IByteBuffer buffer) => PrettyHexDump(buffer, buffer.ReaderIndex, buffer.ReadableBytes);
/// <summary>
/// Returns a multi-line hexadecimal dump of the specified {@link ByteBuf} that is easy to read by humans,
@ -459,10 +445,7 @@ namespace DotNetty.Buffers
/// Appends the prettified multi-line hexadecimal dump of the specified {@link ByteBuf} to the specified
/// {@link StringBuilder} that is easy to read by humans.
/// </summary>
public static void AppendPrettyHexDump(StringBuilder dump, IByteBuffer buf)
{
AppendPrettyHexDump(dump, buf, buf.ReaderIndex, buf.ReadableBytes);
}
public static void AppendPrettyHexDump(StringBuilder dump, IByteBuffer buf) => AppendPrettyHexDump(dump, buf, buf.ReaderIndex, buf.ReadableBytes);
/// <summary>
/// Appends the prettified multi-line hexadecimal dump of the specified {@link ByteBuf} to the specified
@ -566,10 +549,7 @@ namespace DotNetty.Buffers
/// <param name="alloc">The <see cref="IByteBufferAllocator" /> to allocate {@link IByteBuffer}.</param>
/// <param name="src">src The <see cref="string" /> to encode.</param>
/// <param name="encoding">charset The specified <see cref="Encoding" /></param>
public static IByteBuffer EncodeString(IByteBufferAllocator alloc, string src, Encoding encoding)
{
return EncodeString0(alloc, src, encoding, 0);
}
public static IByteBuffer EncodeString(IByteBufferAllocator alloc, string src, Encoding encoding) => EncodeString0(alloc, src, encoding, 0);
/// <summary>
/// Encode the given <see cref="CharBuffer" /> using the given <see cref="Encoding" /> into a new
@ -580,10 +560,7 @@ namespace DotNetty.Buffers
/// <param name="src">src The <see cref="string" /> to encode.</param>
/// <param name="encoding">charset The specified <see cref="Encoding" /></param>
/// <param name="extraCapacity">the extra capacity to alloc except the space for decoding.</param>
public static IByteBuffer EncodeString(IByteBufferAllocator alloc, string src, Encoding encoding, int extraCapacity)
{
return EncodeString0(alloc, src, encoding, extraCapacity);
}
public static IByteBuffer EncodeString(IByteBufferAllocator alloc, string src, Encoding encoding, int extraCapacity) => EncodeString0(alloc, src, encoding, extraCapacity);
static IByteBuffer EncodeString0(IByteBufferAllocator alloc, string src, Encoding encoding, int extraCapacity)
{

Просмотреть файл

@ -427,7 +427,7 @@ namespace DotNetty.Buffers
switch (this.components.Count)
{
case 0:
return ByteArrayExtensions.Empty;
return ArrayExtensions.ZeroBytes;
case 1:
return this.components[0].Buffer.Array;
default:

Просмотреть файл

@ -31,6 +31,15 @@
<WarningLevel>4</WarningLevel>
<CheckForOverflowUnderflow>false</CheckForOverflowUnderflow>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)' == 'Signed|AnyCPU'">
<OutputPath>bin\Signed\</OutputPath>
<DefineConstants>TRACE;NOTEST</DefineConstants>
<Optimize>true</Optimize>
<DebugType>pdbonly</DebugType>
<PlatformTarget>AnyCPU</PlatformTarget>
<ErrorReport>prompt</ErrorReport>
<CodeAnalysisRuleSet>MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet>
</PropertyGroup>
<ItemGroup>
<Reference Include="System" />
<Reference Include="System.Core" />
@ -40,8 +49,12 @@
<Compile Include="..\SharedAssemblyInfo.cs">
<Link>Properties\SharedAssemblyInfo.cs</Link>
</Compile>
<Compile Include="AdvancedLeakAwareByteBuf.cs" />
<Compile Include="SimpleLeakAwareByteBuf.cs" />
<Compile Include="AdvancedLeakAwareByteBuffer.cs" />
<Compile Include="Properties\Friends.cs" />
<Compile Include="IPoolArenaMetric.cs" />
<Compile Include="IPoolChunkListMetric.cs" />
<Compile Include="IPoolSubpageMetric.cs" />
<Compile Include="SimpleLeakAwareByteBuffer.cs" />
<Compile Include="AbstractByteBuffer.cs" />
<Compile Include="AbstractByteBufferAllocator.cs" />
<Compile Include="AbstractDerivedByteBuffer.cs" />
@ -52,13 +65,20 @@
<Compile Include="CompositeByteBuffer.cs" />
<Compile Include="DuplicatedByteBuffer.cs" />
<Compile Include="EmptyByteBuffer.cs" />
<Compile Include="IPoolChunkMetric.cs" />
<Compile Include="PoolArena.cs" />
<Compile Include="PoolChunk.cs" />
<Compile Include="PoolChunkList.cs" />
<Compile Include="PooledByteBuffer.cs" />
<Compile Include="PooledByteBufferAllocator.cs" />
<Compile Include="PooledHeapByteBuffer.cs" />
<Compile Include="PoolSubpage.cs" />
<Compile Include="PoolThreadCache.cs" />
<Compile Include="IByteBuffer.cs" />
<Compile Include="IByteBufferAllocator.cs" />
<Compile Include="IllegalReferenceCountException.cs" />
<Compile Include="Properties\AssemblyInfo.cs" />
<Compile Include="SlicedByteBuffer.cs" />
<Compile Include="PooledByteBufferAllocator.cs" />
<Compile Include="SwappedByteBuffer.cs" />
<Compile Include="Unpooled.cs" />
<Compile Include="UnpooledByteBufferAllocator.cs" />

Просмотреть файл

@ -459,11 +459,11 @@ namespace DotNetty.Buffers
public bool HasArray => true;
public byte[] Array => ByteArrayExtensions.Empty;
public byte[] Array => ArrayExtensions.ZeroBytes;
public byte[] ToArray()
{
return ByteArrayExtensions.Empty;
return ArrayExtensions.ZeroBytes;
}
public IByteBuffer Duplicate()
@ -567,6 +567,27 @@ namespace DotNetty.Buffers
return false;
}
public override int GetHashCode()
{
return 0;
}
public override bool Equals(object obj)
{
var buffer = obj as IByteBuffer;
return this.Equals(buffer);
}
public bool Equals(IByteBuffer buffer)
{
return buffer != null && !buffer.IsReadable();
}
public int CompareTo(IByteBuffer buffer)
{
return buffer.IsReadable() ? -1 : 0;
}
public override string ToString()
{
return this.str;

Просмотреть файл

@ -19,7 +19,7 @@ namespace DotNetty.Buffers
/// /// <see cref="ReaderIndex" /> LESS THAN OR EQUAL TO <see cref="WriterIndex" /> LESS THAN OR EQUAL TO
/// <see cref="Capacity" />.
/// </summary>
public interface IByteBuffer : IReferenceCounted
public interface IByteBuffer : IReferenceCounted, IComparable<IByteBuffer>, IEquatable<IByteBuffer>
{
int Capacity { get; }

Просмотреть файл

@ -0,0 +1,79 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Buffers
{
    using System.Collections.Generic;

    /// <summary>
    /// Read-only metrics exposed by a pool arena. Values produced by unsynchronized
    /// counters are best-effort snapshots, not a consistent view.
    /// </summary>
    public interface IPoolArenaMetric
    {
        /// <summary>Returns the number of thread caches backed by this arena.</summary>
        int NumThreadCaches { get; }

        /// <summary>Returns the number of tiny sub-pages for the arena.</summary>
        int NumTinySubpages { get; }

        /// <summary>Returns the number of small sub-pages for the arena.</summary>
        int NumSmallSubpages { get; }

        /// <summary>Returns the number of chunk lists for the arena.</summary>
        int NumChunkLists { get; }

        /// <summary>Returns an unmodifiable list which holds <see cref="IPoolSubpageMetric"/>s for tiny sub-pages.</summary>
        IReadOnlyList<IPoolSubpageMetric> TinySubpages { get; }

        /// <summary>Returns an unmodifiable list which holds <see cref="IPoolSubpageMetric"/>s for small sub-pages.</summary>
        IReadOnlyList<IPoolSubpageMetric> SmallSubpages { get; }

        /// <summary>Returns an unmodifiable list which holds <see cref="IPoolChunkListMetric"/>s.</summary>
        IReadOnlyList<IPoolChunkListMetric> ChunkLists { get; }

        /// <summary>Returns the number of allocations done via the arena. This includes all sizes.</summary>
        long NumAllocations { get; }

        /// <summary>Returns the number of tiny allocations done via the arena.</summary>
        long NumTinyAllocations { get; }

        /// <summary>Returns the number of small allocations done via the arena.</summary>
        long NumSmallAllocations { get; }

        /// <summary>Returns the number of normal allocations done via the arena.</summary>
        long NumNormalAllocations { get; }

        /// <summary>Returns the number of huge allocations done via the arena.</summary>
        long NumHugeAllocations { get; }

        /// <summary>Returns the number of deallocations done via the arena. This includes all sizes.</summary>
        long NumDeallocations { get; }

        /// <summary>Returns the number of tiny deallocations done via the arena.</summary>
        long NumTinyDeallocations { get; }

        /// <summary>Returns the number of small deallocations done via the arena.</summary>
        long NumSmallDeallocations { get; }

        /// <summary>Returns the number of normal deallocations done via the arena.</summary>
        long NumNormalDeallocations { get; }

        /// <summary>Returns the number of huge deallocations done via the arena.</summary>
        long NumHugeDeallocations { get; }

        /// <summary>Returns the number of currently active allocations.</summary>
        long NumActiveAllocations { get; }

        /// <summary>Returns the number of currently active tiny allocations.</summary>
        long NumActiveTinyAllocations { get; }

        /// <summary>Returns the number of currently active small allocations.</summary>
        long NumActiveSmallAllocations { get; }

        /// <summary>Returns the number of currently active normal allocations.</summary>
        long NumActiveNormalAllocations { get; }

        /// <summary>Returns the number of currently active huge allocations.</summary>
        long NumActiveHugeAllocations { get; }

        /// <summary>Returns the number of active bytes that are currently allocated by the arena.</summary>
        long NumActiveBytes { get; }
    }
}

Просмотреть файл

@ -0,0 +1,16 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Buffers
{
    using System.Collections.Generic;

    /// <summary>
    /// Read-only metrics for a list of pooled chunks; enumerating it yields the
    /// <see cref="IPoolChunkMetric"/> of each chunk currently in the list.
    /// </summary>
    public interface IPoolChunkListMetric : IEnumerable<IPoolChunkMetric>
    {
        /// <summary>Returns the minimum usage of the chunk list before which chunks are promoted to the previous list.</summary>
        int MinUsage { get; }

        /// <summary>Returns the maximum usage of the chunk list after which chunks are promoted to the next list.</summary>
        int MaxUsage { get; }
    }
}

Просмотреть файл

@ -0,0 +1,17 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Buffers
{
    /// <summary>Read-only metrics for a single pooled memory chunk.</summary>
    public interface IPoolChunkMetric
    {
        /// <summary>Returns the percentage of the current usage of the chunk.</summary>
        int Usage { get; }

        /// <summary>Returns the size of the chunk in bytes; this is the maximum number of bytes that can be served out of the chunk.</summary>
        int ChunkSize { get; }

        /// <summary>Returns the number of free bytes in the chunk.</summary>
        int FreeBytes { get; }
    }
}

Просмотреть файл

@ -0,0 +1,20 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Buffers
{
    /// <summary>Read-only metrics for a sub-page (a page carved into equal-size elements).</summary>
    public interface IPoolSubpageMetric
    {
        /// <summary>Returns the maximal number of elements that can be allocated out of the sub-page.</summary>
        int MaxNumElements { get; }

        /// <summary>Returns the number of available elements to be allocated.</summary>
        int NumAvailable { get; }

        /// <summary>Returns the size (in bytes) of the elements that will be allocated.</summary>
        int ElementSize { get; }

        /// <summary>Returns the size (in bytes) of this page.</summary>
        int PageSize { get; }
    }
}

Просмотреть файл

@ -0,0 +1,648 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Buffers
{
using System;
using System.Collections.Generic;
using System.Diagnostics.Contracts;
using System.Runtime.CompilerServices;
using System.Text;
using System.Threading;
using DotNetty.Common.Utilities;
// Allocation size classes, as computed by PoolArena<T>.SizeClass(int):
// Tiny (< 512 bytes), Small (< pageSize) and Normal (everything else up to chunkSize).
enum SizeClass
{
    Tiny,
    Small,
    Normal
}
/// <summary>
/// An arena of pooled memory chunks for one backing storage type <typeparamref name="T"/>.
/// Tiny (&lt; 512B) and small (&lt; pageSize) requests are served from sub-page pools,
/// normal requests from chunk lists ordered by usage, and huge requests bypass the pool.
/// Most state is guarded by locking the arena itself; the huge counters use
/// Interlocked/Volatile because they are updated outside that lock.
/// </summary>
abstract class PoolArena<T> : IPoolArenaMetric
{
    // 512 >> 4 == 32 tiny pools, one per 16-byte size step below 512 bytes.
    internal static readonly int NumTinySubpagePools = 512 >> 4;

    internal readonly PooledByteBufferAllocator Parent;

    readonly int maxOrder;
    internal readonly int PageSize;
    internal readonly int PageShifts;
    internal readonly int ChunkSize;
    internal readonly int SubpageOverflowMask;
    internal readonly int NumSmallSubpagePools;

    readonly PoolSubpage<T>[] tinySubpagePools;
    readonly PoolSubpage<T>[] smallSubpagePools;

    readonly PoolChunkList<T> q050;
    readonly PoolChunkList<T> q025;
    readonly PoolChunkList<T> q000;
    readonly PoolChunkList<T> qInit;
    readonly PoolChunkList<T> q075;
    readonly PoolChunkList<T> q100;

    readonly List<IPoolChunkListMetric> chunkListMetrics;

    // Metrics for allocations and deallocations; updated under the arena lock.
    long allocationsTiny;
    long allocationsSmall;
    long allocationsNormal;

    // Huge allocations are not guarded by the arena lock, so these are
    // updated with Interlocked and read with Volatile.
    long allocationsHuge;
    long activeBytesHuge;

    long deallocationsTiny;
    long deallocationsSmall;
    long deallocationsNormal;

    // Same as allocationsHuge: updated with Interlocked, read with Volatile.
    long deallocationsHuge;

    // Number of thread caches backed by this arena.
    int numThreadCaches;

    // TODO: Test if adding padding helps under contention
    //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;

    protected PoolArena(PooledByteBufferAllocator parent, int pageSize, int maxOrder, int pageShifts, int chunkSize)
    {
        this.Parent = parent;
        this.PageSize = pageSize;
        this.maxOrder = maxOrder;
        this.PageShifts = pageShifts;
        this.ChunkSize = chunkSize;
        // Any capacity with a bit at or above pageSize set is NOT tiny/small.
        this.SubpageOverflowMask = ~(pageSize - 1);

        this.tinySubpagePools = this.NewSubpagePoolArray(NumTinySubpagePools);
        for (int i = 0; i < this.tinySubpagePools.Length; i++)
        {
            this.tinySubpagePools[i] = this.NewSubpagePoolHead(pageSize);
        }

        // Small pools cover power-of-two sizes from 512 up to pageSize / 2.
        this.NumSmallSubpagePools = pageShifts - 9;
        this.smallSubpagePools = this.NewSubpagePoolArray(this.NumSmallSubpagePools);
        for (int i = 0; i < this.smallSubpagePools.Length; i++)
        {
            this.smallSubpagePools[i] = this.NewSubpagePoolHead(pageSize);
        }

        // Chunk lists keyed by usage percentage; chunks migrate between
        // neighbouring lists as their usage crosses the thresholds.
        this.q100 = new PoolChunkList<T>(null, 100, int.MaxValue, chunkSize);
        this.q075 = new PoolChunkList<T>(this.q100, 75, 100, chunkSize);
        this.q050 = new PoolChunkList<T>(this.q075, 50, 100, chunkSize);
        this.q025 = new PoolChunkList<T>(this.q050, 25, 75, chunkSize);
        this.q000 = new PoolChunkList<T>(this.q025, 1, 50, chunkSize);
        this.qInit = new PoolChunkList<T>(this.q000, int.MinValue, 25, chunkSize);

        this.q100.PrevList(this.q075);
        this.q075.PrevList(this.q050);
        this.q050.PrevList(this.q025);
        this.q025.PrevList(this.q000);
        // Chunks in q000 that empty out are destroyed rather than demoted.
        this.q000.PrevList(null);
        // qInit points at itself so new chunks are never demoted out of it.
        this.qInit.PrevList(this.qInit);

        this.chunkListMetrics = new List<IPoolChunkListMetric>(6)
        {
            this.qInit,
            this.q000,
            this.q025,
            this.q050,
            this.q075,
            this.q100
        };
    }

    /// <summary>Number of thread caches currently backed by this arena.</summary>
    public int NumThreadCaches => Volatile.Read(ref this.numThreadCaches);

    public void RegisterThreadCache() => Interlocked.Increment(ref this.numThreadCaches);

    public void DeregisterThreadCache() => Interlocked.Decrement(ref this.numThreadCaches);

    // Creates the sentinel head of a circular doubly-linked sub-page list.
    PoolSubpage<T> NewSubpagePoolHead(int pageSize)
    {
        var head = new PoolSubpage<T>(pageSize);
        head.Prev = head;
        head.Next = head;
        return head;
    }

    PoolSubpage<T>[] NewSubpagePoolArray(int size) => new PoolSubpage<T>[size];

    /// <summary>
    /// Allocates a pooled buffer able to hold at least <paramref name="reqCapacity"/> bytes,
    /// trying <paramref name="cache"/> first where applicable.
    /// </summary>
    internal PooledByteBuffer<T> Allocate(PoolThreadCache<T> cache, int reqCapacity, int maxCapacity)
    {
        PooledByteBuffer<T> buf = this.NewByteBuf(maxCapacity);
        this.Allocate(cache, buf, reqCapacity);
        return buf;
    }

    // Tiny sizes are multiples of 16, so the pool index is normCapacity / 16.
    internal static int TinyIdx(int normCapacity) => normCapacity.RightUShift(4);

    // Small pool index: log2(normCapacity / 512) for power-of-two capacities >= 512.
    internal static int SmallIdx(int normCapacity)
    {
        int tableIdx = 0;
        int i = normCapacity.RightUShift(10);
        while (i != 0)
        {
            i = i.RightUShift(1);
            tableIdx++;
        }
        return tableIdx;
    }

    // capacity < pageSize
    internal bool IsTinyOrSmall(int normCapacity) => (normCapacity & this.SubpageOverflowMask) == 0;

    // normCapacity < 512
    internal static bool IsTiny(int normCapacity) => (normCapacity & 0xFFFFFE00) == 0;

    // Routes the request to the tiny/small sub-page pools, the normal chunk
    // lists, or a dedicated huge chunk, preferring the thread cache.
    void Allocate(PoolThreadCache<T> cache, PooledByteBuffer<T> buf, int reqCapacity)
    {
        int normCapacity = this.NormalizeCapacity(reqCapacity);
        if (this.IsTinyOrSmall(normCapacity))
        {
            // capacity < pageSize
            int tableIdx;
            PoolSubpage<T>[] table;
            bool tiny = IsTiny(normCapacity);
            if (tiny)
            {
                // < 512
                if (cache.AllocateTiny(this, buf, reqCapacity, normCapacity))
                {
                    // was able to allocate out of the cache so move on
                    return;
                }
                tableIdx = TinyIdx(normCapacity);
                table = this.tinySubpagePools;
            }
            else
            {
                if (cache.AllocateSmall(this, buf, reqCapacity, normCapacity))
                {
                    // was able to allocate out of the cache so move on
                    return;
                }
                tableIdx = SmallIdx(normCapacity);
                table = this.smallSubpagePools;
            }

            PoolSubpage<T> head = table[tableIdx];

            /**
             * Synchronize on the head. This is needed as {@link PoolSubpage#allocate()} and
             * {@link PoolSubpage#free(int)} may modify the doubly linked list as well.
             */
            lock (head)
            {
                PoolSubpage<T> s = head.Next;
                if (s != head)
                {
                    Contract.Assert(s.DoNotDestroy && s.ElemSize == normCapacity);
                    long handle = s.Allocate();
                    Contract.Assert(handle >= 0);
                    s.Chunk.InitBufWithSubpage(buf, handle, reqCapacity);

                    if (tiny)
                    {
                        ++this.allocationsTiny;
                    }
                    else
                    {
                        ++this.allocationsSmall;
                    }
                    return;
                }
            }
            this.AllocateNormal(buf, reqCapacity, normCapacity);
            return;
        }
        if (normCapacity <= this.ChunkSize)
        {
            if (cache.AllocateNormal(this, buf, reqCapacity, normCapacity))
            {
                // was able to allocate out of the cache so move on
                return;
            }
            this.AllocateNormal(buf, reqCapacity, normCapacity);
        }
        else
        {
            // Huge allocations are never served via the cache so just call allocateHuge
            this.AllocateHuge(buf, reqCapacity);
        }
    }

    // Tries existing chunks in usage order (q050 first to keep overall usage
    // high) before allocating a brand-new chunk.
    [MethodImpl(MethodImplOptions.Synchronized)]
    void AllocateNormal(PooledByteBuffer<T> buf, int reqCapacity, int normCapacity)
    {
        if (this.q050.Allocate(buf, reqCapacity, normCapacity) || this.q025.Allocate(buf, reqCapacity, normCapacity)
            || this.q000.Allocate(buf, reqCapacity, normCapacity) || this.qInit.Allocate(buf, reqCapacity, normCapacity)
            || this.q075.Allocate(buf, reqCapacity, normCapacity))
        {
            ++this.allocationsNormal;
            return;
        }

        // Add a new chunk.
        PoolChunk<T> c = this.NewChunk(this.PageSize, this.maxOrder, this.PageShifts, this.ChunkSize);
        long handle = c.Allocate(normCapacity);
        ++this.allocationsNormal;
        Contract.Assert(handle > 0);
        c.InitBuf(buf, handle, reqCapacity);
        this.qInit.Add(c);
    }

    // Huge allocations get a dedicated unpooled chunk and only touch the
    // Interlocked counters, never the arena lock.
    void AllocateHuge(PooledByteBuffer<T> buf, int reqCapacity)
    {
        PoolChunk<T> chunk = this.NewUnpooledChunk(reqCapacity);
        Interlocked.Add(ref this.activeBytesHuge, chunk.ChunkSize);
        buf.InitUnpooled(chunk, reqCapacity);
        Interlocked.Increment(ref this.allocationsHuge);
    }

    /// <summary>
    /// Returns memory to the pool (or destroys it for huge/unpooled chunks),
    /// first offering it to <paramref name="cache"/> for reuse.
    /// </summary>
    internal void Free(PoolChunk<T> chunk, long handle, int normCapacity, PoolThreadCache<T> cache)
    {
        if (chunk.Unpooled)
        {
            int size = chunk.ChunkSize;
            this.DestroyChunk(chunk);
            Interlocked.Add(ref this.activeBytesHuge, -size);
            // FIX: this is a deallocation, so the counter must go up.
            // The original Interlocked.Decrement drove NumHugeDeallocations negative.
            Interlocked.Increment(ref this.deallocationsHuge);
        }
        else
        {
            SizeClass sc = this.SizeClass(normCapacity);
            if (cache != null && cache.Add(this, chunk, handle, normCapacity, sc))
            {
                // cached so not free it.
                return;
            }

            this.FreeChunk(chunk, handle, sc);
        }
    }

    SizeClass SizeClass(int normCapacity)
    {
        if (!this.IsTinyOrSmall(normCapacity))
        {
            return Buffers.SizeClass.Normal;
        }
        return IsTiny(normCapacity) ? Buffers.SizeClass.Tiny : Buffers.SizeClass.Small;
    }

    internal void FreeChunk(PoolChunk<T> chunk, long handle, SizeClass sizeClass)
    {
        bool mustDestroyChunk;
        lock (this)
        {
            switch (sizeClass)
            {
                case Buffers.SizeClass.Normal:
                    ++this.deallocationsNormal;
                    break;
                case Buffers.SizeClass.Small:
                    ++this.deallocationsSmall;
                    break;
                case Buffers.SizeClass.Tiny:
                    ++this.deallocationsTiny;
                    break;
                default:
                    throw new ArgumentOutOfRangeException();
            }
            mustDestroyChunk = !chunk.Parent.Free(chunk, handle);
        }
        if (mustDestroyChunk)
        {
            // destroyChunk not need to be called while holding the synchronized lock.
            this.DestroyChunk(chunk);
        }
    }

    // Finds the sentinel head of the sub-page pool responsible for elemSize.
    internal PoolSubpage<T> FindSubpagePoolHead(int elemSize)
    {
        int tableIdx;
        PoolSubpage<T>[] table;
        if (IsTiny(elemSize))
        {
            // < 512
            tableIdx = elemSize.RightUShift(4);
            table = this.tinySubpagePools;
        }
        else
        {
            tableIdx = 0;
            elemSize = elemSize.RightUShift(10);
            while (elemSize != 0)
            {
                elemSize = elemSize.RightUShift(1);
                tableIdx++;
            }
            table = this.smallSubpagePools;
        }
        return table[tableIdx];
    }

    /// <summary>
    /// Rounds a requested capacity up to the arena's allocation granularity:
    /// unchanged if >= chunkSize, next power of two if >= 512, else next multiple of 16.
    /// </summary>
    internal int NormalizeCapacity(int reqCapacity)
    {
        Contract.Requires(reqCapacity >= 0);

        if (reqCapacity >= this.ChunkSize)
        {
            return reqCapacity;
        }

        if (!IsTiny(reqCapacity))
        {
            // >= 512
            // Doubled: smear the highest set bit rightwards, then add one,
            // yielding the next power of two >= reqCapacity.
            int normalizedCapacity = reqCapacity;
            normalizedCapacity--;
            normalizedCapacity |= normalizedCapacity.RightUShift(1);
            normalizedCapacity |= normalizedCapacity.RightUShift(2);
            normalizedCapacity |= normalizedCapacity.RightUShift(4);
            normalizedCapacity |= normalizedCapacity.RightUShift(8);
            normalizedCapacity |= normalizedCapacity.RightUShift(16);
            normalizedCapacity++;

            if (normalizedCapacity < 0)
            {
                // Overflowed past int.MaxValue; back off to the largest power of two.
                normalizedCapacity = normalizedCapacity.RightUShift(1);
            }

            return normalizedCapacity;
        }

        // Quantum-spaced
        if ((reqCapacity & 15) == 0)
        {
            return reqCapacity;
        }

        return (reqCapacity & ~15) + 16;
    }

    /// <summary>
    /// Moves the buffer's content into a new allocation of <paramref name="newCapacity"/>,
    /// clamping the indices when shrinking, optionally freeing the old memory.
    /// </summary>
    internal void Reallocate(PooledByteBuffer<T> buf, int newCapacity, bool freeOldMemory)
    {
        Contract.Requires(newCapacity >= 0 && newCapacity <= buf.MaxCapacity);

        int oldCapacity = buf.Length;
        if (oldCapacity == newCapacity)
        {
            return;
        }

        PoolChunk<T> oldChunk = buf.Chunk;
        long oldHandle = buf.Handle;
        T oldMemory = buf.Memory;
        int oldOffset = buf.Offset;
        int oldMaxLength = buf.MaxLength;
        int readerIndex = buf.ReaderIndex;
        int writerIndex = buf.WriterIndex;

        this.Allocate(this.Parent.ThreadCache<T>(), buf, newCapacity);
        if (newCapacity > oldCapacity)
        {
            this.MemoryCopy(
                oldMemory, oldOffset,
                buf.Memory, buf.Offset, oldCapacity);
        }
        else if (newCapacity < oldCapacity)
        {
            if (readerIndex < newCapacity)
            {
                if (writerIndex > newCapacity)
                {
                    writerIndex = newCapacity;
                }
                this.MemoryCopy(
                    oldMemory, oldOffset + readerIndex,
                    buf.Memory, buf.Offset + readerIndex, writerIndex - readerIndex);
            }
            else
            {
                readerIndex = writerIndex = newCapacity;
            }
        }

        buf.SetIndex(readerIndex, writerIndex);

        if (freeOldMemory)
        {
            this.Free(oldChunk, oldHandle, oldMaxLength, buf.Cache);
        }
    }

    public int NumTinySubpages => this.tinySubpagePools.Length;

    public int NumSmallSubpages => this.smallSubpagePools.Length;

    public int NumChunkLists => this.chunkListMetrics.Count;

    public IReadOnlyList<IPoolSubpageMetric> TinySubpages => SubPageMetricList(this.tinySubpagePools);

    public IReadOnlyList<IPoolSubpageMetric> SmallSubpages => SubPageMetricList(this.smallSubpagePools);

    public IReadOnlyList<IPoolChunkListMetric> ChunkLists => this.chunkListMetrics;

    // Snapshots all live sub-pages reachable from the given pool heads.
    static List<IPoolSubpageMetric> SubPageMetricList(PoolSubpage<T>[] pages)
    {
        var metrics = new List<IPoolSubpageMetric>();
        for (int i = 1; i < pages.Length; i++)
        {
            PoolSubpage<T> head = pages[i];
            if (head.Next == head)
            {
                // Empty circular list: only the sentinel head remains.
                continue;
            }
            PoolSubpage<T> s = head.Next;
            for (;;)
            {
                metrics.Add(s);
                s = s.Next;
                if (s == head)
                {
                    break;
                }
            }
        }
        return metrics;
    }

    public long NumAllocations => this.allocationsTiny + this.allocationsSmall + this.allocationsNormal + this.NumHugeAllocations;

    public long NumTinyAllocations => this.allocationsTiny;

    public long NumSmallAllocations => this.allocationsSmall;

    public long NumNormalAllocations => this.allocationsNormal;

    // FIX: the original summed this.allocationsNormal here instead of
    // this.deallocationsNormal, inflating the deallocation total.
    public long NumDeallocations => this.deallocationsTiny + this.deallocationsSmall + this.deallocationsNormal + this.NumHugeDeallocations;

    public long NumTinyDeallocations => this.deallocationsTiny;

    public long NumSmallDeallocations => this.deallocationsSmall;

    public long NumNormalDeallocations => this.deallocationsNormal;

    public long NumHugeAllocations => Volatile.Read(ref this.allocationsHuge);

    public long NumHugeDeallocations => Volatile.Read(ref this.deallocationsHuge);

    public long NumActiveAllocations => Math.Max(this.NumAllocations - this.NumDeallocations, 0);

    public long NumActiveTinyAllocations => Math.Max(this.NumTinyAllocations - this.NumTinyDeallocations, 0);

    public long NumActiveSmallAllocations => Math.Max(this.NumSmallAllocations - this.NumSmallDeallocations, 0);

    public long NumActiveNormalAllocations
    {
        get
        {
            long val;
            lock (this)
            {
                val = this.NumNormalAllocations - this.NumNormalDeallocations;
            }
            return Math.Max(val, 0);
        }
    }

    public long NumActiveHugeAllocations => Math.Max(this.NumHugeAllocations - this.NumHugeDeallocations, 0);

    public long NumActiveBytes
    {
        get
        {
            long val = Volatile.Read(ref this.activeBytesHuge);
            lock (this)
            {
                for (int i = 0; i < this.chunkListMetrics.Count; i++)
                {
                    foreach (IPoolChunkMetric m in this.chunkListMetrics[i])
                    {
                        val += m.ChunkSize;
                    }
                }
            }
            return Math.Max(0, val);
        }
    }

    /// <summary>Creates a new pooled chunk backed by freshly allocated storage.</summary>
    protected abstract PoolChunk<T> NewChunk(int pageSize, int maxOrder, int pageShifts, int chunkSize);

    /// <summary>Creates a one-off unpooled chunk for a huge allocation.</summary>
    protected abstract PoolChunk<T> NewUnpooledChunk(int capacity);

    /// <summary>Creates an uninitialized pooled buffer instance.</summary>
    protected abstract PooledByteBuffer<T> NewByteBuf(int maxCapacity);

    /// <summary>Copies <paramref name="length"/> units from src to dst.</summary>
    protected abstract void MemoryCopy(T src, int srcOffset, T dst, int dstOffset, int length);

    /// <summary>Releases the backing storage of a chunk.</summary>
    protected abstract void DestroyChunk(PoolChunk<T> chunk);

    [MethodImpl(MethodImplOptions.Synchronized)]
    public override string ToString()
    {
        StringBuilder buf = new StringBuilder()
            .Append("Chunk(s) at 0~25%:")
            .Append(StringUtil.Newline)
            .Append(this.qInit)
            .Append(StringUtil.Newline)
            .Append("Chunk(s) at 0~50%:")
            .Append(StringUtil.Newline)
            .Append(this.q000)
            .Append(StringUtil.Newline)
            .Append("Chunk(s) at 25~75%:")
            .Append(StringUtil.Newline)
            .Append(this.q025)
            .Append(StringUtil.Newline)
            .Append("Chunk(s) at 50~100%:")
            .Append(StringUtil.Newline)
            .Append(this.q050)
            .Append(StringUtil.Newline)
            .Append("Chunk(s) at 75~100%:")
            .Append(StringUtil.Newline)
            .Append(this.q075)
            .Append(StringUtil.Newline)
            .Append("Chunk(s) at 100%:")
            .Append(StringUtil.Newline)
            .Append(this.q100)
            .Append(StringUtil.Newline)
            .Append("tiny subpages:");
        for (int i = 1; i < this.tinySubpagePools.Length; i++)
        {
            PoolSubpage<T> head = this.tinySubpagePools[i];
            if (head.Next == head)
            {
                continue;
            }
            buf.Append(StringUtil.Newline)
                .Append(i)
                .Append(": ");
            PoolSubpage<T> s = head.Next;
            for (;;)
            {
                buf.Append(s);
                s = s.Next;
                if (s == head)
                {
                    break;
                }
            }
        }
        buf.Append(StringUtil.Newline)
            .Append("small subpages:");
        for (int i = 1; i < this.smallSubpagePools.Length; i++)
        {
            PoolSubpage<T> head = this.smallSubpagePools[i];
            if (head.Next == head)
            {
                continue;
            }
            buf.Append(StringUtil.Newline)
                .Append(i)
                .Append(": ");
            PoolSubpage<T> s = head.Next;
            for (;;)
            {
                buf.Append(s);
                s = s.Next;
                if (s == head)
                {
                    break;
                }
            }
        }
        buf.Append(StringUtil.Newline);
        return buf.ToString();
    }
}
/// <summary>Arena whose chunks are backed by managed byte arrays.</summary>
sealed class HeapArena : PoolArena<byte[]>
{
    public HeapArena(PooledByteBufferAllocator parent, int pageSize, int maxOrder, int pageShifts, int chunkSize)
        : base(parent, pageSize, maxOrder, pageShifts, chunkSize)
    {
    }

    // A pooled chunk allocates a fresh backing array of exactly chunkSize bytes.
    protected override PoolChunk<byte[]> NewChunk(int pageSize, int maxOrder, int pageShifts, int chunkSize)
    {
        var storage = new byte[chunkSize];
        return new PoolChunk<byte[]>(this, storage, pageSize, maxOrder, pageShifts, chunkSize);
    }

    // An unpooled (huge) chunk is sized exactly to the requested capacity.
    protected override PoolChunk<byte[]> NewUnpooledChunk(int capacity)
    {
        var storage = new byte[capacity];
        return new PoolChunk<byte[]>(this, storage, capacity);
    }

    protected override void DestroyChunk(PoolChunk<byte[]> chunk)
    {
        // Rely on GC.
    }

    protected override PooledByteBuffer<byte[]> NewByteBuf(int maxCapacity) => PooledHeapByteBuffer.NewInstance(maxCapacity);

    protected override void MemoryCopy(byte[] src, int srcOffset, byte[] dst, int dstOffset, int length)
    {
        // Zero-length copies are a no-op.
        if (length > 0)
        {
            Array.Copy(src, srcOffset, dst, dstOffset, length);
        }
    }
}
}

Просмотреть файл

@ -0,0 +1,457 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Buffers
{
using System.Diagnostics.Contracts;
using System.Runtime.CompilerServices;
using System.Text;
using DotNetty.Common.Utilities;
/// <summary>
/// Description of algorithm for PageRun/PoolSubpage allocation from PoolChunk
/// Notation: The following terms are important to understand the code
/// > page - a page is the smallest unit of memory chunk that can be allocated
/// > chunk - a chunk is a collection of pages
/// > in this code chunkSize = 2^{maxOrder} * pageSize
/// To begin we allocate a byte array of size = chunkSize
/// Whenever a ByteBuf of given size needs to be created we search for the first position
/// in the byte array that has enough empty space to accommodate the requested size and
/// return a (long) handle that encodes this offset information, (this memory segment is then
/// marked as reserved so it is always used by exactly one ByteBuf and no more)
/// For simplicity all sizes are normalized according to PoolArena#normalizeCapacity method
/// This ensures that when we request for memory segments of size >= pageSize the normalizedCapacity
/// equals the next nearest power of 2
/// To search for the first offset in chunk that has at least requested size available we construct a
/// complete balanced binary tree and store it in an array (just like heaps) - memoryMap
/// The tree looks like this (the size of each node being mentioned in the parenthesis)
/// depth=0 1 node (chunkSize)
/// depth=1 2 nodes (chunkSize/2)
/// ..
/// ..
/// depth=d 2^d nodes (chunkSize/2^d)
/// ..
/// depth=maxOrder 2^maxOrder nodes (chunkSize/2^{maxOrder} = pageSize)
/// depth=maxOrder is the last level and the leafs consist of pages
/// With this tree available searching in chunkArray translates like this:
/// To allocate a memory segment of size chunkSize/2^k we search for the first node (from left) at height k
/// which is unused
/// Algorithm:
/// ----------
/// Encode the tree in memoryMap with the notation
/// memoryMap[id] = x => in the subtree rooted at id, the first node that is free to be allocated
/// is at depth x (counted from depth=0) i.e., at depths [depth_of_id, x), there is no node that is free
/// As we allocate & free nodes, we update values stored in memoryMap so that the property is maintained
/// Initialization -
/// In the beginning we construct the memoryMap array by storing the depth of a node at each node
/// i.e., memoryMap[id] = depth_of_id
/// Observations:
/// -------------
/// 1) memoryMap[id] = depth_of_id => it is free / unallocated
/// 2) memoryMap[id] > depth_of_id => at least one of its child nodes is allocated, so we cannot allocate it, but
/// some of its children can still be allocated based on their availability
/// 3) memoryMap[id] = maxOrder + 1 => the node is fully allocated & thus none of its children can be allocated, it
/// is thus marked as unusable
/// Algorithm: [allocateNode(d) => we want to find the first node (from left) at height h that can be allocated]
/// ----------
/// 1) start at root (i.e., depth = 0 or id = 1)
/// 2) if memoryMap[1] > d => cannot be allocated from this chunk
/// 3) if left node value &lt;= h; we can allocate from left subtree so move to left and repeat until found
/// 4) else try in right subtree
/// Algorithm: [allocateRun(size)]
/// ----------
/// 1) Compute d = log_2(chunkSize/size)
/// 2) Return allocateNode(d)
/// Algorithm: [allocateSubpage(size)]
/// ----------
/// 1) use allocateNode(maxOrder) to find an empty (i.e., unused) leaf (i.e., page)
/// 2) use this handle to construct the PoolSubpage object or if it already exists just call init(normCapacity)
/// note that this PoolSubpage object is added to subpagesPool in the PoolArena when we init() it
/// Note:
/// -----
/// In the implementation for improving cache coherence,
/// we store 2 pieces of information (i.e, 2 byte vals) as a short value in memoryMap
/// memoryMap[id]= (depth_of_id, x)
/// where as per convention defined above
/// the second value (i.e, x) indicates that the first node which is free to be allocated is at depth x (from root)
/// </summary>
sealed class PoolChunk<T> : IPoolChunkMetric
{
internal readonly PoolArena<T> Arena;
internal readonly T Memory;
internal readonly bool Unpooled;
readonly sbyte[] memoryMap;
readonly sbyte[] depthMap;
readonly PoolSubpage<T>[] subpages;
/** Used to determine if the requested capacity is equal to or greater than pageSize. */
readonly int subpageOverflowMask;
readonly int pageSize;
readonly int pageShifts;
readonly int maxOrder;
readonly int chunkSize;
readonly int log2ChunkSize;
readonly int maxSubpageAllocs;
/** Used to mark memory as unusable */
readonly sbyte unusable;
int freeBytes;
internal PoolChunkList<T> Parent;
internal PoolChunk<T> Prev;
internal PoolChunk<T> Next;
// TODO: Test if adding padding helps under contention
//private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
internal PoolChunk(PoolArena<T> arena, T memory, int pageSize, int maxOrder, int pageShifts, int chunkSize)
{
Contract.Requires(maxOrder < 30, "maxOrder should be < 30, but is: " + maxOrder);
this.Unpooled = false;
this.Arena = arena;
this.Memory = memory;
this.pageSize = pageSize;
this.pageShifts = pageShifts;
this.maxOrder = maxOrder;
this.chunkSize = chunkSize;
this.unusable = (sbyte)(maxOrder + 1);
this.log2ChunkSize = IntegerExtensions.Log2(chunkSize);
this.subpageOverflowMask = ~(pageSize - 1);
this.freeBytes = chunkSize;
Contract.Assert(maxOrder < 30, "maxOrder should be < 30, but is: " + maxOrder);
this.maxSubpageAllocs = 1 << maxOrder;
// Generate the memory map.
this.memoryMap = new sbyte[this.maxSubpageAllocs << 1];
this.depthMap = new sbyte[this.memoryMap.Length];
int memoryMapIndex = 1;
for (int d = 0; d <= maxOrder; ++d)
{
// move down the tree one level at a time
int depth = 1 << d;
for (int p = 0; p < depth; ++p)
{
// in each level traverse left to right and set value to the depth of subtree
this.memoryMap[memoryMapIndex] = (sbyte)d;
this.depthMap[memoryMapIndex] = (sbyte)d;
memoryMapIndex++;
}
}
this.subpages = this.NewSubpageArray(this.maxSubpageAllocs);
}
/** Creates a special chunk that is not pooled. */
internal PoolChunk(PoolArena<T> arena, T memory, int size)
{
this.Unpooled = true;
this.Arena = arena;
this.Memory = memory;
this.memoryMap = null;
this.depthMap = null;
this.subpages = null;
this.subpageOverflowMask = 0;
this.pageSize = 0;
this.pageShifts = 0;
this.maxOrder = 0;
this.unusable = (sbyte)(this.maxOrder + 1);
this.chunkSize = size;
this.log2ChunkSize = IntegerExtensions.Log2(this.chunkSize);
this.maxSubpageAllocs = 0;
}
PoolSubpage<T>[] NewSubpageArray(int size) => new PoolSubpage<T>[size];
public int Usage
{
get
{
int freeBytes = this.freeBytes;
if (freeBytes == 0)
{
return 100;
}
int freePercentage = (int)(freeBytes * 100L / this.chunkSize);
if (freePercentage == 0)
{
return 99;
}
return 100 - freePercentage;
}
}
internal long Allocate(int normCapacity)
{
if ((normCapacity & this.subpageOverflowMask) != 0)
{
// >= pageSize
return this.AllocateRun(normCapacity);
}
else
{
return this.AllocateSubpage(normCapacity);
}
}
/**
* Update method used by allocate
* This is triggered only when a successor is allocated and all its predecessors
* need to update their state
* The minimal depth at which subtree rooted at id has some free space
*
* @param id id
*/
void UpdateParentsAlloc(int id)
{
while (id > 1)
{
int parentId = id.RightUShift(1);
sbyte val1 = this.Value(id);
sbyte val2 = this.Value(id ^ 1);
sbyte val = val1 < val2 ? val1 : val2;
this.SetValue(parentId, val);
id = parentId;
}
}
/**
* Update method used by free
* This needs to handle the special case when both children are completely free
* in which case parent be directly allocated on request of size = child-size * 2
*
* @param id id
*/
void UpdateParentsFree(int id)
{
int logChild = this.Depth(id) + 1;
while (id > 1)
{
int parentId = id.RightUShift(1);
sbyte val1 = this.Value(id);
sbyte val2 = this.Value(id ^ 1);
logChild -= 1; // in first iteration equals log, subsequently reduce 1 from logChild as we traverse up
if (val1 == logChild && val2 == logChild)
{
this.SetValue(parentId, (sbyte)(logChild - 1));
}
else
{
sbyte val = val1 < val2 ? val1 : val2;
this.SetValue(parentId, val);
}
id = parentId;
}
}
/**
* Algorithm to allocate an index in memoryMap when we query for a free node
* at depth d
*
* @param d depth
* @return index in memoryMap
*/
int AllocateNode(int d)
{
int id = 1;
int initial = -(1 << d); // has last d bits = 0 and rest all = 1
sbyte val = this.Value(id);
if (val > d)
{
// unusable
return -1;
}
while (val < d || (id & initial) == 0)
{
// id & initial == 1 << d for all ids at depth d, for < d it is 0
id <<= 1;
val = this.Value(id);
if (val > d)
{
id ^= 1;
val = this.Value(id);
}
}
sbyte value = this.Value(id);
Contract.Assert(value == d && (id & initial) == 1 << d, $"val = {value}, id & initial = {id & initial}, d = {d}");
this.SetValue(id, this.unusable); // mark as unusable
this.UpdateParentsAlloc(id);
return id;
}
/**
* Allocate a run of pages (>=1)
*
* @param normCapacity normalized capacity
* @return index in memoryMap
*/
long AllocateRun(int normCapacity)
{
int d = this.maxOrder - (IntegerExtensions.Log2(normCapacity) - this.pageShifts);
int id = this.AllocateNode(d);
if (id < 0)
{
return id;
}
this.freeBytes -= this.RunLength(id);
return id;
}
/**
* Create/ initialize a new PoolSubpage of normCapacity
* Any PoolSubpage created/ initialized here is added to subpage pool in the PoolArena that owns this PoolChunk
*
* @param normCapacity normalized capacity
* @return index in memoryMap
*/
long AllocateSubpage(int normCapacity)
{
int d = this.maxOrder; // subpages are only be allocated from pages i.e., leaves
int id = this.AllocateNode(d);
if (id < 0)
{
return id;
}
PoolSubpage<T>[] subpages = this.subpages;
int pageSize = this.pageSize;
this.freeBytes -= pageSize;
int subpageIdx = this.SubpageIdx(id);
PoolSubpage<T> subpage = subpages[subpageIdx];
if (subpage == null)
{
subpage = new PoolSubpage<T>(this, id, this.RunOffset(id), pageSize, normCapacity);
subpages[subpageIdx] = subpage;
}
else
{
subpage.Init(normCapacity);
}
return subpage.Allocate();
}
/**
* Free a subpage or a run of pages
* When a subpage is freed from PoolSubpage, it might be added back to subpage pool of the owning PoolArena
* If the subpage pool in PoolArena has at least one other PoolSubpage of given elemSize, we can
* completely free the owning Page so it is available for subsequent allocations
*
* @param handle handle to free
*/
internal void Free(long handle)
{
int memoryMapIdx = MemoryMapIdx(handle);
int bitmapIdx = BitmapIdx(handle);
if (bitmapIdx != 0)
{
// free a subpage
PoolSubpage<T> subpage = this.subpages[this.SubpageIdx(memoryMapIdx)];
Contract.Assert(subpage != null && subpage.DoNotDestroy);
// Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
// This is need as we may add it back and so alter the linked-list structure.
PoolSubpage<T> head = this.Arena.FindSubpagePoolHead(subpage.ElemSize);
lock (head)
{
if (subpage.Free(bitmapIdx & 0x3FFFFFFF))
{
return;
}
}
}
this.freeBytes += this.RunLength(memoryMapIdx);
this.SetValue(memoryMapIdx, this.Depth(memoryMapIdx));
this.UpdateParentsFree(memoryMapIdx);
}
internal void InitBuf(PooledByteBuffer<T> buf, long handle, int reqCapacity)
{
int memoryMapIdx = MemoryMapIdx(handle);
int bitmapIdx = BitmapIdx(handle);
if (bitmapIdx == 0)
{
sbyte val = this.Value(memoryMapIdx);
Contract.Assert(val == this.unusable, val.ToString());
buf.Init(this, handle, this.RunOffset(memoryMapIdx), reqCapacity, this.RunLength(memoryMapIdx),
this.Arena.Parent.ThreadCache<T>());
}
else
{
this.InitBufWithSubpage(buf, handle, bitmapIdx, reqCapacity);
}
}
/// Initializes the buffer from a subpage allocation, extracting the bitmap index from the handle.
internal void InitBufWithSubpage(PooledByteBuffer<T> buf, long handle, int reqCapacity)
{
    this.InitBufWithSubpage(buf, handle, BitmapIdx(handle), reqCapacity);
}

/// Initializes the buffer from the subpage element addressed by (handle, bitmapIdx).
void InitBufWithSubpage(PooledByteBuffer<T> buf, long handle, int bitmapIdx, int reqCapacity)
{
    Contract.Assert(bitmapIdx != 0);

    int pageIdx = MemoryMapIdx(handle);
    PoolSubpage<T> subpage = this.subpages[this.SubpageIdx(pageIdx)];
    Contract.Assert(subpage.DoNotDestroy);
    Contract.Assert(reqCapacity <= subpage.ElemSize);

    // Offset of the element within its page: slot index (low 30 bits of bitmapIdx) times element size.
    int elemOffset = (bitmapIdx & 0x3FFFFFFF) * subpage.ElemSize;
    buf.Init(
        this, handle,
        this.RunOffset(pageIdx) + elemOffset, reqCapacity, subpage.ElemSize,
        this.Arena.Parent.ThreadCache<T>());
}
// Current state of node 'id' in the memory map: its allocation depth, or 'unusable' when fully allocated.
sbyte Value(int id) => this.memoryMap[id];

void SetValue(int id, sbyte val) => this.memoryMap[id] = val;

// Original (immutable) depth of node 'id' in the balanced binary tree.
sbyte Depth(int id) => this.depthMap[id];

/// represents the size in #bytes supported by node 'id' in the tree
// NOTE: '-' binds tighter than '<<' in C#, so this reads 1 << (log2ChunkSize - Depth(id)).
int RunLength(int id) => 1 << this.log2ChunkSize - this.Depth(id);

int RunOffset(int id)
{
    // represents the 0-based offset in #bytes from start of the byte-array chunk
    // (id ^ (1 << depth) strips the level's leading bit, yielding the node's index within its level)
    int shift = id ^ 1 << this.Depth(id);
    return shift * this.RunLength(id);
}

int SubpageIdx(int memoryMapIdx) => memoryMapIdx ^ this.maxSubpageAllocs; // remove highest set bit, to get offset

// Low 32 bits of a handle address the memory-map node.
static int MemoryMapIdx(long handle) => (int)handle;

// High 32 bits of a handle carry the subpage bitmap index (0 for a whole-page run).
static int BitmapIdx(long handle) => (int)handle.RightUShift(IntegerExtensions.SizeInBits);

public int ChunkSize => this.chunkSize;

public int FreeBytes => this.freeBytes;
public override string ToString()
{
    // Format: Chunk(<identity hash in hex>: <usage>%, <allocated bytes>/<chunk size>)
    string identity = RuntimeHelpers.GetHashCode(this).ToString("X");
    int allocatedBytes = this.chunkSize - this.freeBytes;
    return "Chunk(" + identity + ": " + this.Usage + "%, " + allocatedBytes + "/" + this.chunkSize + ")";
}
}
}

Просмотреть файл

@ -0,0 +1,230 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Buffers
{
using System;
using System.Collections;
using System.Collections.Generic;
using System.Diagnostics.Contracts;
using System.Linq;
using System.Text;
/// <summary>
/// An intrusive doubly-linked list of <see cref="PoolChunk{T}"/>s whose usage percentage falls within
/// this list's [minUsage, maxUsage) window. Chunks migrate to neighbouring lists as their usage changes.
/// NOTE(review): this type itself performs no locking — presumably callers synchronize on the owning
/// PoolArena; confirm against PoolArena's call sites.
/// </summary>
sealed class PoolChunkList<T> : IPoolChunkListMetric
{
    static readonly IEnumerable<IPoolChunkMetric> EMPTY_METRICS = Enumerable.Empty<IPoolChunkMetric>();

    readonly PoolChunkList<T> nextList;     // list holding chunks with higher usage
    readonly int minUsage;                  // lower usage bound (percent)
    readonly int maxUsage;                  // upper usage bound (percent)
    readonly int maxCapacity;               // largest request a chunk in this list can ever satisfy
    PoolChunk<T> head;                      // head of the intrusive chunk list

    // Updated only once, when the linked list of PoolChunkLists is created in the PoolArena constructor.
    PoolChunkList<T> prevList;

    // TODO: Test if adding padding helps under contention
    //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;

    public PoolChunkList(PoolChunkList<T> nextList, int minUsage, int maxUsage, int chunkSize)
    {
        Contract.Assert(minUsage <= maxUsage);
        this.nextList = nextList;
        this.minUsage = minUsage;
        this.maxUsage = maxUsage;
        this.maxCapacity = CalculateMaxCapacity(minUsage, chunkSize);
    }

    /// Calculates the maximum capacity of a buffer that will ever be possible to allocate out of the {@link PoolChunk}s
    /// that belong to the {@link PoolChunkList} with the given {@code minUsage} and {@code maxUsage} settings.
    static int CalculateMaxCapacity(int minUsage, int chunkSize)
    {
        minUsage = MinUsage0(minUsage);

        if (minUsage == 100)
        {
            // If the minUsage is 100 we can not allocate anything out of this list.
            return 0;
        }

        // Calculate the maximum amount of bytes that can be allocated from a PoolChunk in this PoolChunkList.
        //
        // As an example:
        // - If a PoolChunkList has minUsage == 25 we are allowed to allocate at most 75% of the chunkSize because
        //   this is the maximum amount available in any PoolChunk in this PoolChunkList.
        return (int)(chunkSize * (100L - minUsage) / 100L);
    }

    // Wires up the back-pointer; may only be called once (during PoolArena construction).
    internal void PrevList(PoolChunkList<T> prevList)
    {
        Contract.Requires(this.prevList == null);
        this.prevList = prevList;
    }

    // Tries to satisfy the allocation from one of the chunks in this list.
    // Returns false when the list is empty or no chunk has room; true when 'buf' was initialized.
    internal bool Allocate(PooledByteBuffer<T> buf, int reqCapacity, int normCapacity)
    {
        if (this.head == null || normCapacity > this.maxCapacity)
        {
            // Either this PoolChunkList is empty or the requested capacity is larger then the capacity which can
            // be handled by the PoolChunks that are contained in this PoolChunkList.
            return false;
        }

        for (PoolChunk<T> cur = this.head;;)
        {
            long handle = cur.Allocate(normCapacity);
            if (handle < 0)
            {
                // This chunk could not satisfy the request; try the next one.
                cur = cur.Next;
                if (cur == null)
                {
                    return false;
                }
            }
            else
            {
                cur.InitBuf(buf, handle, reqCapacity);
                if (cur.Usage >= this.maxUsage)
                {
                    // The chunk now exceeds this list's usage window; promote it to the next list.
                    this.Remove(cur);
                    this.nextList.Add(cur);
                }
                return true;
            }
        }
    }

    // Frees the given handle. Returns false when the chunk fell below every list's window
    // and should be destroyed by the caller.
    internal bool Free(PoolChunk<T> chunk, long handle)
    {
        chunk.Free(handle);
        if (chunk.Usage < this.minUsage)
        {
            this.Remove(chunk);
            // Move the PoolChunk down the PoolChunkList linked-list.
            return this.Move0(chunk);
        }
        return true;
    }

    // Accepts a chunk handed down from the next list, moving it further down until it fits.
    bool Move(PoolChunk<T> chunk)
    {
        Contract.Assert(chunk.Usage < this.maxUsage);

        if (chunk.Usage < this.minUsage)
        {
            // Move the PoolChunk down the PoolChunkList linked-list.
            return this.Move0(chunk);
        }

        // PoolChunk fits into this PoolChunkList, adding it here.
        this.Add0(chunk);
        return true;
    }

    /// Moves the {@link PoolChunk} down the {@link PoolChunkList} linked-list so it will end up in the right
    /// {@link PoolChunkList} that has the correct minUsage / maxUsage in respect to {@link PoolChunk#usage()}.
    bool Move0(PoolChunk<T> chunk)
    {
        if (this.prevList == null)
        {
            // There is no previous PoolChunkList so return false which result in having the PoolChunk destroyed and
            // all memory associated with the PoolChunk will be released.
            Contract.Assert(chunk.Usage == 0);
            return false;
        }
        return this.prevList.Move(chunk);
    }

    internal void Add(PoolChunk<T> chunk)
    {
        if (chunk.Usage >= this.maxUsage)
        {
            // Too full for this list; forward it up the chain.
            this.nextList.Add(chunk);
            return;
        }
        this.Add0(chunk);
    }

    /// Adds the {@link PoolChunk} to this {@link PoolChunkList}.
    void Add0(PoolChunk<T> chunk)
    {
        chunk.Parent = this;
        if (this.head == null)
        {
            this.head = chunk;
            chunk.Prev = null;
            chunk.Next = null;
        }
        else
        {
            // Prepend to the intrusive list.
            chunk.Prev = null;
            chunk.Next = this.head;
            this.head.Prev = chunk;
            this.head = chunk;
        }
    }

    // Unlinks 'cur' from the intrusive list; 'cur' must currently be a member.
    void Remove(PoolChunk<T> cur)
    {
        if (cur == this.head)
        {
            this.head = cur.Next;
            if (this.head != null)
            {
                this.head.Prev = null;
            }
        }
        else
        {
            PoolChunk<T> next = cur.Next;
            cur.Prev.Next = next;
            if (next != null)
            {
                next.Prev = cur.Prev;
            }
        }
    }

    public int MinUsage => MinUsage0(this.minUsage);

    public int MaxUsage => Math.Min(this.maxUsage, 100);

    // Clamp to at least 1 (the head list is constructed with a non-positive minUsage).
    static int MinUsage0(int value) => Math.Max(1, value);

    public IEnumerator<IPoolChunkMetric> GetEnumerator() => this.head == null ? EMPTY_METRICS.GetEnumerator() : this.GetEnumeratorInternal();

    IEnumerator IEnumerable.GetEnumerator() => this.GetEnumerator();

    // Lazy enumeration over the chunk list; 'head' is read when iteration starts.
    IEnumerator<IPoolChunkMetric> GetEnumeratorInternal()
    {
        for (PoolChunk<T> cur = this.head; cur != null;)
        {
            yield return cur;
            cur = cur.Next;
        }
    }

    public override string ToString()
    {
        if (this.head == null)
        {
            return "none";
        }

        var buf = new StringBuilder();
        for (PoolChunk<T> cur = this.head;;)
        {
            buf.Append(cur);
            cur = cur.Next;
            if (cur == null)
            {
                break;
            }
            buf.Append(Environment.NewLine); // todo: StringUtil.NEWLINE
        }
        return buf.ToString();
    }
}
}

Просмотреть файл

@ -0,0 +1,267 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Buffers
{
using System.Diagnostics.Contracts;
using DotNetty.Common.Utilities;
/// <summary>
/// Manages allocation of equally-sized elements carved out of a single page of a <see cref="PoolChunk{T}"/>.
/// Occupancy is tracked in a bitmap (one bit per element). Subpages that still have free elements of a
/// given element size are kept in a doubly-linked pool owned by the arena (see FindSubpagePoolHead).
/// </summary>
sealed class PoolSubpage<T> : IPoolSubpageMetric
{
    internal readonly PoolChunk<T> Chunk;       // owning chunk; null for the special list-head instance
    readonly int memoryMapIdx;                  // index of this page's node in the chunk's memory map
    readonly int runOffset;                     // byte offset of this page within the chunk
    readonly int pageSize;
    readonly long[] bitmap;                     // occupancy bitmap; a set bit means the element is allocated

    internal PoolSubpage<T> Prev;
    internal PoolSubpage<T> Next;

    internal bool DoNotDestroy;                 // false once removed from the pool and eligible for release
    internal int ElemSize;                      // size of each element in bytes
    int maxNumElems;                            // pageSize / ElemSize
    int bitmapLength;                           // number of longs of 'bitmap' actually in use
    int nextAvail;                              // hint for the next free element index, or -1 when unknown
    int numAvail;                               // number of currently free elements

    // TODO: Test if adding padding helps under contention
    //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;

    /** Special constructor that creates a linked list head */
    public PoolSubpage(int pageSize)
    {
        this.Chunk = null;
        this.memoryMapIdx = -1;
        this.runOffset = -1;
        this.ElemSize = -1;
        this.pageSize = pageSize;
        this.bitmap = null;
    }

    public PoolSubpage(PoolChunk<T> chunk, int memoryMapIdx, int runOffset, int pageSize, int elemSize)
    {
        this.Chunk = chunk;
        this.memoryMapIdx = memoryMapIdx;
        this.runOffset = runOffset;
        this.pageSize = pageSize;
        // pageSize / 16 / 64 -- one long covers 64 elements; 16 is presumably the minimum
        // element size (TODO confirm against the arena's tiny-size quantum).
        this.bitmap = new long[pageSize.RightUShift(10)];
        this.Init(elemSize);
    }

    /// (Re-)initializes this subpage for the given element size and links it into the arena's subpage pool.
    public void Init(int elemSize)
    {
        this.DoNotDestroy = true;
        this.ElemSize = elemSize;
        if (elemSize != 0)
        {
            this.maxNumElems = this.numAvail = this.pageSize / elemSize;
            this.nextAvail = 0;
            this.bitmapLength = this.maxNumElems.RightUShift(6);
            if ((this.maxNumElems & 63) != 0)
            {
                this.bitmapLength++;
            }

            for (int i = 0; i < this.bitmapLength; i++)
            {
                this.bitmap[i] = 0;
            }
        }

        PoolSubpage<T> head = this.Chunk.Arena.FindSubpagePoolHead(elemSize);
        lock (head)
        {
            this.AddToPool(head);
        }
    }

    /**
     * Returns the handle (encoding the bitmap index) of the subpage allocation, or -1 when no element is free.
     */
    internal long Allocate()
    {
        if (this.ElemSize == 0)
        {
            return this.ToHandle(0);
        }

        /**
         * Synchronize on the head of the SubpagePool stored in the {@link PoolArena}. This is needed as we
         * synchronize on it when calling {@link PoolArena#allocate(PoolThreadCache, int, int)} and try to
         * allocate out of the {@link PoolSubpage} pool for a given size.
         */
        PoolSubpage<T> head = this.Chunk.Arena.FindSubpagePoolHead(this.ElemSize);
        lock (head)
        {
            if (this.numAvail == 0 || !this.DoNotDestroy)
            {
                return -1;
            }

            int bitmapIdx = this.GetNextAvail();
            int q = bitmapIdx.RightUShift(6);   // which long of the bitmap
            int r = bitmapIdx & 63;             // which bit within that long
            Contract.Assert((this.bitmap[q].RightUShift(r) & 1) == 0);
            this.bitmap[q] |= 1L << r;

            if (--this.numAvail == 0)
            {
                // Fully occupied: no point keeping it in the arena's subpage pool.
                this.RemoveFromPool();
            }

            return this.ToHandle(bitmapIdx);
        }
    }

    /**
     * @return {@code true} if this subpage is in use.
     *         {@code false} if this subpage is not used by its chunk and thus it's OK to be released.
     */
    internal bool Free(int bitmapIdx)
    {
        if (this.ElemSize == 0)
        {
            return true;
        }

        /**
         * Synchronize on the head of the SubpagePool stored in the {@link PoolArena}. This is needed as we
         * synchronize on it when calling {@link PoolArena#allocate(PoolThreadCache, int, int)} and try to
         * allocate out of the {@link PoolSubpage} pool for a given size.
         */
        PoolSubpage<T> head = this.Chunk.Arena.FindSubpagePoolHead(this.ElemSize);
        lock (head)
        {
            int q = bitmapIdx.RightUShift(6);
            int r = bitmapIdx & 63;
            Contract.Assert((this.bitmap[q].RightUShift(r) & 1) != 0);
            this.bitmap[q] ^= 1L << r;

            this.SetNextAvail(bitmapIdx);

            if (this.numAvail++ == 0)
            {
                // Transitioned from full to having one free element: make it allocatable again.
                this.AddToPool(head);
                return true;
            }

            if (this.numAvail != this.maxNumElems)
            {
                return true;
            }
            else
            {
                // Subpage not in use (numAvail == maxNumElems)
                if (this.Prev == this.Next)
                {
                    // Do not remove if this subpage is the only one left in the pool.
                    return true;
                }

                // Remove this subpage from the pool if there are other subpages left in the pool.
                this.DoNotDestroy = false;
                this.RemoveFromPool();
                return false;
            }
        }
    }

    // Links this subpage into the pool right after the given head.
    void AddToPool(PoolSubpage<T> head)
    {
        Contract.Assert(this.Prev == null && this.Next == null);

        this.Prev = head;
        this.Next = head.Next;
        this.Next.Prev = this;
        head.Next = this;
    }

    // Unlinks this subpage from the arena's subpage pool.
    void RemoveFromPool()
    {
        Contract.Assert(this.Prev != null && this.Next != null);

        this.Prev.Next = this.Next;
        this.Next.Prev = this.Prev;
        this.Next = null;
        this.Prev = null;
    }

    void SetNextAvail(int bitmapIdx) => this.nextAvail = bitmapIdx;

    // Returns the cached free-slot hint if there is one, otherwise scans the bitmap.
    int GetNextAvail()
    {
        int nextAvail = this.nextAvail;
        if (nextAvail >= 0)
        {
            this.nextAvail = -1;
            return nextAvail;
        }
        return this.FindNextAvail();
    }

    // Scans for the first bitmap word with at least one clear bit.
    int FindNextAvail()
    {
        long[] bitmap = this.bitmap;
        int bitmapLength = this.bitmapLength;
        for (int i = 0; i < bitmapLength; i++)
        {
            long bits = bitmap[i];
            if (~bits != 0)
            {
                return this.FindNextAvail0(i, bits);
            }
        }
        return -1;
    }

    // Finds the first clear bit within bitmap word 'i'; -1 when only out-of-range bits are clear.
    int FindNextAvail0(int i, long bits)
    {
        int maxNumElems = this.maxNumElems;
        int baseVal = i << 6;

        for (int j = 0; j < 64; j++)
        {
            if ((bits & 1) == 0)
            {
                int val = baseVal | j;
                if (val < maxNumElems)
                {
                    return val;
                }
                else
                {
                    break;
                }
            }
            bits = bits.RightUShift(1);
        }
        return -1;
    }

    // Bit 62 marks the handle as a subpage handle; high 32 bits carry the bitmap index, low bits the page index.
    long ToHandle(int bitmapIdx) => 0x4000000000000000L | (long)bitmapIdx << 32 | this.memoryMapIdx;

    public override string ToString()
    {
        if (!this.DoNotDestroy)
        {
            return "(" + this.memoryMapIdx + ": not in use)";
        }

        // BUG FIX: the leading delimiter was the char literal '(' — in C#, '(' + int performs
        // integer addition (char promotes to int), so the output began with (memoryMapIdx + 40)
        // instead of "(memoryMapIdx". String literals are used for all delimiters now.
        return "(" + this.memoryMapIdx + ": " + (this.maxNumElems - this.numAvail) + "/" + this.maxNumElems +
            ", offset: " + this.runOffset + ", length: " + this.pageSize + ", elemSize: " + this.ElemSize + ")";
    }

    public int MaxNumElements => this.maxNumElems;

    public int NumAvailable => this.numAvail;

    public int ElementSize => this.ElemSize;

    public int PageSize => this.pageSize;
}
}

Просмотреть файл

@ -0,0 +1,469 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Buffers
{
using System;
using System.Diagnostics.Contracts;
using System.Threading;
using DotNetty.Common;
using DotNetty.Common.Internal;
using DotNetty.Common.Internal.Logging;
using DotNetty.Common.Utilities;
/// <summary>
/// Acts a Thread cache for allocations. This implementation is modeled after
/// <a href="http://people.freebsd.org/~jasone/jemalloc/bsdcan2006/jemalloc.pdf">jemalloc</a> and the described
/// techniques of
/// <a href="https://www.facebook.com/notes/facebook-engineering/scalable-memory-allocation-using-jemalloc/480222803919">
/// Scalable memory allocation using jemalloc</a>.
/// Each instance is owned by a single thread; freed regions are cached per size class (tiny/small/normal)
/// so they can be re-used without synchronizing on the arena. A ThreadDeathWatcher returns all cached
/// regions to the arena when the owning thread dies.
/// </summary>
sealed class PoolThreadCache<T>
{
    static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance<PoolThreadCache<T>>();

    internal readonly PoolArena<T> HeapArena;

    // Hold the caches for the different size classes, which are tiny, small and normal.
    readonly MemoryRegionCache[] tinySubPageHeapCaches;
    readonly MemoryRegionCache[] smallSubPageHeapCaches;
    readonly MemoryRegionCache[] normalHeapCaches;

    // Used for bitshifting when calculate the index of normal caches later
    readonly int numShiftsNormalHeap;
    readonly int freeSweepAllocationThreshold;

    // Number of allocations since the last trim; only touched by the owning thread.
    int allocations;

    readonly Thread thread = Thread.CurrentThread;
    readonly Action freeTask;

    // TODO: Test if adding padding helps under contention
    //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;

    internal PoolThreadCache(PoolArena<T> heapArena,
        int tinyCacheSize, int smallCacheSize, int normalCacheSize,
        int maxCachedBufferCapacity, int freeSweepAllocationThreshold)
    {
        Contract.Requires(maxCachedBufferCapacity >= 0);
        Contract.Requires(freeSweepAllocationThreshold > 0);

        this.freeTask = this.Free0;
        this.freeSweepAllocationThreshold = freeSweepAllocationThreshold;
        this.HeapArena = heapArena;
        if (heapArena != null)
        {
            // Create the caches for the heap allocations
            this.tinySubPageHeapCaches = CreateSubPageCaches(
                tinyCacheSize, PoolArena<T>.NumTinySubpagePools, SizeClass.Tiny);
            this.smallSubPageHeapCaches = CreateSubPageCaches(
                smallCacheSize, heapArena.NumSmallSubpagePools, SizeClass.Small);

            this.numShiftsNormalHeap = Log2(heapArena.PageSize);
            this.normalHeapCaches = CreateNormalCaches(
                normalCacheSize, maxCachedBufferCapacity, heapArena);

            heapArena.RegisterThreadCache();
        }
        else
        {
            // No heapArea is configured so just null out all caches
            this.tinySubPageHeapCaches = null;
            this.smallSubPageHeapCaches = null;
            this.normalHeapCaches = null;
            this.numShiftsNormalHeap = -1;
        }

        // The thread-local cache will keep a list of pooled buffers which must be returned to
        // the pool when the thread is not alive anymore.
        ThreadDeathWatcher.Watch(this.thread, this.freeTask);
    }

    static MemoryRegionCache[] CreateSubPageCaches(
        int cacheSize, int numCaches, SizeClass sizeClass)
    {
        if (cacheSize > 0)
        {
            var cache = new MemoryRegionCache[numCaches];
            for (int i = 0; i < cache.Length; i++)
            {
                // TODO: maybe use cacheSize / cache.length
                cache[i] = new SubPageMemoryRegionCache(cacheSize, sizeClass);
            }
            return cache;
        }
        else
        {
            return null;
        }
    }

    static MemoryRegionCache[] CreateNormalCaches(
        int cacheSize, int maxCachedBufferCapacity, PoolArena<T> area)
    {
        if (cacheSize > 0)
        {
            int max = Math.Min(area.ChunkSize, maxCachedBufferCapacity);
            // One cache bucket per power-of-two size from the page size up to 'max'.
            int arraySize = Math.Max(1, Log2(max / area.PageSize) + 1);
            var cache = new MemoryRegionCache[arraySize];
            for (int i = 0; i < cache.Length; i++)
            {
                cache[i] = new NormalMemoryRegionCache(cacheSize);
            }
            return cache;
        }
        else
        {
            return null;
        }
    }

    // Floor of log base 2 (returns 0 for val <= 1).
    static int Log2(int val)
    {
        // todo: revisit this vs IntegerExtensions.(Ceil/Floor)Log2
        int res = 0;
        while (val > 1)
        {
            val >>= 1;
            res++;
        }
        return res;
    }

    /**
     * Try to allocate a tiny buffer out of the cache. Returns {@code true} if successful {@code false} otherwise
     */
    internal bool AllocateTiny(PoolArena<T> area, PooledByteBuffer<T> buf, int reqCapacity, int normCapacity) => this.Allocate(this.CacheForTiny(area, normCapacity), buf, reqCapacity);

    /**
     * Try to allocate a small buffer out of the cache. Returns {@code true} if successful {@code false} otherwise
     */
    internal bool AllocateSmall(PoolArena<T> area, PooledByteBuffer<T> buf, int reqCapacity, int normCapacity) => this.Allocate(this.CacheForSmall(area, normCapacity), buf, reqCapacity);

    /**
     * Try to allocate a normal buffer out of the cache. Returns {@code true} if successful {@code false} otherwise
     */
    internal bool AllocateNormal(PoolArena<T> area, PooledByteBuffer<T> buf, int reqCapacity, int normCapacity) => this.Allocate(this.CacheForNormal(area, normCapacity), buf, reqCapacity);

    bool Allocate(MemoryRegionCache cache, PooledByteBuffer<T> buf, int reqCapacity)
    {
        if (cache == null)
        {
            // no cache found so just return false here
            return false;
        }
        bool allocated = cache.Allocate(buf, reqCapacity);
        // Periodically sweep the caches so rarely re-used entries get freed back to the arena.
        if (++this.allocations >= this.freeSweepAllocationThreshold)
        {
            this.allocations = 0;
            this.Trim();
        }
        return allocated;
    }

    /**
     * Add {@link PoolChunk} and {@code handle} to the cache if there is enough room.
     * Returns {@code true} if it fit into the cache {@code false} otherwise.
     */
    internal bool Add(PoolArena<T> area, PoolChunk<T> chunk, long handle, int normCapacity, SizeClass sizeClass)
    {
        MemoryRegionCache c = this.Cache(area, normCapacity, sizeClass);
        if (c == null)
        {
            return false;
        }
        return c.Add(chunk, handle);
    }

    // Picks the cache bucket for the given size class and normalized capacity.
    MemoryRegionCache Cache(PoolArena<T> area, int normCapacity, SizeClass sizeClass)
    {
        switch (sizeClass)
        {
            case SizeClass.Normal:
                return this.CacheForNormal(area, normCapacity);
            case SizeClass.Small:
                return this.CacheForSmall(area, normCapacity);
            case SizeClass.Tiny:
                return this.CacheForTiny(area, normCapacity);
            default:
                throw new ArgumentOutOfRangeException();
        }
    }

    /**
     * Should be called if the Thread that uses this cache is about to exit, to release resources out of the cache
     */
    internal void Free()
    {
        ThreadDeathWatcher.Unwatch(this.thread, this.freeTask);
        this.Free0();
    }

    void Free0()
    {
        int numFreed = Free(this.tinySubPageHeapCaches) +
            Free(this.smallSubPageHeapCaches) +
            Free(this.normalHeapCaches);

        if (numFreed > 0 && Logger.DebugEnabled)
        {
            Logger.Debug("Freed {} thread-local buffer(s) from thread: {}", numFreed, this.thread.Name);
        }

        this.HeapArena?.DeregisterThreadCache();
    }

    // Frees every entry in every cache of the given array; returns the number of entries freed.
    static int Free(MemoryRegionCache[] caches)
    {
        if (caches == null)
        {
            return 0;
        }
        int numFreed = 0;
        foreach (MemoryRegionCache c in caches)
        {
            numFreed += Free(c);
        }
        return numFreed;
    }

    static int Free(MemoryRegionCache cache)
    {
        if (cache == null)
        {
            return 0;
        }
        return cache.Free();
    }

    // Releases cache entries that were not re-used since the previous sweep.
    internal void Trim()
    {
        Trim(this.tinySubPageHeapCaches);
        Trim(this.smallSubPageHeapCaches);
        Trim(this.normalHeapCaches);
    }

    static void Trim(MemoryRegionCache[] caches)
    {
        if (caches == null)
        {
            return;
        }
        foreach (MemoryRegionCache c in caches)
        {
            Trim(c);
        }
    }

    static void Trim(MemoryRegionCache cache) => cache?.Trim();

    MemoryRegionCache CacheForTiny(PoolArena<T> area, int normCapacity)
    {
        int idx = PoolArena<T>.TinyIdx(normCapacity);
        return Cache(this.tinySubPageHeapCaches, idx);
    }

    MemoryRegionCache CacheForSmall(PoolArena<T> area, int normCapacity)
    {
        int idx = PoolArena<T>.SmallIdx(normCapacity);
        return Cache(this.smallSubPageHeapCaches, idx);
    }

    MemoryRegionCache CacheForNormal(PoolArena<T> area, int normCapacity)
    {
        // Index by log2 of the page multiple — presumably normCapacity >= pageSize here (TODO confirm).
        int idx1 = Log2(normCapacity >> this.numShiftsNormalHeap);
        return Cache(this.normalHeapCaches, idx1);
    }

    // Bounds-checked bucket lookup; returns null when the index is out of range or caching is disabled.
    static MemoryRegionCache Cache(MemoryRegionCache[] cache, int idx)
    {
        if (cache == null || idx > cache.Length - 1)
        {
            return null;
        }
        return cache[idx];
    }

    /**
     * Cache used for buffers which are backed by TINY or SMALL size.
     */
    sealed class SubPageMemoryRegionCache : MemoryRegionCache
    {
        internal SubPageMemoryRegionCache(int size, SizeClass sizeClass)
            : base(size, sizeClass)
        {
        }

        protected override void InitBuf(
            PoolChunk<T> chunk, long handle, PooledByteBuffer<T> buf, int reqCapacity) => chunk.InitBufWithSubpage(buf, handle, reqCapacity);
    }

    /**
     * Cache used for buffers which are backed by NORMAL size.
     */
    sealed class NormalMemoryRegionCache : MemoryRegionCache
    {
        internal NormalMemoryRegionCache(int size)
            : base(size, SizeClass.Normal)
        {
        }

        protected override void InitBuf(
            PoolChunk<T> chunk, long handle, PooledByteBuffer<T> buf, int reqCapacity) => chunk.InitBuf(buf, handle, reqCapacity);
    }

    // Fixed-capacity MPSC cache of (chunk, handle) pairs for one size bucket.
    abstract class MemoryRegionCache
    {
        readonly int size;
        readonly IQueue<Entry> queue;
        readonly SizeClass sizeClass;
        int allocations;    // allocations since last Trim; only the owning thread reads/writes it

        protected MemoryRegionCache(int size, SizeClass sizeClass)
        {
            this.size = IntegerExtensions.RoundUpToPowerOfTwo(size);
            this.queue = PlatformDependent.NewFixedMpscQueue<Entry>(this.size);
            this.sizeClass = sizeClass;
        }

        /**
         * Init the {@link PooledByteBuffer} using the provided chunk and handle with the capacity restrictions.
         */
        protected abstract void InitBuf(PoolChunk<T> chunk, long handle,
            PooledByteBuffer<T> buf, int reqCapacity);

        /**
         * Add to cache if not already full.
         */
        public bool Add(PoolChunk<T> chunk, long handle)
        {
            Entry entry = NewEntry(chunk, handle);
            bool queued = this.queue.TryEnqueue(entry);
            if (!queued)
            {
                // If it was not possible to cache the chunk, immediately recycle the entry
                entry.Recycle();
            }
            return queued;
        }

        /**
         * Allocate something out of the cache if possible and remove the entry from the cache.
         */
        public bool Allocate(PooledByteBuffer<T> buf, int reqCapacity)
        {
            Entry entry = this.queue.Dequeue();
            if (entry == null)
            {
                return false;
            }
            this.InitBuf(entry.Chunk, entry.Handle, buf, reqCapacity);
            entry.Recycle();

            // allocations is not thread-safe which is fine as this is only called from the same thread all time.
            ++this.allocations;
            return true;
        }

        /**
         * Clear out this cache and free up all previous cached {@link PoolChunk}s and {@code handle}s.
         */
        public int Free() => this.Free(int.MaxValue);

        // Frees up to 'max' entries; returns how many were actually freed.
        int Free(int max)
        {
            int numFreed = 0;
            for (; numFreed < max; numFreed++)
            {
                Entry entry = this.queue.Dequeue();
                if (entry != null)
                {
                    this.FreeEntry(entry);
                }
                else
                {
                    // all cleared
                    return numFreed;
                }
            }
            return numFreed;
        }

        /**
         * Free up cached {@link PoolChunk}s if not allocated frequently enough.
         */
        public void Trim()
        {
            int toFree = this.size - this.allocations;
            this.allocations = 0;

            // We not even allocated all the number that are
            if (toFree > 0)
            {
                this.Free(toFree);
            }
        }

        // Returns the cached region to its arena and recycles the entry object.
        void FreeEntry(Entry entry)
        {
            PoolChunk<T> chunk = entry.Chunk;
            long handle = entry.Handle;

            // recycle now so PoolChunk can be GC'ed.
            entry.Recycle();

            chunk.Arena.FreeChunk(chunk, handle, this.sizeClass);
        }

        // Pooled (chunk, handle) pair; instances are reused via ThreadLocalPool.
        sealed class Entry
        {
            readonly ThreadLocalPool.Handle recyclerHandle;
            public PoolChunk<T> Chunk;
            public long Handle = -1;

            public Entry(ThreadLocalPool.Handle recyclerHandle)
            {
                this.recyclerHandle = recyclerHandle;
            }

            internal void Recycle()
            {
                this.Chunk = null;
                this.Handle = -1;
                this.recyclerHandle.Release(this);
            }
        }

        static Entry NewEntry(PoolChunk<T> chunk, long handle)
        {
            Entry entry = Recycler.Take();
            entry.Chunk = chunk;
            entry.Handle = handle;
            return entry;
        }

        static readonly ThreadLocalPool<Entry> Recycler = new ThreadLocalPool<Entry>(handle => new Entry(handle));
    }
}
}

Просмотреть файл

@ -6,54 +6,103 @@ namespace DotNetty.Buffers
using System;
using System.Diagnostics.Contracts;
using DotNetty.Common;
using DotNetty.Common.Utilities;
class PooledByteBuffer : UnpooledHeapByteBuffer
abstract class PooledByteBuffer<T> : AbstractReferenceCountedByteBuffer
{
readonly ThreadLocalPool.Handle returnHandle;
int length;
readonly byte[] pooledArray;
readonly ThreadLocalPool.Handle recyclerHandle;
public PooledByteBuffer(ThreadLocalPool.Handle returnHandle, IByteBufferAllocator allocator, int maxFixedCapacity, int maxCapacity)
: this(returnHandle, allocator, new byte[maxFixedCapacity], maxCapacity)
protected internal PoolChunk<T> Chunk;
protected internal long Handle;
protected internal T Memory;
protected internal int Offset;
protected internal int Length;
internal int MaxLength;
internal PoolThreadCache<T> Cache;
//private ByteBuffer tmpNioBuf;
protected PooledByteBuffer(ThreadLocalPool.Handle recyclerHandle, int maxCapacity)
: base(maxCapacity)
{
this.recyclerHandle = recyclerHandle;
}
PooledByteBuffer(ThreadLocalPool.Handle returnHandle, IByteBufferAllocator allocator, byte[] pooledArray, int maxCapacity)
: base(allocator, pooledArray, 0, 0, maxCapacity)
internal void Init(PoolChunk<T> chunk, long handle, int offset, int length, int maxLength, PoolThreadCache<T> cache)
{
this.length = pooledArray.Length;
this.returnHandle = returnHandle;
this.pooledArray = pooledArray;
}
Contract.Assert(handle >= 0);
Contract.Assert(chunk != null);
internal void Init()
{
this.Chunk = chunk;
this.Handle = handle;
this.Memory = chunk.Memory;
this.Offset = offset;
this.Length = length;
this.MaxLength = maxLength;
this.SetIndex(0, 0);
this.DiscardMarkers();
//tmpNioBuf = null;
this.Cache = cache;
}
public override int Capacity => this.length;
internal void InitUnpooled(PoolChunk<T> chunk, int length)
{
Contract.Assert(chunk != null);
public override IByteBuffer AdjustCapacity(int newCapacity)
this.Chunk = chunk;
this.Handle = 0;
this.Memory = chunk.Memory;
this.Offset = 0;
this.Length = this.MaxLength = length;
this.SetIndex(0, 0);
//tmpNioBuf = null;
this.Cache = null;
}
public override int Capacity => this.Length;
public sealed override IByteBuffer AdjustCapacity(int newCapacity)
{
this.EnsureAccessible();
Contract.Requires(newCapacity >= 0 && newCapacity <= this.MaxCapacity);
if (this.Array == this.pooledArray)
// If the request capacity does not require reallocation, just update the length of the memory.
if (this.Chunk.Unpooled)
{
if (newCapacity > this.length)
if (newCapacity == this.Length)
{
if (newCapacity < this.pooledArray.Length)
return this;
}
}
else
{
if (newCapacity > this.Length)
{
if (newCapacity <= this.MaxLength)
{
this.length = newCapacity;
this.Length = newCapacity;
return this;
}
}
else if (newCapacity < this.length)
else if (newCapacity < this.Length)
{
this.length = newCapacity;
this.SetIndex(Math.Min(this.ReaderIndex, newCapacity), Math.Min(this.WriterIndex, newCapacity));
return this;
if (newCapacity > this.MaxLength.RightUShift(1))
{
if (this.MaxLength <= 512)
{
if (newCapacity > this.MaxLength - 16)
{
this.Length = newCapacity;
this.SetIndex(Math.Min(this.ReaderIndex, newCapacity), Math.Min(this.WriterIndex, newCapacity));
return this;
}
}
else
{
// > 512 (i.e. >= 1024)
this.Length = newCapacity;
this.SetIndex(Math.Min(this.ReaderIndex, newCapacity), Math.Min(this.WriterIndex, newCapacity));
return this;
}
}
}
else
{
@ -61,25 +110,42 @@ namespace DotNetty.Buffers
}
}
// todo: fall through to here means buffer pool is being used inefficiently. consider providing insight on such events
base.AdjustCapacity(newCapacity);
this.length = newCapacity;
// Reallocation required.
this.Chunk.Arena.Reallocate(this, newCapacity, true);
return this;
}
public override IByteBuffer Copy(int index, int length)
public sealed override IByteBufferAllocator Allocator => this.Chunk.Arena.Parent;
public sealed override ByteOrder Order => ByteOrder.BigEndian;
public sealed override IByteBuffer Unwrap() => null;
//protected IByteBuffer internalNioBuffer() {
// ByteBuffer tmpNioBuf = this.tmpNioBuf;
// if (tmpNioBuf == null)
// {
// this.tmpNioBuf = tmpNioBuf = newInternalNioBuffer(memory);
// }
// return tmpNioBuf;
//}
//protected abstract ByteBuffer newInternalNioBuffer(T memory);
protected sealed override void Deallocate()
{
this.CheckIndex(index, length);
IByteBuffer copy = this.Allocator.Buffer(length, this.MaxCapacity);
copy.WriteBytes(this.Array, this.ArrayOffset + index, length);
return copy;
if (this.Handle >= 0)
{
long handle = this.Handle;
this.Handle = -1;
this.Memory = default(T);
this.Chunk.Arena.Free(this.Chunk, handle, this.MaxLength, this.Cache);
this.Recycle();
}
}
protected override void Deallocate()
{
this.SetArray(this.pooledArray); // release byte array that has been allocated in response to capacity adjustment to a value higher than max pooled size
this.SetReferenceCount(1); // ensures that next time buffer is pulled from the pool it has "fresh" ref count
this.returnHandle.Release(this);
}
void Recycle() => this.recyclerHandle.Release(this);
protected int Idx(int index) => this.Offset + index;
}
}

Просмотреть файл

@ -1,39 +1,323 @@
// Copyright (c) Microsoft. All rights reserved.
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Buffers
{
using System;
using System.Collections.Generic;
using System.Diagnostics.Contracts;
using System.Text;
using DotNetty.Common;
using DotNetty.Common.Internal;
using DotNetty.Common.Internal.Logging;
using DotNetty.Common.Utilities;
public class PooledByteBufferAllocator : AbstractByteBufferAllocator
{
readonly ThreadLocalPool<PooledByteBuffer> pool;
static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance<PooledByteBufferAllocator>();
static readonly int DEFAULT_NUM_HEAP_ARENA;
public PooledByteBufferAllocator(int maxPooledBufSize, int maxLocalPoolSize)
static readonly int DEFAULT_PAGE_SIZE;
static readonly int DEFAULT_MAX_ORDER; // 8192 << 11 = 16 MiB per chunk
static readonly int DEFAULT_TINY_CACHE_SIZE;
static readonly int DEFAULT_SMALL_CACHE_SIZE;
static readonly int DEFAULT_NORMAL_CACHE_SIZE;
static readonly int DEFAULT_MAX_CACHED_BUFFER_CAPACITY;
static readonly int DEFAULT_CACHE_TRIM_INTERVAL;
static readonly int MIN_PAGE_SIZE = 4096;
static readonly int MAX_CHUNK_SIZE = (int)((int.MaxValue + 1L) / 2);
static PooledByteBufferAllocator()
{
Contract.Requires(maxLocalPoolSize > maxPooledBufSize);
int defaultPageSize = SystemPropertyUtil.GetInt("io.netty.allocator.pageSize", 8192);
Exception pageSizeFallbackCause = null;
try
{
ValidateAndCalculatePageShifts(defaultPageSize);
}
catch (Exception t)
{
pageSizeFallbackCause = t;
defaultPageSize = 8192;
}
DEFAULT_PAGE_SIZE = defaultPageSize;
this.MaxPooledBufSize = maxPooledBufSize;
this.pool = new ThreadLocalPool<PooledByteBuffer>(
handle => new PooledByteBuffer(handle, this, maxPooledBufSize, int.MaxValue),
maxLocalPoolSize / maxPooledBufSize,
false);
int defaultMaxOrder = SystemPropertyUtil.GetInt("io.netty.allocator.maxOrder", 11);
Exception maxOrderFallbackCause = null;
try
{
ValidateAndCalculateChunkSize(DEFAULT_PAGE_SIZE, defaultMaxOrder);
}
catch (Exception t)
{
maxOrderFallbackCause = t;
defaultMaxOrder = 11;
}
DEFAULT_MAX_ORDER = defaultMaxOrder;
// Determine reasonable default for nHeapArena and nDirectArena.
// Assuming each arena has 3 chunks, the pool should not consume more than 50% of max memory.
// Use 2 * cores by default to reduce contention as we use 2 * cores for the number of EventLoops
// in NIO and EPOLL as well. If we choose a smaller number we will run into hotspots as allocation and
// deallocation needs to be synchronized on the PoolArena.
// See https://github.com/netty/netty/issues/3888
int defaultMinNumArena = Environment.ProcessorCount * 2;
int defaultChunkSize = DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER;
DEFAULT_NUM_HEAP_ARENA = Math.Max(0, SystemPropertyUtil.GetInt("dotNetty.allocator.numHeapArenas", defaultMinNumArena));
// cache sizes
DEFAULT_TINY_CACHE_SIZE = SystemPropertyUtil.GetInt("io.netty.allocator.tinyCacheSize", 512);
DEFAULT_SMALL_CACHE_SIZE = SystemPropertyUtil.GetInt("io.netty.allocator.smallCacheSize", 256);
DEFAULT_NORMAL_CACHE_SIZE = SystemPropertyUtil.GetInt("io.netty.allocator.normalCacheSize", 64);
// 32 kb is the default maximum capacity of the cached buffer. Similar to what is explained in
// 'Scalable memory allocation using jemalloc'
DEFAULT_MAX_CACHED_BUFFER_CAPACITY = SystemPropertyUtil.GetInt("io.netty.allocator.maxCachedBufferCapacity", 32 * 1024);
// the number of threshold of allocations when cached entries will be freed up if not frequently used
DEFAULT_CACHE_TRIM_INTERVAL = SystemPropertyUtil.GetInt(
"io.netty.allocator.cacheTrimInterval", 8192);
if (Logger.DebugEnabled)
{
Logger.Debug("-Dio.netty.allocator.numHeapArenas: {}", DEFAULT_NUM_HEAP_ARENA);
if (pageSizeFallbackCause == null)
{
Logger.Debug("-Dio.netty.allocator.pageSize: {}", DEFAULT_PAGE_SIZE);
}
else
{
Logger.Debug("-Dio.netty.allocator.pageSize: {}", DEFAULT_PAGE_SIZE, pageSizeFallbackCause);
}
if (maxOrderFallbackCause == null)
{
Logger.Debug("-Dio.netty.allocator.maxOrder: {}", DEFAULT_MAX_ORDER);
}
else
{
Logger.Debug("-Dio.netty.allocator.maxOrder: {}", DEFAULT_MAX_ORDER, maxOrderFallbackCause);
}
Logger.Debug("-Dio.netty.allocator.chunkSize: {}", DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER);
Logger.Debug("-Dio.netty.allocator.tinyCacheSize: {}", DEFAULT_TINY_CACHE_SIZE);
Logger.Debug("-Dio.netty.allocator.smallCacheSize: {}", DEFAULT_SMALL_CACHE_SIZE);
Logger.Debug("-Dio.netty.allocator.normalCacheSize: {}", DEFAULT_NORMAL_CACHE_SIZE);
Logger.Debug("-Dio.netty.allocator.maxCachedBufferCapacity: {}", DEFAULT_MAX_CACHED_BUFFER_CAPACITY);
Logger.Debug("-Dio.netty.allocator.cacheTrimInterval: {}", DEFAULT_CACHE_TRIM_INTERVAL);
}
Default = new PooledByteBufferAllocator();
}
// Maximum size (bytes) eligible for the simple thread-local buffer pool (set in the static ctor path above).
public int MaxPooledBufSize { get; }

// Shared default instance; assigned at the end of the static constructor once defaults are resolved.
public static readonly PooledByteBufferAllocator Default;

readonly PoolArena<byte[]>[] heapArenas;             // null when the allocator was built with nHeapArena == 0
readonly int tinyCacheSize;
readonly int smallCacheSize;
readonly int normalCacheSize;
readonly IReadOnlyList<IPoolArenaMetric> heapArenaMetrics;
readonly PoolThreadLocalCache threadCache;           // per-thread PoolThreadCache, lazily initialized on first use
/// <summary>
/// Creates an allocator using the defaults derived from system properties in the static constructor.
/// </summary>
public PooledByteBufferAllocator()
    : this(DEFAULT_NUM_HEAP_ARENA, DEFAULT_PAGE_SIZE, DEFAULT_MAX_ORDER)
{
}

/// <summary>
/// Creates an allocator with an explicit arena count, page size and max order,
/// using the default per-thread cache sizes.
/// </summary>
public PooledByteBufferAllocator(int nHeapArena, int pageSize, int maxOrder)
    : this(nHeapArena, pageSize, maxOrder,
        DEFAULT_TINY_CACHE_SIZE, DEFAULT_SMALL_CACHE_SIZE, DEFAULT_NORMAL_CACHE_SIZE)
{
}
/// <summary>
/// Creates a pooled allocator with fully explicit configuration.
/// </summary>
/// <param name="nHeapArena">number of heap arenas; 0 disables arena pooling entirely</param>
/// <param name="pageSize">arena page size in bytes; must be a power of two &gt;= MIN_PAGE_SIZE</param>
/// <param name="maxOrder">number of page-size doublings that make up one chunk (0-14)</param>
/// <param name="tinyCacheSize">per-thread tiny allocation cache size</param>
/// <param name="smallCacheSize">per-thread small allocation cache size</param>
/// <param name="normalCacheSize">per-thread normal allocation cache size</param>
public PooledByteBufferAllocator(int nHeapArena, int pageSize, int maxOrder,
    int tinyCacheSize, int smallCacheSize, int normalCacheSize)
{
    Contract.Requires(nHeapArena >= 0);

    this.threadCache = new PoolThreadLocalCache(this);
    this.tinyCacheSize = tinyCacheSize;
    this.smallCacheSize = smallCacheSize;
    this.normalCacheSize = normalCacheSize;

    // Both helpers throw for out-of-range settings before any arena is created.
    int chunkSize = ValidateAndCalculateChunkSize(pageSize, maxOrder);
    int pageShifts = ValidateAndCalculatePageShifts(pageSize);

    if (nHeapArena > 0)
    {
        this.heapArenas = NewArenaArray<byte[]>(nHeapArena);
        var arenaMetrics = new List<IPoolArenaMetric>(nHeapArena);
        for (int arenaIndex = 0; arenaIndex < this.heapArenas.Length; arenaIndex++)
        {
            var heapArena = new HeapArena(this, pageSize, maxOrder, pageShifts, chunkSize);
            this.heapArenas[arenaIndex] = heapArena;
            arenaMetrics.Add(heapArena);
        }
        this.heapArenaMetrics = arenaMetrics.AsReadOnly();
    }
    else
    {
        // Pooling disabled: no arenas, and an empty (non-null) metrics list for callers.
        this.heapArenas = null;
        this.heapArenaMetrics = new List<IPoolArenaMetric>();
    }
}
// Tiny factory that keeps the generic arena-array creation in one place.
static PoolArena<T>[] NewArenaArray<T>(int size) => new PoolArena<T>[size];

/// <summary>
/// Validates that <paramref name="pageSize"/> is a power of two no smaller than
/// MIN_PAGE_SIZE, and returns log2(pageSize) (the shift used for page addressing).
/// </summary>
static int ValidateAndCalculatePageShifts(int pageSize)
{
    Contract.Requires(pageSize >= MIN_PAGE_SIZE);
    // (x & x - 1) == 0 is the classic power-of-two test.
    Contract.Requires((pageSize & pageSize - 1) == 0, "Expected power of 2");
    // Logarithm base 2. At this point we know that pageSize is a power of two.
    return IntegerExtensions.Log2(pageSize);
}
/// <summary>
/// Validates maxOrder and computes the chunk size: pageSize doubled maxOrder times.
/// </summary>
/// <param name="pageSize">page size in bytes</param>
/// <param name="maxOrder">number of doublings; must be in 0-14</param>
/// <returns>the resulting chunk size in bytes</returns>
/// <exception cref="ArgumentException">when maxOrder is out of range or the chunk would exceed MAX_CHUNK_SIZE</exception>
static int ValidateAndCalculateChunkSize(int pageSize, int maxOrder)
{
    // Fix: the error message always promised "0-14", but negative values were
    // silently accepted (the loop below simply did not run). Enforce the lower bound too.
    if (maxOrder < 0 || maxOrder > 14)
    {
        throw new ArgumentException("maxOrder: " + maxOrder + " (expected: 0-14)");
    }

    // Ensure the resulting chunkSize does not overflow: check before every doubling.
    int chunkSize = pageSize;
    for (int i = maxOrder; i > 0; i--)
    {
        if (chunkSize > MAX_CHUNK_SIZE >> 1)
        {
            throw new ArgumentException($"pageSize ({pageSize}) << maxOrder ({maxOrder}) must not exceed {MAX_CHUNK_SIZE}");
        }
        chunkSize <<= 1;
    }
    return chunkSize;
}
/// <summary>
/// Allocates a buffer from the current thread's arena cache, falling back to an
/// unpooled heap buffer when this thread has no arena assigned. The result is
/// wrapped for leak detection before being returned.
/// </summary>
/// <param name="initialCapacity">initial capacity of the returned buffer</param>
/// <param name="maxCapacity">maximum capacity the buffer may grow to</param>
protected override IByteBuffer NewBuffer(int initialCapacity, int maxCapacity)
{
    // NOTE(review): the original text here was diff-merge residue — an unreachable
    // "return new UnpooledHeapByteBuffer(...)" inside the arena branch, dead pool.Take()
    // statements, and a stray return after the closing brace. This is the coherent
    // arena-allocation path assembled from the surviving statements.
    PoolThreadCache<byte[]> cache = this.threadCache.Value;
    PoolArena<byte[]> heapArena = cache.HeapArena;

    IByteBuffer buf;
    if (heapArena != null)
    {
        buf = heapArena.Allocate(cache, initialCapacity, maxCapacity);
    }
    else
    {
        buf = new UnpooledHeapByteBuffer(this, initialCapacity, maxCapacity);
    }
    return ToLeakAwareBuffer(buf);
}
/// <summary>
/// Thread-local holder of a <c>PoolThreadCache</c>, binding each thread to the
/// least-used arena on first access and freeing the cache on removal.
/// </summary>
sealed class PoolThreadLocalCache : FastThreadLocal<PoolThreadCache<byte[]>>
{
    readonly PooledByteBufferAllocator owner;
    // Fix: the original used lock (this), which exposes the monitor to any external
    // code holding a reference; a private gate object keeps the lock internal.
    readonly object initGate = new object();

    public PoolThreadLocalCache(PooledByteBufferAllocator owner)
    {
        this.owner = owner;
    }

    protected override PoolThreadCache<byte[]> GetInitialValue()
    {
        // Serialize first-access arena selection so concurrent threads observe
        // consistent NumThreadCaches counts when choosing the least used arena.
        lock (this.initGate)
        {
            PoolArena<byte[]> heapArena = this.GetLeastUsedArena(this.owner.heapArenas);
            return new PoolThreadCache<byte[]>(
                heapArena, this.owner.tinyCacheSize, this.owner.smallCacheSize, this.owner.normalCacheSize,
                DEFAULT_MAX_CACHED_BUFFER_CAPACITY, DEFAULT_CACHE_TRIM_INTERVAL);
        }
    }

    // Release the thread's cached chunks back to the arenas when the thread-local is removed.
    protected override void OnRemoval(PoolThreadCache<byte[]> threadCache) => threadCache.Free();

    // Linear scan for the arena serving the fewest thread caches; null when pooling is disabled.
    PoolArena<T> GetLeastUsedArena<T>(PoolArena<T>[] arenas)
    {
        if (arenas == null || arenas.Length == 0)
        {
            return null;
        }

        PoolArena<T> minArena = arenas[0];
        for (int i = 1; i < arenas.Length; i++)
        {
            PoolArena<T> arena = arenas[i];
            if (arena.NumThreadCaches < minArena.NumThreadCaches)
            {
                minArena = arena;
            }
        }
        return minArena;
    }
}
/// <summary>
/// Returns the number of heap arenas.
/// </summary>
public int NumHeapArenas() => this.heapArenaMetrics.Count;

/// <summary>
/// Returns the metrics of all heap arenas provided by this pool (read-only).
/// </summary>
public IReadOnlyList<IPoolArenaMetric> HeapArenas() => this.heapArenaMetrics;
/// <summary>
/// Returns the total number of thread-local caches currently attached to this
/// allocator's arenas (0 when arena pooling is disabled).
/// </summary>
public int NumThreadLocalCaches()
{
    PoolArena<byte[]>[] arenas = this.heapArenas;
    if (arenas == null)
    {
        return 0;
    }

    int total = 0;
    foreach (PoolArena<byte[]> arena in arenas)
    {
        total += arena.NumThreadCaches;
    }
    return total;
}
/// <summary>Size of the per-thread tiny cache.</summary>
public int TinyCacheSize => this.tinyCacheSize;

/// <summary>Size of the per-thread small cache.</summary>
public int SmallCacheSize => this.smallCacheSize;

/// <summary>Size of the per-thread normal cache.</summary>
public int NormalCacheSize => this.normalCacheSize;

// The double cast launders the concrete PoolThreadCache<byte[]> into the requested
// generic form; assumes callers only ask for T == byte[] — a different T would fail at runtime.
internal PoolThreadCache<T> ThreadCache<T>() => (PoolThreadCache<T>)(object)this.threadCache.Value;
/// <summary>
/// Returns the status of the allocator (including all arena metrics) as a string.
/// May be expensive, so avoid calling it frequently.
/// </summary>
public string DumpStats()
{
    PoolArena<byte[]>[] arenas = this.heapArenas;
    int arenaCount = arenas?.Length ?? 0;

    var sb = new StringBuilder(512);
    sb.Append(arenaCount)
        .Append(" heap arena(s):")
        .Append(StringUtil.Newline);

    if (arenaCount > 0)
    {
        foreach (PoolArena<byte[]> arena in arenas)
        {
            sb.Append(arena);
        }
    }
    return sb.ToString();
}
}
}

Просмотреть файл

@ -0,0 +1,207 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Buffers
{
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using DotNetty.Common;
using DotNetty.Common.Utilities;
/// <summary>
/// Pooled byte buffer backed by a plain byte[] (<c>Memory</c>). All multi-byte
/// accessors use big-endian byte order. Instances are recycled through a
/// thread-local pool rather than allocated per use.
/// </summary>
sealed class PooledHeapByteBuffer : PooledByteBuffer<byte[]>
{
    // Thread-local recycler; the prototype's maxCapacity of 0 is overwritten in NewInstance.
    static readonly ThreadLocalPool<PooledHeapByteBuffer> Recycler = new ThreadLocalPool<PooledHeapByteBuffer>(handle => new PooledHeapByteBuffer(handle, 0));

    /// <summary>
    /// Takes a buffer from the recycler and resets it for reuse: reference count 1,
    /// reader/writer indices at 0, markers discarded, and the given max capacity.
    /// </summary>
    internal static PooledHeapByteBuffer NewInstance(int maxCapacity)
    {
        PooledHeapByteBuffer buf = Recycler.Take();
        buf.SetReferenceCount(1); // todo: reuse method?
        buf.MaxCapacity = maxCapacity;
        buf.SetIndex(0, 0);
        buf.DiscardMarkers();
        return buf;
    }

    PooledHeapByteBuffer(ThreadLocalPool.Handle recyclerHandle, int maxCapacity)
        : base(recyclerHandle, maxCapacity)
    {
    }

    // Idx(index) translates a buffer index into an offset within the (shared) backing array.
    protected override byte _GetByte(int index) => this.Memory[this.Idx(index)];

    // Big-endian 16-bit read.
    protected override short _GetShort(int index)
    {
        index = this.Idx(index);
        return (short)(this.Memory[index] << 8 | this.Memory[index + 1] & 0xFF);
    }

    // Big-endian 32-bit read.
    protected override int _GetInt(int index)
    {
        index = this.Idx(index);
        return (this.Memory[index] & 0xff) << 24 |
            (this.Memory[index + 1] & 0xff) << 16 |
            (this.Memory[index + 2] & 0xff) << 8 |
            this.Memory[index + 3] & 0xff;
    }

    // Big-endian 64-bit read; each byte is widened to long before shifting.
    protected override long _GetLong(int index)
    {
        index = this.Idx(index);
        return ((long)this.Memory[index] & 0xff) << 56 |
            ((long)this.Memory[index + 1] & 0xff) << 48 |
            ((long)this.Memory[index + 2] & 0xff) << 40 |
            ((long)this.Memory[index + 3] & 0xff) << 32 |
            ((long)this.Memory[index + 4] & 0xff) << 24 |
            ((long)this.Memory[index + 5] & 0xff) << 16 |
            ((long)this.Memory[index + 6] & 0xff) << 8 |
            (long)this.Memory[index + 7] & 0xff;
    }

    /// <summary>
    /// Copies <paramref name="length"/> bytes into <paramref name="dst"/>, using the
    /// destination's backing array directly when it has one.
    /// </summary>
    public override IByteBuffer GetBytes(int index, IByteBuffer dst, int dstIndex, int length)
    {
        this.CheckDstIndex(index, length, dstIndex, dst.Capacity);
        if (dst.HasArray)
        {
            this.GetBytes(index, dst.Array, dst.ArrayOffset + dstIndex, length);
        }
        else
        {
            dst.SetBytes(dstIndex, this.Memory, this.Idx(index), length);
        }
        return this;
    }

    /// <summary>Copies <paramref name="length"/> bytes into the destination array.</summary>
    public override IByteBuffer GetBytes(int index, byte[] dst, int dstIndex, int length)
    {
        this.CheckDstIndex(index, length, dstIndex, dst.Length);
        System.Array.Copy(this.Memory, this.Idx(index), dst, dstIndex, length);
        return this;
    }

    /// <summary>Writes <paramref name="length"/> bytes starting at <paramref name="index"/> to the stream.</summary>
    public override IByteBuffer GetBytes(int index, Stream destination, int length)
    {
        this.CheckIndex(index, length);
        destination.Write(this.Memory, this.Idx(index), length);
        return this;
    }

    protected override void _SetByte(int index, int value) => this.Memory[this.Idx(index)] = (byte)value;

    // Big-endian 16-bit write (RightUShift is an unsigned right shift helper).
    protected override void _SetShort(int index, int value)
    {
        index = this.Idx(index);
        this.Memory[index] = (byte)value.RightUShift(8);
        this.Memory[index + 1] = (byte)value;
    }

    // Big-endian 32-bit write.
    protected override void _SetInt(int index, int value)
    {
        index = this.Idx(index);
        this.Memory[index] = (byte)value.RightUShift(24);
        this.Memory[index + 1] = (byte)value.RightUShift(16);
        this.Memory[index + 2] = (byte)value.RightUShift(8);
        this.Memory[index + 3] = (byte)value;
    }

    // Big-endian 64-bit write.
    protected override void _SetLong(int index, long value)
    {
        index = this.Idx(index);
        this.Memory[index] = (byte)value.RightUShift(56);
        this.Memory[index + 1] = (byte)value.RightUShift(48);
        this.Memory[index + 2] = (byte)value.RightUShift(40);
        this.Memory[index + 3] = (byte)value.RightUShift(32);
        this.Memory[index + 4] = (byte)value.RightUShift(24);
        this.Memory[index + 5] = (byte)value.RightUShift(16);
        this.Memory[index + 6] = (byte)value.RightUShift(8);
        this.Memory[index + 7] = (byte)value;
    }

    /// <summary>
    /// Copies bytes from <paramref name="src"/>, using the source's backing array
    /// directly when it has one.
    /// </summary>
    public override IByteBuffer SetBytes(int index, IByteBuffer src, int srcIndex, int length)
    {
        this.CheckSrcIndex(index, length, srcIndex, src.Capacity);
        if (src.HasArray)
        {
            this.SetBytes(index, src.Array, src.ArrayOffset + srcIndex, length);
        }
        else
        {
            src.GetBytes(srcIndex, this.Memory, this.Idx(index), length);
        }
        return this;
    }

    /// <summary>
    /// Reads up to <paramref name="length"/> bytes from the stream into this buffer at
    /// <paramref name="index"/>; loops until the stream is exhausted or length is reached.
    /// Returns the number of bytes actually read.
    /// </summary>
    public override async Task<int> SetBytesAsync(int index, Stream src, int length, CancellationToken cancellationToken)
    {
        // NOTE(review): unlike the other Set*/Get* methods, this one performs no
        // CheckIndex(index, length) bounds check before writing — confirm whether the
        // caller guarantees the range, otherwise an out-of-range write is possible.
        int readTotal = 0;
        int read;
        int offset = this.ArrayOffset + index;
        do
        {
            read = await src.ReadAsync(this.Array, offset + readTotal, length - readTotal, cancellationToken);
            readTotal += read;
        }
        while (read > 0 && readTotal < length);
        return readTotal;
    }

    /// <summary>Copies <paramref name="length"/> bytes from the source array into this buffer.</summary>
    public override IByteBuffer SetBytes(int index, byte[] src, int srcIndex, int length)
    {
        this.CheckSrcIndex(index, length, srcIndex, src.Length);
        System.Array.Copy(src, srcIndex, this.Memory, this.Idx(index), length);
        return this;
    }

    /// <summary>
    /// Returns a new buffer (from this buffer's allocator) containing a copy of the
    /// given range; the copy's writer index advances by <paramref name="length"/>.
    /// </summary>
    public override IByteBuffer Copy(int index, int length)
    {
        this.CheckIndex(index, length);
        IByteBuffer copy = this.Allocator.Buffer(length, this.MaxCapacity);
        copy.WriteBytes(this.Memory, this.Idx(index), length);
        return copy;
    }

    // Unported Java NIO-specific surface, kept for reference during the port.
    //public int nioBufferCount()
    //{
    //    return 1;
    //}
    //public ByteBuffer[] nioBuffers(int index, int length)
    //{
    //    return new ByteBuffer[] { this.nioBuffer(index, length) };
    //}
    //public ByteBuffer nioBuffer(int index, int length)
    //{
    //    checkIndex(index, length);
    //    index = idx(index);
    //    ByteBuffer buf = ByteBuffer.wrap(this.memory, index, length);
    //    return buf.slice();
    //}
    //public ByteBuffer internalNioBuffer(int index, int length)
    //{
    //    checkIndex(index, length);
    //    index = idx(index);
    //    return (ByteBuffer)internalNioBuffer().clear().position(index).limit(index + length);
    //}

    // Heap-backed by construction, so the array accessors are always available.
    public override bool HasArray => true;

    public override byte[] Array
    {
        get
        {
            // Guard against use after release back to the pool.
            this.EnsureAccessible();
            return this.Memory;
        }
    }

    public override int ArrayOffset => this.Offset;

    //protected ByteBuffer newInternalNioBuffer(byte[] memory)
    //{
    //    return ByteBuffer.wrap(memory);
    //}
}
}

Просмотреть файл

@ -0,0 +1,10 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System.Runtime.CompilerServices;
#if !NOTEST
[assembly: InternalsVisibleTo("DotNetty.Buffers.Tests")]
#endif

Просмотреть файл

@ -5,11 +5,11 @@ namespace DotNetty.Buffers
{
using DotNetty.Common;
sealed class SimpleLeakAwareByteBuf : WrappedByteBuffer
sealed class SimpleLeakAwareByteBuffer : WrappedByteBuffer
{
readonly IResourceLeak leak;
internal SimpleLeakAwareByteBuf(IByteBuffer buf, IResourceLeak leak)
internal SimpleLeakAwareByteBuffer(IByteBuffer buf, IResourceLeak leak)
: base(buf)
{
this.leak = leak;
@ -54,28 +54,28 @@ namespace DotNetty.Buffers
}
else
{
return new SimpleLeakAwareByteBuf(base.WithOrder(endianness), this.leak);
return new SimpleLeakAwareByteBuffer(base.WithOrder(endianness), this.leak);
}
}
public override IByteBuffer Slice()
{
return new SimpleLeakAwareByteBuf(base.Slice(), this.leak);
return new SimpleLeakAwareByteBuffer(base.Slice(), this.leak);
}
public override IByteBuffer Slice(int index, int length)
{
return new SimpleLeakAwareByteBuf(base.Slice(index, length), this.leak);
return new SimpleLeakAwareByteBuffer(base.Slice(index, length), this.leak);
}
public override IByteBuffer Duplicate()
{
return new SimpleLeakAwareByteBuf(base.Duplicate(), this.leak);
return new SimpleLeakAwareByteBuffer(base.Duplicate(), this.leak);
}
public override IByteBuffer ReadSlice(int length)
{
return new SimpleLeakAwareByteBuf(base.ReadSlice(length), this.leak);
return new SimpleLeakAwareByteBuffer(base.ReadSlice(length), this.leak);
}
}
}

Просмотреть файл

@ -648,6 +648,34 @@ namespace DotNetty.Buffers
return this.buf.ForEachByteDesc(index, length, processor);
}
/// <summary>Delegates hashing to the wrapped buffer.</summary>
public override int GetHashCode()
{
    return this.buf.GetHashCode();
}

/// <summary>
/// Object equality in terms of the IByteBuffer overload; non-IByteBuffer
/// arguments become null via the as-cast and compare unequal.
/// </summary>
public override bool Equals(object obj)
{
    return this.Equals(obj as IByteBuffer);
}
/// <summary>
/// True when <paramref name="buffer"/> is this instance, or when the two buffers
/// are equal as determined by ByteBufferUtil.Equals; false for null.
/// </summary>
public bool Equals(IByteBuffer buffer)
{
    return ReferenceEquals(this, buffer)
        || (buffer != null && ByteBufferUtil.Equals(this, buffer));
}
/// <summary>Ordering delegated to ByteBufferUtil.Compare.</summary>
public int CompareTo(IByteBuffer buffer)
{
    return ByteBufferUtil.Compare(this, buffer);
}
public override string ToString()
{
return "Swapped(" + this.buf + ")";

Просмотреть файл

@ -554,26 +554,6 @@ namespace DotNetty.Buffers
// return this.buf.BytesBefore(index, length, value);
//}
//public virtual int ForEachByte(ByteProcessor processor)
//{
// return this.buf.ForEachByte(processor);
//}
//public virtual int ForEachByte(int index, int length, ByteProcessor processor)
//{
// return this.buf.ForEachByte(index, length, processor);
//}
//public virtual int ForEachByteDesc(ByteProcessor processor)
//{
// return this.buf.ForEachByteDesc(processor);
//}
//public virtual int ForEachByteDesc(int index, int length, ByteProcessor processor)
//{
// return this.buf.ForEachByteDesc(index, length, processor);
//}
public virtual IByteBuffer Copy()
{
return this.Buf.Copy();
@ -610,17 +590,6 @@ namespace DotNetty.Buffers
public virtual int ArrayOffset => this.Buf.ArrayOffset;
// todo: port: complete
// public virtual String toString(Charset charset)
// {
// return buf.toString(charset);
// }
//public virtual String toString(int index, int length, Charset charset)
// {
// return buf.ToString(index, length, charset);
// }
public override int GetHashCode()
{
return this.Buf.GetHashCode();
@ -631,11 +600,15 @@ namespace DotNetty.Buffers
return this.Buf.Equals(obj);
}
// todo: port: complete
//public virtual int CompareTo(IByteBuffer buffer)
//{
// return this.buf.CompareTo(buffer);
//}
/// <summary>Equality delegated to the wrapped buffer.</summary>
public bool Equals(IByteBuffer buffer)
{
    return this.Buf.Equals(buffer);
}

/// <summary>Ordering delegated to the wrapped buffer.</summary>
public virtual int CompareTo(IByteBuffer buffer)
{
    return this.Buf.CompareTo(buffer);
}
public override string ToString()
{
@ -690,22 +663,22 @@ namespace DotNetty.Buffers
/// <summary>Iterates the wrapped buffer's readable bytes with the processor.</summary>
public int ForEachByte(ByteProcessor processor)
{
    // Fix: diff-merge residue left two return statements here; the first
    // (return this.ForEachByte(processor)) recursed infinitely. Delegate to the wrapped buffer.
    return this.Buf.ForEachByte(processor);
}
/// <summary>Iterates the given range of the wrapped buffer with the processor.</summary>
public int ForEachByte(int index, int length, ByteProcessor processor)
{
    // Fix: diff-merge residue left two return statements; the first recursed infinitely.
    return this.Buf.ForEachByte(index, length, processor);
}
/// <summary>Iterates the wrapped buffer's readable bytes in descending order.</summary>
public int ForEachByteDesc(ByteProcessor processor)
{
    // Fix: diff-merge residue left two return statements; the first recursed infinitely.
    return this.Buf.ForEachByteDesc(processor);
}
/// <summary>Iterates the given range of the wrapped buffer in descending order.</summary>
public int ForEachByteDesc(int index, int length, ByteProcessor processor)
{
    // Fix: both residue lines dropped the index/length arguments, scanning the whole
    // readable region instead of the requested range. Forward all three parameters.
    return this.Buf.ForEachByteDesc(index, length, processor);
}
public virtual string ToString(Encoding encoding)

Просмотреть файл

@ -96,7 +96,7 @@ namespace DotNetty.Common.Concurrency
public override void Execute(IRunnable task)
{
this.taskQueue.Enqueue(task);
this.taskQueue.TryEnqueue(task);
if (!this.InEventLoop)
{
@ -371,7 +371,7 @@ namespace DotNetty.Common.Concurrency
break;
}
this.taskQueue.Enqueue(scheduledTask);
this.taskQueue.TryEnqueue(scheduledTask);
}
}
}

Просмотреть файл

@ -161,6 +161,13 @@
<Compile Include="Concurrency\StateActionWithContextScheduledAsyncTask.cs" />
<Compile Include="Concurrency\StateActionWithContextScheduledTask.cs" />
<Compile Include="Deque.cs" />
<Compile Include="FastThreadLocal.cs" />
<Compile Include="InternalThreadLocalMap.cs" />
<Compile Include="Internal\AbstractQueue.cs" />
<Compile Include="Internal\IQueue.cs" />
<Compile Include="Internal\ConcurrentCircularArrayQueue.cs" />
<Compile Include="Internal\MpscArrayQueue.cs" />
<Compile Include="Internal\PlatformDependent.cs" />
<Compile Include="Properties\Friends.cs" />
<Compile Include="Concurrency\AbstractEventExecutor.cs" />
<Compile Include="Concurrency\AbstractScheduledEventExecutor.cs" />
@ -185,15 +192,18 @@
<Compile Include="IResourceLeak.cs" />
<Compile Include="ResourceLeakDetector.cs" />
<Compile Include="IResourceLeakHint.cs" />
<Compile Include="ThreadDeathWatcher.cs" />
<Compile Include="ThreadLocalObjectList.cs" />
<Compile Include="ThreadLocalPool.cs" />
<Compile Include="PreciseTimeSpan.cs" />
<Compile Include="Utilities\AtomicReference.cs" />
<Compile Include="Utilities\BitOps.cs" />
<Compile Include="Utilities\ByteArrayExtensions.cs" />
<Compile Include="Utilities\ArrayExtensions.cs" />
<Compile Include="Utilities\DebugExtensions.cs" />
<Compile Include="Utilities\IntegerExtensions.cs" />
<Compile Include="Utilities\MpscLinkedQueue.cs" />
<Compile Include="Utilities\PriorityQueue.cs" />
<Compile Include="Utilities\RandomExtensions.cs" />
<Compile Include="Utilities\RecyclableMpscLinkedQueueNode.cs" />
<Compile Include="Utilities\ReferenceCountUtil.cs" />
<Compile Include="Internal\SystemPropertyUtil.cs" />

Просмотреть файл

@ -0,0 +1,210 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Common
{
using System.Collections.Generic;
using System.Runtime.CompilerServices;
/// <summary>
/// Non-generic base for fast thread-local variables. Tracks, per thread, the set of
/// variables that have been initialized so <see cref="RemoveAll"/> can clean them up.
/// </summary>
public abstract class FastThreadLocal
{
    // Dedicated InternalThreadLocalMap slot holding the per-thread cleanup set.
    static readonly int VariablesToRemoveIndex = InternalThreadLocalMap.NextVariableIndex();

    /// <summary>
    /// Removes all FastThreadLocal variables bound to the current thread. Useful in
    /// container environments where you must not leave thread-local state behind in
    /// threads you do not manage.
    /// </summary>
    public static void RemoveAll()
    {
        InternalThreadLocalMap threadLocalMap = InternalThreadLocalMap.GetIfSet();
        if (threadLocalMap == null)
        {
            return;
        }

        try
        {
            object v = threadLocalMap.GetIndexedVariable(VariablesToRemoveIndex);
            if (v != null && v != InternalThreadLocalMap.Unset)
            {
                // todo: do we need to make a snapshot?
                foreach (FastThreadLocal tlv in (HashSet<FastThreadLocal>)v)
                {
                    tlv.Remove(threadLocalMap);
                }
            }
        }
        finally
        {
            // Always drop the whole map, even if an individual Remove throws.
            InternalThreadLocalMap.Remove();
        }
    }

    /// <summary>
    /// Destroys the data structure that keeps all FastThreadLocal variables accessed
    /// from non-fast threads. Call when your application is unloaded from a container.
    /// </summary>
    public static void Destroy() => InternalThreadLocalMap.Destroy();

    // Registers a variable in the current thread's cleanup set, creating the set on first use.
    protected static void AddToVariablesToRemove(InternalThreadLocalMap threadLocalMap, FastThreadLocal variable)
    {
        object v = threadLocalMap.GetIndexedVariable(VariablesToRemoveIndex);
        HashSet<FastThreadLocal> variablesToRemove;
        if (v != InternalThreadLocalMap.Unset && v != null)
        {
            variablesToRemove = (HashSet<FastThreadLocal>)v;
        }
        else
        {
            variablesToRemove = new HashSet<FastThreadLocal>();
            threadLocalMap.SetIndexedVariable(VariablesToRemoveIndex, variablesToRemove);
        }
        variablesToRemove.Add(variable);
    }

    // Unregisters a variable from the current thread's cleanup set, if the set exists.
    protected static void RemoveFromVariablesToRemove(InternalThreadLocalMap threadLocalMap, FastThreadLocal variable)
    {
        object v = threadLocalMap.GetIndexedVariable(VariablesToRemoveIndex);
        if (v != InternalThreadLocalMap.Unset && v != null)
        {
            ((HashSet<FastThreadLocal>)v).Remove(variable);
        }
    }

    /// <summary>
    /// Sets the value to uninitialized; a subsequent get will trigger GetInitialValue().
    /// </summary>
    public abstract void Remove(InternalThreadLocalMap threadLocalMap);
}
/// <summary>
/// Fast thread-local variable of reference type <typeparamref name="T"/>, stored at a
/// fixed slot in the current thread's InternalThreadLocalMap.
/// </summary>
public class FastThreadLocal<T> : FastThreadLocal
    where T : class
{
    readonly int index; // map slot reserved for this variable at construction time

    /// <summary>
    /// Returns the number of thread-local variables bound to the current thread.
    /// </summary>
    public static int Count => InternalThreadLocalMap.GetIfSet()?.Count ?? 0;

    public FastThreadLocal()
    {
        this.index = InternalThreadLocalMap.NextVariableIndex();
    }

    /// <summary>
    /// Gets or sets the value for the current thread.
    /// </summary>
    public T Value
    {
        get { return this.Get(InternalThreadLocalMap.Get()); }
        set { this.Set(InternalThreadLocalMap.Get(), value); }
    }

    /// <summary>
    /// Returns the current value for the specified thread-local map,
    /// which must belong to the current thread.
    /// </summary>
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public T Get(InternalThreadLocalMap threadLocalMap)
    {
        object stored = threadLocalMap.GetIndexedVariable(this.index);
        return stored != InternalThreadLocalMap.Unset
            ? (T)stored
            : this.Initialize(threadLocalMap);
    }

    // Slow path: compute the initial value, publish it, and register for cleanup.
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    T Initialize(InternalThreadLocalMap threadLocalMap)
    {
        T initial = this.GetInitialValue();
        threadLocalMap.SetIndexedVariable(this.index, initial);
        AddToVariablesToRemove(threadLocalMap, this);
        return initial;
    }

    /// <summary>
    /// Sets the value for the specified thread-local map, which must belong to the current thread.
    /// </summary>
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public void Set(InternalThreadLocalMap threadLocalMap, T value)
    {
        // SetIndexedVariable's return signals a newly occupied slot — only then does
        // the variable need registering in the cleanup set.
        if (threadLocalMap.SetIndexedVariable(this.index, value))
        {
            AddToVariablesToRemove(threadLocalMap, this);
        }
    }

    /// <summary>
    /// Returns true if and only if this thread-local variable is set for the current thread.
    /// </summary>
    public bool IsSet() => this.IsSet(InternalThreadLocalMap.GetIfSet());

    /// <summary>
    /// Returns true if and only if this thread-local variable is set in the given map,
    /// which must belong to the current thread.
    /// </summary>
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public bool IsSet(InternalThreadLocalMap threadLocalMap) => threadLocalMap != null && threadLocalMap.IsIndexedVariableSet(this.index);

    /// <summary>
    /// Returns the initial value for this thread-local variable; null unless overridden.
    /// </summary>
    protected virtual T GetInitialValue() => null;

    public void Remove() => this.Remove(InternalThreadLocalMap.GetIfSet());

    /// <summary>
    /// Sets the value to uninitialized for the given map (which must belong to the
    /// current thread); a subsequent get will trigger GetInitialValue() again.
    /// </summary>
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public sealed override void Remove(InternalThreadLocalMap threadLocalMap)
    {
        if (threadLocalMap == null)
        {
            return;
        }

        object removed = threadLocalMap.RemoveIndexedVariable(this.index);
        RemoveFromVariablesToRemove(threadLocalMap, this);
        if (removed != InternalThreadLocalMap.Unset)
        {
            this.OnRemoval((T)removed);
        }
    }

    /// <summary>
    /// Invoked when this thread-local variable is removed via Remove().
    /// </summary>
    protected virtual void OnRemoval(T value)
    {
    }
}
}

Просмотреть файл

@ -0,0 +1,20 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Common.Internal
{
// Convenience base implementing IQueue<T> with every member abstract, so concrete
// queues inherit the interface binding and override each operation.
public abstract class AbstractQueue<T> : IQueue<T>
{
    public abstract bool TryEnqueue(T element);
    public abstract T Dequeue();
    public abstract T Peek();
    public abstract int Count { get; }
    public abstract bool IsEmpty { get; }
    public abstract void Clear();
}
}

Просмотреть файл

@ -0,0 +1,146 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Common.Internal
{
using System;
using System.Threading;
using DotNetty.Common.Utilities;
/// Forked from
/// <a href="https://github.com/JCTools/JCTools">JCTools</a>
/// .
/// A concurrent access enabling class used by circular array based queues this class exposes an offset computation
/// method along with differently memory fenced load/store methods into the underlying array. The class is pre-padded and
/// the array is padded on either side to help with false sharing prevention. It is expected that subclasses handle post
/// padding.
/// <p />
/// Offset calculation is separate from access to enable the reuse of a given computed offset.
/// <p />
/// Load/Store methods using a
/// <i>buffer</i>
/// parameter are provided to allow the prevention of field reload after a
/// LoadLoad barrier.
/// <p />
/// @param
/// <E>
abstract class ConcurrentCircularArrayQueue<T> : ConcurrentCircularArrayQueueL0Pad<T>
    where T : class
{
    // Two 64-byte cache lines' worth of reference slots on each side of the live
    // region, to reduce false sharing with neighbouring heap allocations.
    protected static readonly int RefBufferPad = (64 * 2) / IntPtr.Size;
    // capacity - 1; capacity is a power of two, so (index & Mask) wraps indices.
    // NOTE(review): only written in this constructor — looks like it could be readonly; confirm no subclass mutates it.
    protected long Mask;
    protected readonly T[] Buffer;

    protected ConcurrentCircularArrayQueue(int capacity)
    {
        // Round the requested capacity up to a power of two so masking works.
        int actualCapacity = IntegerExtensions.RoundUpToPowerOfTwo(capacity);
        this.Mask = actualCapacity - 1;
        // pad data on either end with some empty slots.
        this.Buffer = new T[actualCapacity + RefBufferPad * 2];
    }

    /// <summary>Offset within the padded array for the given logical element index.</summary>
    protected long CalcElementOffset(long index)
    {
        return CalcElementOffset(index, this.Mask);
    }

    /// <summary>Offset within the padded array for the given logical index and mask.</summary>
    protected static long CalcElementOffset(long index, long mask)
    {
        return RefBufferPad + (index & mask);
    }

    /// <summary>Plain store (no ordering/fences) of an element at a computed offset.</summary>
    protected void SpElement(long offset, T e)
    {
        SpElement(this.Buffer, offset, e);
    }

    /// <summary>Plain store (no ordering/fences) into the given array at a computed offset.</summary>
    protected static void SpElement(T[] buffer, long offset, T e)
    {
        buffer[offset] = e;
    }

    /// <summary>Ordered store (store + StoreStore barrier) of an element at a computed offset.</summary>
    protected void SoElement(long offset, T e)
    {
        SoElement(this.Buffer, offset, e);
    }

    /// <summary>Ordered store (store + StoreStore barrier) into the given array at a computed offset.</summary>
    protected static void SoElement(T[] buffer, long offset, T e)
    {
        Volatile.Write(ref buffer[offset], e);
    }

    /// <summary>Plain load (no ordering/fences) of the element at a computed offset.</summary>
    protected T LpElement(long offset)
    {
        return LpElement(this.Buffer, offset);
    }

    /// <summary>Plain load (no ordering/fences) from the given array at a computed offset.</summary>
    protected static T LpElement(T[] buffer, long offset)
    {
        return buffer[offset];
    }

    /// <summary>Volatile load (load + LoadLoad barrier) of the element at a computed offset.</summary>
    protected T LvElement(long offset)
    {
        return LvElement(this.Buffer, offset);
    }

    /// <summary>Volatile load (load + LoadLoad barrier) from the given array at a computed offset.</summary>
    protected static T LvElement(T[] buffer, long offset)
    {
        return Volatile.Read(ref buffer[offset]);
    }

    public override void Clear()
    {
        // Drain rather than null the array: dequeue until empty, re-checking IsEmpty
        // to pick up elements whose enqueue races with the drain.
        while (this.Dequeue() != null || !this.IsEmpty)
        {
            // looping
        }
    }

    // Actual (rounded-up) capacity of the queue.
    public int Capacity()
    {
        return (int)(this.Mask + 1);
    }
}
// Padding-only base: the unused long fields pre-pad the subclass's hot fields onto
// their own cache lines to avoid false sharing. Fields must stay exactly as declared.
abstract class ConcurrentCircularArrayQueueL0Pad<T> : AbstractQueue<T>
{
#pragma warning disable 169 // padded reference
    long p00, p01, p02, p03, p04, p05, p06, p07;
    long p30, p31, p32, p33, p34, p35, p36, p37;
#pragma warning restore 169
}
}

Просмотреть файл

@ -0,0 +1,20 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Common.Internal
{
// Minimal queue abstraction used by the executor/buffer internals.
public interface IQueue<T>
{
    /// <summary>Attempts to add an element; returns false when the queue cannot accept it (e.g. full).</summary>
    bool TryEnqueue(T element);
    /// <summary>Removes and returns the head element (null when empty, per implementations seen here).</summary>
    T Dequeue();
    /// <summary>Returns the head element without removing it.</summary>
    T Peek();
    int Count { get; }
    bool IsEmpty { get; }
    /// <summary>Removes all elements.</summary>
    void Clear();
}
}

Просмотреть файл

@ -0,0 +1,331 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Common.Internal
{
using System.Diagnostics.Contracts;
using System.Threading;
/// Forked from
/// <a href="https://github.com/JCTools/JCTools">JCTools</a>
/// .
/// A Multi-Producer-Single-Consumer queue based on a {@link ConcurrentCircularArrayQueue}. This implies that
/// any thread may call the offer method, but only a single thread may call poll/peek for correctness to
/// be maintained.
/// <br />
/// This implementation follows patterns documented on the package level for False Sharing protection.
/// <br />
/// This implementation is using the
/// <a href="http://sourceforge.net/projects/mc-fastflow/">Fast Flow</a>
/// method for polling from the queue (with minor change to correctly publish the index) and an extension of
/// the Leslie Lamport concurrent queue algorithm (originated by Martin Thompson) on the producer side.
/// <br />
/// @param <E>
sealed class MpscArrayQueue<T> : MpscArrayQueueConsumerField<T>
where T : class
{
#pragma warning disable 169 // padded reference
long p40, p41, p42, p43, p44, p45, p46;
long p30, p31, p32, p33, p34, p35, p36, p37;
#pragma warning restore 169
// Capacity is rounded up to a power of two by the ConcurrentCircularArrayQueue base.
public MpscArrayQueue(int capacity)
    : base(capacity)
{
}
/// <summary>
/// Lock-free enqueue using a single CAS on the producer index. Safe to call from
/// many producer threads concurrently (the MPSC contract).
/// </summary>
/// <returns>true on success; false when the queue is full.</returns>
public override bool TryEnqueue(T e)
{
    Contract.Requires(e != null);
    // use a cached view on consumer index (potentially updated in loop)
    long mask = this.Mask;
    long capacity = mask + 1;
    long consumerIndexCache = this.ConsumerIndexCache; // LoadLoad
    long currentProducerIndex;
    do
    {
        currentProducerIndex = this.ProducerIndex; // LoadLoad
        long wrapPoint = currentProducerIndex - capacity;
        if (consumerIndexCache <= wrapPoint)
        {
            // Cached consumer index suggests we may be full; reload the real one to be sure.
            long currHead = this.ConsumerIndex; // LoadLoad
            if (currHead <= wrapPoint)
            {
                return false; // FULL :(
            }
            else
            {
                // update shared cached value of the consumerIndex
                this.ConsumerIndexCache = currHead; // StoreLoad
                // update on stack copy, we might need this value again if we lose the CAS.
                consumerIndexCache = currHead;
            }
        }
    }
    while (!this.TrySetProducerIndex(currentProducerIndex, currentProducerIndex + 1));
    // NOTE: the new producer index value is made visible BEFORE the element in the array. If we relied on
    // the index visibility to poll() we would need to handle the case where the element is not visible.
    // Won CAS, move on to storing
    long offset = CalcElementOffset(currentProducerIndex, mask);
    this.SoElement(offset, e); // StoreStore
    return true; // AWESOME :)
}
/// A wait free alternative to offer which fails on CAS failure.
/// @param e new element, not null
/// @return 1 if next element cannot be filled, -1 if CAS failed, 0 if successful
public int WeakEnqueue(T e)
{
Contract.Requires(e != null);
long mask = this.Mask;
long capacity = mask + 1;
long currentTail = this.ProducerIndex; // LoadLoad
long consumerIndexCache = this.ConsumerIndexCache; // LoadLoad
long wrapPoint = currentTail - capacity;
if (consumerIndexCache <= wrapPoint)
{
long currHead = this.ConsumerIndex; // LoadLoad
if (currHead <= wrapPoint)
{
return 1; // FULL :(
}
else
{
this.ConsumerIndexCache = currHead; // StoreLoad
}
}
// look Ma, no loop!
if (!this.TrySetProducerIndex(currentTail, currentTail + 1))
{
return -1; // CAS FAIL :(
}
// Won CAS, move on to storing
long offset = CalcElementOffset(currentTail, mask);
this.SoElement(offset, e);
return 0; // AWESOME :)
}
/// {@inheritDoc}
/// <p />
/// IMPLEMENTATION NOTES:
/// <br />
/// Lock free poll using ordered loads/stores. As class name suggests access is limited to a single thread.
/// @see java.util.Queue#poll()
public override T Dequeue()
{
long consumerIndex = this.ConsumerIndex; // LoadLoad
long offset = this.CalcElementOffset(consumerIndex);
// Copy field to avoid re-reading after volatile load
T[] buffer = this.Buffer;
// If we can't see the next available element we can't poll
T e = LvElement(buffer, offset); // LoadLoad
if (null == e)
{
// NOTE: Queue may not actually be empty in the case of a producer (P1) being interrupted after
// winning the CAS on offer but before storing the element in the queue. Other producers may go on
// to fill up the queue after this element.
if (consumerIndex != this.ProducerIndex)
{
do
{
e = LvElement(buffer, offset);
}
while (e == null);
}
else
{
return default(T);
}
}
SpElement(buffer, offset, default(T));
this.ConsumerIndex = consumerIndex + 1; // StoreStore
return e;
}
/// {@inheritDoc}
/// <p />
/// IMPLEMENTATION NOTES:
/// <br />
/// Lock free peek using ordered loads. As class name suggests access is limited to a single thread.
/// @see java.util.Queue#poll()
public override T Peek()
{
// Copy field to avoid re-reading after volatile load
T[] buffer = this.Buffer;
long consumerIndex = this.ConsumerIndex; // LoadLoad
long offset = this.CalcElementOffset(consumerIndex);
T e = LvElement(buffer, offset);
if (null == e)
{
// NOTE: Queue may not actually be empty in the case of a producer (P1) being interrupted after
// winning the CAS on offer but before storing the element in the queue. Other producers may go on
// to fill up the queue after this element.
if (consumerIndex != this.ProducerIndex)
{
do
{
e = LvElement(buffer, offset);
}
while (e == null);
}
else
{
return default(T);
}
}
return e;
}
/// {@inheritDoc}
public override int Count
{
get
{
// It is possible for a thread to be interrupted or reschedule between the read of the producer and
// consumer indices, therefore protection is required to ensure size is within valid range. In the
// event of concurrent polls/offers to this method the size is OVER estimated as we read consumer
// index BEFORE the producer index.
long after = this.ConsumerIndex;
while (true)
{
long before = after;
long currentProducerIndex = this.ProducerIndex;
after = this.ConsumerIndex;
if (before == after)
{
return (int)(currentProducerIndex - after);
}
}
}
}
public override bool IsEmpty
{
get
{
// Order matters!
// Loading consumer before producer allows for producer increments after consumer index is read.
// This ensures the correctness of this method at least for the consumer thread. Other threads POV is
// not really
// something we can fix here.
return this.ConsumerIndex == this.ProducerIndex;
}
}
}
    /// <summary>
    /// Cache-line padding between the circular-array base state and the producer index field,
    /// protecting against false sharing.
    /// </summary>
    abstract class MpscArrayQueueL1Pad<T> : ConcurrentCircularArrayQueue<T>
        where T : class
    {
#pragma warning disable 169 // padded reference
        long p10, p11, p12, p13, p14, p15, p16;
        long p30, p31, p32, p33, p34, p35, p36, p37;
#pragma warning restore 169

        protected MpscArrayQueueL1Pad(int capacity)
            : base(capacity)
        {
        }
    }
abstract class MpscArrayQueueTailField<T> : MpscArrayQueueL1Pad<T>
where T : class
{
long producerIndex;
protected MpscArrayQueueTailField(int capacity)
: base(capacity)
{
}
protected long ProducerIndex
{
get { return Volatile.Read(ref this.producerIndex); }
}
protected bool TrySetProducerIndex(long expect, long newValue)
{
return Interlocked.CompareExchange(ref this.producerIndex, newValue, expect) == expect;
}
}
    /// <summary>
    /// Cache-line padding between the producer index and the producer-side consumer-index cache,
    /// protecting against false sharing.
    /// </summary>
    abstract class MpscArrayQueueMidPad<T> : MpscArrayQueueTailField<T>
        where T : class
    {
#pragma warning disable 169 // padded reference
        long p20, p21, p22, p23, p24, p25, p26;
        long p30, p31, p32, p33, p34, p35, p36, p37;
#pragma warning restore 169

        protected MpscArrayQueueMidPad(int capacity)
            : base(capacity)
        {
        }
    }
    /// <summary>
    /// Holds a producer-side cached copy of the consumer index. Producers consult this cache first
    /// and only re-read the real (hot) consumer index when the queue appears full, reducing
    /// cross-core traffic on the consumer's cache line.
    /// </summary>
    abstract class MpscArrayQueueHeadCacheField<T> : MpscArrayQueueMidPad<T>
        where T : class
    {
        long headCache;

        protected MpscArrayQueueHeadCacheField(int capacity)
            : base(capacity)
        {
        }

        // Volatile accessors so the cached value is published across producer threads.
        protected long ConsumerIndexCache
        {
            get { return Volatile.Read(ref this.headCache); }
            set { Volatile.Write(ref this.headCache, value); }
        }
    }
    /// <summary>
    /// Cache-line padding between the consumer-index cache and the consumer index field,
    /// protecting against false sharing.
    /// </summary>
    abstract class MpscArrayQueueL2Pad<T> : MpscArrayQueueHeadCacheField<T>
        where T : class
    {
#pragma warning disable 169 // padded reference
        long p20, p21, p22, p23, p24, p25, p26;
        long p30, p31, p32, p33, p34, p35, p36, p37;
#pragma warning restore 169

        protected MpscArrayQueueL2Pad(int capacity)
            : base(capacity)
        {
        }
    }
    /// <summary>
    /// Holds the consumer index (head) of the queue. Written only by the single consumer thread;
    /// read with volatile semantics by producers to detect available capacity.
    /// </summary>
    abstract class MpscArrayQueueConsumerField<T> : MpscArrayQueueL2Pad<T>
        where T : class
    {
        long consumerIndex;

        protected MpscArrayQueueConsumerField(int capacity)
            : base(capacity)
        {
        }

        protected long ConsumerIndex
        {
            get { return Volatile.Read(ref this.consumerIndex); }
            // NOTE(review): a full volatile write is stronger (and slightly costlier) than the
            // Java original's ordered store; kept as-is pending the todo below.
            set { Volatile.Write(ref this.consumerIndex, value); } // todo: revisit: UNSAFE.putOrderedLong -- StoreStore fence
        }
    }
}

Просмотреть файл

@ -0,0 +1,22 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Common.Internal
{
using DotNetty.Common.Utilities;
public static class PlatformDependent
{
public static IQueue<T> NewFixedMpscQueue<T>(int capacity)
where T : class
{
return new MpscArrayQueue<T>(capacity);
}
public static IQueue<T> NewMpscQueue<T>()
where T : class
{
return new MpscLinkedQueue<T>();
}
}
}

Просмотреть файл

@ -0,0 +1,220 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Common
{
using System;
using System.Runtime.CompilerServices;
using System.Text;
using System.Threading;
using DotNetty.Common.Utilities;
    /// <summary>
    /// The internal data structure that stores the thread-local variables for Netty and all {@link FastThreadLocal}s.
    /// Note that this class is for internal use only and is subject to change at any time. Use {@link FastThreadLocal}
    /// unless you know what you are doing.
    /// </summary>
    public sealed class InternalThreadLocalMap
    {
        // Sentinel meaning "no value stored at this index" (distinct from a stored null).
        public static readonly object Unset = new object();

        [ThreadStatic]
        static InternalThreadLocalMap slowThreadLocalMap;

        // Global counter handing out indices into 'indexedVariables'.
        static int nextIndex;

        /// Used by {@link FastThreadLocal}
        object[] indexedVariables;

        // Core thread-locals
        int futureListenerStackDepth;
        int localChannelReaderStackDepth;

        // String-related thread-locals
        StringBuilder stringBuilder;

        // Hands out the next unique variable index; throws once the counter would overflow.
        // NOTE(review): Interlocked.Increment returns the *incremented* value, so the first
        // index handed out is 1 and slot 0 of the table is never assigned (the Java original
        // uses getAndIncrement, first index 0). Confirm FastThreadLocal's removal-set slot and
        // the "- 1" in Count below agree with this numbering.
        internal static int NextVariableIndex()
        {
            int index = Interlocked.Increment(ref nextIndex);
            if (index < 0)
            {
                Interlocked.Decrement(ref nextIndex);
                throw new InvalidOperationException("too many thread-local indexed variables");
            }
            return index;
        }

        // Returns the current thread's map, or null if none has been created yet.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static InternalThreadLocalMap GetIfSet()
        {
            return slowThreadLocalMap;
        }

        // Returns the current thread's map, creating it on first use.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static InternalThreadLocalMap Get()
        {
            InternalThreadLocalMap ret = slowThreadLocalMap;
            if (ret == null)
            {
                ret = new InternalThreadLocalMap();
                slowThreadLocalMap = ret;
            }
            return ret;
        }

        // Drops the current thread's map so it can be garbage collected.
        public static void Remove()
        {
            slowThreadLocalMap = null;
        }

        // Same effect as Remove(); kept for API parity with the Netty original.
        public static void Destroy()
        {
            slowThreadLocalMap = null;
        }

        // Cache line padding (must be public)
        // With CompressedOops enabled, an instance of this class should occupy at least 128 bytes.
        public long rp1, rp2, rp3, rp4, rp5, rp6, rp7, rp8, rp9;

        InternalThreadLocalMap()
        {
            this.indexedVariables = CreateIndexedVariableTable();
        }

        // Initial 32-slot table, every slot marked Unset.
        static object[] CreateIndexedVariableTable()
        {
            var array = new object[32];
            array.Fill(Unset);
            return array;
        }

        /// <summary>Number of thread-local variables currently set on this thread.</summary>
        public int Count
        {
            get
            {
                int count = 0;

                if (this.futureListenerStackDepth != 0)
                {
                    count++;
                }
                if (this.localChannelReaderStackDepth != 0)
                {
                    count++;
                }
                if (this.stringBuilder != null)
                {
                    count++;
                }
                foreach (object o in this.indexedVariables)
                {
                    if (o != Unset)
                    {
                        count++;
                    }
                }

                // We should subtract 1 from the count because the first element in 'indexedVariables' is reserved
                // by 'FastThreadLocal' to keep the list of 'FastThreadLocal's to remove on 'FastThreadLocal.RemoveAll()'.
                return count - 1;
            }
        }

        /// <summary>A reusable per-thread StringBuilder; its length is reset to 0 on every access.</summary>
        public StringBuilder StringBuilder
        {
            get
            {
                StringBuilder builder = this.stringBuilder;
                if (builder == null)
                {
                    this.stringBuilder = builder = new StringBuilder(512);
                }
                else
                {
                    builder.Length = 0;
                }
                return builder;
            }
        }

        public int FutureListenerStackDepth
        {
            get { return this.futureListenerStackDepth; }
            set { this.futureListenerStackDepth = value; }
        }

        public int LocalChannelReaderStackDepth
        {
            get { return this.localChannelReaderStackDepth; }
            set { this.localChannelReaderStackDepth = value; }
        }

        // Returns the value at 'index', or Unset when the index is beyond the table
        // (i.e. no value has ever been stored there).
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public object GetIndexedVariable(int index)
        {
            object[] lookup = this.indexedVariables;
            return index < lookup.Length ? lookup[index] : Unset;
        }

        /**
         * @return {@code true} if and only if a new thread-local variable has been created
         */
        public bool SetIndexedVariable(int index, object value)
        {
            object[] lookup = this.indexedVariables;
            if (index < lookup.Length)
            {
                object oldValue = lookup[index];
                lookup[index] = value;
                return oldValue == Unset;
            }
            else
            {
                this.ExpandIndexedVariableTableAndSet(index, value);
                return true;
            }
        }

        // Grows the table to the next power of two above 'index' (bit-smearing trick),
        // copies the old contents, fills the new tail with Unset, then stores the value.
        void ExpandIndexedVariableTableAndSet(int index, object value)
        {
            object[] oldArray = this.indexedVariables;
            int oldCapacity = oldArray.Length;
            int newCapacity = index;
            newCapacity |= newCapacity.RightUShift(1);
            newCapacity |= newCapacity.RightUShift(2);
            newCapacity |= newCapacity.RightUShift(4);
            newCapacity |= newCapacity.RightUShift(8);
            newCapacity |= newCapacity.RightUShift(16);
            newCapacity++;

            var newArray = new object[newCapacity];
            oldArray.CopyTo(newArray, 0);
            newArray.Fill(oldCapacity, newArray.Length - oldCapacity, Unset);
            newArray[index] = value;
            this.indexedVariables = newArray;
        }

        // Clears the slot back to Unset and returns the previous value (Unset if out of range).
        public object RemoveIndexedVariable(int index)
        {
            object[] lookup = this.indexedVariables;
            if (index < lookup.Length)
            {
                object v = lookup[index];
                lookup[index] = Unset;
                return v;
            }
            else
            {
                return Unset;
            }
        }

        public bool IsIndexedVariableSet(int index)
        {
            object[] lookup = this.indexedVariables;
            return index < lookup.Length && lookup[index] != Unset;
        }
    }
}

Просмотреть файл

@ -0,0 +1,237 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Common
{
using System;
using System.Collections.Generic;
using System.Diagnostics.Contracts;
using System.Threading;
using DotNetty.Common.Concurrency;
using DotNetty.Common.Internal;
using DotNetty.Common.Internal.Logging;
using DotNetty.Common.Utilities;
public static class ThreadDeathWatcher
{
static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance(typeof(ThreadDeathWatcher));
static readonly IQueue<Entry> PendingEntries = PlatformDependent.NewMpscQueue<Entry>();
static readonly Watcher watcher = new Watcher();
static int started;
static volatile Thread watcherThread;
static ThreadDeathWatcher()
{
string poolName = "threadDeathWatcher";
string serviceThreadPrefix = SystemPropertyUtil.Get("io.netty.serviceThreadPrefix");
if (!string.IsNullOrEmpty(serviceThreadPrefix))
{
poolName = serviceThreadPrefix + poolName;
}
}
/**
* Schedules the specified {@code task} to run when the specified {@code thread} dies.
*
* @param thread the {@link Thread} to watch
* @param task the {@link Runnable} to run when the {@code thread} dies
*
* @throws IllegalArgumentException if the specified {@code thread} is not alive
*/
public static void Watch(Thread thread, Action task)
{
Contract.Requires(thread != null);
Contract.Requires(task != null);
Contract.Requires(thread.IsAlive);
Schedule(thread, task, true);
}
/**
* Cancels the task scheduled via {@link #watch(Thread, Runnable)}.
*/
public static void Unwatch(Thread thread, Action task)
{
Contract.Requires(thread != null);
Contract.Requires(task != null);
Schedule(thread, task, false);
}
static void Schedule(Thread thread, Action task, bool isWatch)
{
PendingEntries.TryEnqueue(new Entry(thread, task, isWatch));
if (Interlocked.CompareExchange(ref started, 1, 0) == 0)
{
Thread watcherThread = new Thread(s => ((IRunnable)s).Run());
watcherThread.Start(watcher);
ThreadDeathWatcher.watcherThread = watcherThread;
}
}
/**
* Waits until the thread of this watcher has no threads to watch and terminates itself.
* Because a new watcher thread will be started again on {@link #watch(Thread, Runnable)},
* this operation is only useful when you want to ensure that the watcher thread is terminated
* <strong>after</strong> your application is shut down and there's no chance of calling
* {@link #watch(Thread, Runnable)} afterwards.
*
* @return {@code true} if and only if the watcher thread has been terminated
*/
public static bool AwaitInactivity(TimeSpan timeout)
{
Thread watcherThread = ThreadDeathWatcher.watcherThread;
if (watcherThread != null)
{
watcherThread.Join(timeout);
return !watcherThread.IsAlive;
}
else
{
return true;
}
}
sealed class Watcher : IRunnable
{
readonly List<Entry> watchees = new List<Entry>();
public void Run()
{
for (;;)
{
this.FetchWatchees();
this.NotifyWatchees();
// Try once again just in case notifyWatchees() triggered watch() or unwatch().
this.FetchWatchees();
this.NotifyWatchees();
Thread.Sleep(1000);
if (this.watchees.Count == 0 && PendingEntries.IsEmpty)
{
// Mark the current worker thread as stopped.
// The following CAS must always success and must be uncontended,
// because only one watcher thread should be running at the same time.
bool stopped = Interlocked.CompareExchange(ref started, 0, 1) == 1;
Contract.Assert(stopped);
// Check if there are pending entries added by watch() while we do CAS above.
if (PendingEntries.IsEmpty)
{
// A) watch() was not invoked and thus there's nothing to handle
// -> safe to terminate because there's nothing left to do
// B) a new watcher thread started and handled them all
// -> safe to terminate the new watcher thread will take care the rest
break;
}
// There are pending entries again, added by watch()
if (Interlocked.CompareExchange(ref started, 1, 0) != 0)
{
// watch() started a new watcher thread and set 'started' to true.
// -> terminate this thread so that the new watcher reads from pendingEntries exclusively.
break;
}
// watch() added an entry, but this worker was faster to set 'started' to true.
// i.e. a new watcher thread was not started
// -> keep this thread alive to handle the newly added entries.
}
}
}
void FetchWatchees()
{
for (;;)
{
Entry e = PendingEntries.Dequeue();
if (e == null)
{
break;
}
if (e.IsWatch)
{
this.watchees.Add(e);
}
else
{
this.watchees.Remove(e);
}
}
}
void NotifyWatchees()
{
List<Entry> watchees = this.watchees;
for (int i = 0; i < watchees.Count;)
{
Entry e = watchees[i];
if (!e.Thread.IsAlive)
{
watchees.RemoveAt(i);
try
{
e.Task();
}
catch (Exception t)
{
Logger.Warn("Thread death watcher task raised an exception:", t);
}
}
else
{
i++;
}
}
}
}
sealed class Entry : MpscLinkedQueueNode<Entry>
{
internal readonly Thread Thread;
internal readonly Action Task;
internal readonly bool IsWatch;
public Entry(Thread thread, Action task, bool isWatch)
{
this.Thread = thread;
this.Task = task;
this.IsWatch = isWatch;
}
public override Entry Value
{
get { return this; }
}
public override int GetHashCode()
{
return this.Thread.GetHashCode() ^ this.Task.GetHashCode();
}
public override bool Equals(object obj)
{
if (obj == this)
{
return true;
}
if (!(obj is Entry))
{
return false;
}
var that = (Entry)obj;
return this.Thread == that.Thread && this.Task == that.Task;
}
}
}
}

Просмотреть файл

@ -4,8 +4,8 @@
namespace DotNetty.Common
{
using System;
using System.Collections.Generic;
using System.Diagnostics.Contracts;
using System.Runtime.CompilerServices;
using System.Threading;
public class ThreadLocalPool
@ -36,7 +36,7 @@ namespace DotNetty.Common
return;
}
Dictionary<Stack, WeakOrderQueue> queueDictionary = DelayedPool.Value;
ConditionalWeakTable<Stack, WeakOrderQueue> queueDictionary = DelayedPool.Value;
WeakOrderQueue queue;
if (!queueDictionary.TryGetValue(stack, out queue))
{
@ -373,8 +373,15 @@ namespace DotNetty.Common
static int idSource = int.MinValue;
static readonly int ownThreadId = Interlocked.Increment(ref idSource);
internal static readonly ThreadLocal<Dictionary<Stack, WeakOrderQueue>> DelayedPool =
new ThreadLocal<Dictionary<Stack, WeakOrderQueue>>(() => new Dictionary<Stack, WeakOrderQueue>());
internal static readonly DelayedThreadLocal DelayedPool = new DelayedThreadLocal();
internal sealed class DelayedThreadLocal : FastThreadLocal<ConditionalWeakTable<Stack, WeakOrderQueue>>
{
protected override ConditionalWeakTable<Stack, WeakOrderQueue> GetInitialValue()
{
return new ConditionalWeakTable<Stack, WeakOrderQueue>();
}
}
public ThreadLocalPool(int maxCapacity)
{
@ -388,7 +395,7 @@ namespace DotNetty.Common
public sealed class ThreadLocalPool<T> : ThreadLocalPool
where T : class
{
readonly ThreadLocal<Stack> threadLocal;
readonly ThreadLocalStack threadLocal;
readonly Func<Handle, T> valueFactory;
readonly bool preCreate;
@ -409,23 +416,10 @@ namespace DotNetty.Common
this.preCreate = preCreate;
this.threadLocal = new ThreadLocal<Stack>(this.InitializeStorage, true);
this.threadLocal = new ThreadLocalStack(this);
this.valueFactory = valueFactory;
}
Stack InitializeStorage()
{
var stack = new Stack(this.MaxCapacity, this, Thread.CurrentThread);
if (this.preCreate)
{
for (int i = 0; i < this.MaxCapacity; i++)
{
stack.Push(this.CreateValue(stack));
}
}
return stack;
}
public T Take()
{
Stack stack = this.threadLocal.Value;
@ -459,5 +453,28 @@ namespace DotNetty.Common
internal int ThreadLocalCapacity => this.threadLocal.Value.elements.Length;
internal int ThreadLocalSize => this.threadLocal.Value.Size;
sealed class ThreadLocalStack : FastThreadLocal<Stack>
{
readonly ThreadLocalPool<T> owner;
public ThreadLocalStack(ThreadLocalPool<T> owner)
{
this.owner = owner;
}
protected override Stack GetInitialValue()
{
var stack = new Stack(this.owner.MaxCapacity, this.owner, Thread.CurrentThread);
if (this.owner.preCreate)
{
for (int i = 0; i < this.owner.MaxCapacity; i++)
{
stack.Push(this.owner.CreateValue(stack));
}
}
return stack;
}
}
}
}

Просмотреть файл

@ -9,11 +9,11 @@ namespace DotNetty.Common.Utilities
/// <summary>
/// Extension methods used for slicing byte arrays
/// </summary>
public static class ByteArrayExtensions
public static class ArrayExtensions
{
public static readonly byte[] Empty = new byte[0];
public static readonly byte[] ZeroBytes = new byte[0];
public static byte[] Slice(this byte[] array, int length)
public static T[] Slice<T>(this T[] array, int length)
{
Contract.Requires(array != null);
@ -24,7 +24,7 @@ namespace DotNetty.Common.Utilities
return Slice(array, 0, length);
}
public static byte[] Slice(this byte[] array, int index, int length)
public static T[] Slice<T>(this T[] array, int index, int length)
{
Contract.Requires(array != null);
@ -32,17 +32,17 @@ namespace DotNetty.Common.Utilities
{
throw new ArgumentOutOfRangeException(nameof(length), $"index: ({index}), length({length}) index + length cannot be longer than Array.length({array.Length})");
}
var result = new byte[length];
var result = new T[length];
Array.Copy(array, index, result, 0, length);
return result;
}
public static void SetRange(this byte[] array, int index, byte[] src)
public static void SetRange<T>(this T[] array, int index, T[] src)
{
SetRange(array, index, src, 0, src.Length);
}
public static void SetRange(this byte[] array, int index, byte[] src, int srcIndex, int srcLength)
public static void SetRange<T>(this T[] array, int index, T[] src, int srcIndex, int srcLength)
{
Contract.Requires(array != null);
Contract.Requires(src != null);
@ -57,5 +57,23 @@ namespace DotNetty.Common.Utilities
Array.Copy(src, srcIndex, array, index, srcLength);
}
public static void Fill<T>(this T[] array, T value)
{
for (int i = 0; i < array.Length; i++)
{
array[i] = value;
}
}
public static void Fill<T>(this T[] array, int offset, int count, T value)
{
Contract.Requires(count + offset <= array.Length);
for (int i = offset; i < count + offset; i++)
{
array[i] = value;
}
}
}
}

Просмотреть файл

@ -11,7 +11,6 @@ namespace DotNetty.Common.Utilities
public sealed class AtomicReference<T>
where T : class
{
// ReSharper disable once InconsistentNaming
T atomicValue;
/// <summary>

Просмотреть файл

@ -0,0 +1,43 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Common.Utilities
{
public static class IntegerExtensions
{
static readonly int[] MultiplyDeBruijnBitPosition =
{
0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30,
8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31
};
public const int SizeInBits = sizeof(int) * 8;
public static int RoundUpToPowerOfTwo(int res)
{
if (res <= 2)
{
return 2;
}
res--;
res |= res >> 1;
res |= res >> 2;
res |= res >> 4;
res |= res >> 8;
res |= res >> 16;
res++;
return res;
}
public static int Log2(int v)
{
v |= v >> 1; // first round down to one less than a power of 2
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
return MultiplyDeBruijnBitPosition[unchecked((uint)(v * 0x07C4ACDDU) >> 27)];
}
}
}

Просмотреть файл

@ -7,8 +7,9 @@ namespace DotNetty.Common.Utilities
using System.Collections.Generic;
using System.Diagnostics.Contracts;
using System.Threading;
using DotNetty.Common.Internal;
sealed class MpscLinkedQueue<T> : MpscLinkedQueueTailRef<T>, IEnumerable<T>
sealed class MpscLinkedQueue<T> : MpscLinkedQueueTailRef<T>, IEnumerable<T>, IQueue<T>
where T : class
{
#pragma warning disable 169 // padded reference
@ -60,7 +61,7 @@ namespace DotNetty.Common.Utilities
return next;
}
public bool Enqueue(T value)
public bool TryEnqueue(T value)
{
Contract.Requires(value != null);

Просмотреть файл

@ -0,0 +1,15 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Common.Utilities
{
using System;
public static class RandomExtensions
{
public static long NextLong(this Random random)
{
return random.Next() << 32 & unchecked((uint)random.Next());
}
}
}

Просмотреть файл

@ -4,6 +4,7 @@
namespace DotNetty.Common.Utilities
{
using System;
using System.Threading;
using DotNetty.Common.Internal.Logging;
public sealed class ReferenceCountUtil
@ -136,5 +137,52 @@ namespace DotNetty.Common.Utilities
}
}
}
        /// <summary>
        /// Schedules the specified object to be released (by a decrement of 1) when the caller thread
        /// terminates. Note that this operation is intended to simplify reference counting of ephemeral
        /// objects during unit tests. Do not use it beyond the intended use case.
        /// </summary>
        public static T ReleaseLater<T>(T msg)
        {
            return ReleaseLater(msg, 1);
        }

        /// <summary>
        /// Schedules the specified object to be released by <paramref name="decrement"/> when the caller
        /// thread terminates. Note that this operation is intended to simplify reference counting of
        /// ephemeral objects during unit tests. Do not use it beyond the intended use case.
        /// Non-<see cref="IReferenceCounted"/> messages are returned unchanged.
        /// </summary>
        public static T ReleaseLater<T>(T msg, int decrement)
        {
            var referenceCounted = msg as IReferenceCounted;
            if (referenceCounted != null)
            {
                ThreadDeathWatcher.Watch(Thread.CurrentThread, () =>
                {
                    try
                    {
                        if (!referenceCounted.Release(decrement))
                        {
                            // NOTE(review): "{}" placeholders are Java/Netty logging style —
                            // confirm IInternalLogger supports them (vs. composite "{0}" format).
                            Logger.Warn("Non-zero refCnt: {}", FormatReleaseString(referenceCounted, decrement));
                        }
                        else
                        {
                            Logger.Debug("Released: {}", FormatReleaseString(referenceCounted, decrement));
                        }
                    }
                    catch (Exception ex)
                    {
                        Logger.Warn("Failed to release an object: {}", referenceCounted, ex);
                    }
                });
            }
            return msg;
        }

        // Builds a human-readable description of a release operation for log output.
        static string FormatReleaseString(IReferenceCounted referenceCounted, int decrement)
        {
            return referenceCounted.GetType().Name + ".Release(" + decrement + ") refCnt: " + referenceCounted.ReferenceCount;
        }
}
}

Просмотреть файл

@ -17,7 +17,7 @@ namespace DotNetty.Transport.Channels
{
static readonly TimeSpan DefaultConnectTimeout = TimeSpan.FromSeconds(30);
volatile IByteBufferAllocator allocator = UnpooledByteBufferAllocator.Default;
volatile IByteBufferAllocator allocator = ByteBufferUtil.DefaultAllocator;
volatile IRecvByteBufAllocator recvByteBufAllocator = FixedRecvByteBufAllocator.Default;
volatile IMessageSizeEstimator messageSizeEstimator = DefaultMessageSizeEstimator.Default;

Просмотреть файл

@ -5,6 +5,7 @@ namespace DotNetty.Transport.Channels.Sockets
{
using System;
using System.Net.Sockets;
using System.Threading;
using DotNetty.Buffers;
/// <summary>
@ -185,7 +186,18 @@ namespace DotNetty.Transport.Channels.Sockets
protected override void ScheduleSocketRead()
{
SocketChannelAsyncOperation operation = this.ReadOperation;
bool pending = this.Socket.ReceiveAsync(operation);
bool pending;
if (ExecutionContext.IsFlowSuppressed())
{
pending = this.Socket.ReceiveAsync(operation);
}
else
{
using (ExecutionContext.SuppressFlow())
{
pending = this.Socket.ReceiveAsync(operation);
}
}
if (!pending)
{
// todo: potential allocation / non-static field?
@ -329,7 +341,20 @@ namespace DotNetty.Transport.Channels.Sockets
SocketChannelAsyncOperation operation = this.PrepareWriteOperation(buffer);
this.SetState(StateFlags.WriteScheduled);
bool pending = this.Socket.SendAsync(operation);
bool pending;
if (ExecutionContext.IsFlowSuppressed())
{
pending = this.Socket.SendAsync(operation);
}
else
{
using (ExecutionContext.SuppressFlow())
{
pending = this.Socket.SendAsync(operation);
}
}
if (!pending)
{
((ISocketChannelUnsafe)this.Unsafe).FinishWrite(operation);

Просмотреть файл

@ -22,7 +22,7 @@ namespace DotNetty.Transport.Channels.Sockets
this.Completed += AbstractSocketChannel.IoCompletedCallback;
if (setEmptyBuffer)
{
this.SetBuffer(ByteArrayExtensions.Empty, 0, 0);
this.SetBuffer(ArrayExtensions.ZeroBytes, 0, 0);
}
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -0,0 +1,66 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Buffers.Tests
{
using Xunit;
public abstract class AbstractPooledByteBufTest : AbstractByteBufTest
{
protected abstract IByteBuffer Alloc(int length);
protected override IByteBuffer NewBuffer(int length)
{
IByteBuffer buffer = this.Alloc(length);
Assert.Equal(0, buffer.WriterIndex);
Assert.Equal(0, buffer.ReaderIndex);
return buffer;
}
[Fact]
public void TestDiscardMarks()
{
this.TestDiscardMarks(4);
}
[Fact]
public void TestDiscardMarksUnpooled()
{
this.TestDiscardMarks(32 * 1024 * 1024);
}
void TestDiscardMarks(int capacity)
{
IByteBuffer buf = this.NewBuffer(capacity);
buf.WriteShort(1);
buf.SkipBytes(1);
buf.MarkReaderIndex();
buf.MarkWriterIndex();
Assert.True(buf.Release());
IByteBuffer buf2 = this.NewBuffer(capacity);
Assert.Same(UnwrapIfNeeded(buf), UnwrapIfNeeded(buf2));
buf2.WriteShort(1);
buf2.ResetReaderIndex();
buf2.ResetWriterIndex();
Assert.Equal(0, buf2.ReaderIndex);
Assert.Equal(0, buf2.WriterIndex);
Assert.True(buf2.Release());
}
static IByteBuffer UnwrapIfNeeded(IByteBuffer buf)
{
if (buf is AdvancedLeakAwareByteBuffer || buf is SimpleLeakAwareByteBuffer)
{
return buf.Unwrap();
}
return buf;
}
}
}

Просмотреть файл

@ -81,12 +81,15 @@
</Otherwise>
</Choose>
<ItemGroup>
<Compile Include="AbstractPooledByteBufTest.cs" />
<Compile Include="ByteBufferDerivationTests.cs" />
<Compile Include="LeakDetectionTest.cs" />
<Compile Include="PooledBigEndianHeapByteBufTest.cs" />
<Compile Include="PortionedMemoryStream.cs" />
<Compile Include="AbstractByteBufferTests.cs" />
<Compile Include="Properties\AssemblyInfo.cs" />
<Compile Include="PooledBufferAllocatorTests.cs" />
<Compile Include="AbstractByteBufTest.cs" />
</ItemGroup>
<ItemGroup>
<None Include="packages.config" />

Просмотреть файл

@ -5,6 +5,7 @@ namespace DotNetty.Buffers.Tests
{
using System;
using System.Diagnostics.Tracing;
using System.Runtime.CompilerServices;
using DotNetty.Common;
using DotNetty.Common.Internal.Logging;
using Microsoft.Practices.EnterpriseLibrary.SemanticLogging;
@ -20,24 +21,36 @@ namespace DotNetty.Buffers.Tests
[Fact]
public void UnderReleaseBufferLeak()
{
var eventListener = new ObservableEventListener();
Mock<IObserver<EventEntry>> logListener = this.mockRepo.Create<IObserver<EventEntry>>();
var eventTextFormatter = new EventTextFormatter();
Func<EventEntry, bool> leakPredicate = y => y.TryFormatAsString(eventTextFormatter).Contains("LEAK");
logListener.Setup(x => x.OnNext(It.Is<EventEntry>(y => leakPredicate(y)))).Verifiable();
logListener.Setup(x => x.OnNext(It.Is<EventEntry>(y => !leakPredicate(y))));
eventListener.Subscribe(logListener.Object);
eventListener.EnableEvents(DefaultEventSource.Log, EventLevel.Verbose);
ResourceLeakDetector.DetectionLevel preservedLevel = ResourceLeakDetector.Level;
try
{
ResourceLeakDetector.Level = ResourceLeakDetector.DetectionLevel.Paranoid;
var eventListener = new ObservableEventListener();
Mock<IObserver<EventEntry>> logListener = this.mockRepo.Create<IObserver<EventEntry>>();
var eventTextFormatter = new EventTextFormatter();
Func<EventEntry, bool> leakPredicate = y => y.TryFormatAsString(eventTextFormatter).Contains("LEAK");
logListener.Setup(x => x.OnNext(It.Is<EventEntry>(y => leakPredicate(y)))).Verifiable();
logListener.Setup(x => x.OnNext(It.Is<EventEntry>(y => !leakPredicate(y))));
eventListener.Subscribe(logListener.Object);
eventListener.EnableEvents(DefaultEventSource.Log, EventLevel.Verbose);
var bufPool = new PooledByteBufferAllocator(100, 1000);
IByteBuffer buffer = bufPool.Buffer(10);
this.CreateAndForgetBuffer();
buffer = null;
GC.Collect();
GC.WaitForPendingFinalizers();
GC.Collect();
GC.WaitForPendingFinalizers();
this.mockRepo.Verify();
}
finally
{
ResourceLeakDetector.Level = preservedLevel;
}
}
this.mockRepo.Verify();
[MethodImpl(MethodImplOptions.NoInlining)]
void CreateAndForgetBuffer()
{
IByteBuffer forgotten = PooledByteBufferAllocator.Default.Buffer(10);
}
[Fact]
@ -47,10 +60,9 @@ namespace DotNetty.Buffers.Tests
try
{
ResourceLeakDetector.Level = ResourceLeakDetector.DetectionLevel.Paranoid;
var bufPool = new PooledByteBufferAllocator(100, 1000);
IByteBuffer buffer = bufPool.Buffer(10);
IByteBuffer buffer = PooledByteBufferAllocator.Default.Buffer(10);
buffer.Release();
buffer = bufPool.Buffer(10);
buffer = PooledByteBufferAllocator.Default.Buffer(10);
buffer.Release();
}
finally

Просмотреть файл

@ -0,0 +1,13 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace DotNetty.Buffers.Tests
{
    /// <summary>
    /// Runs the shared pooled-buffer test suite against big-endian heap buffers
    /// obtained from the default <see cref="PooledByteBufferAllocator"/>.
    /// </summary>
    public class PooledBigEndianHeapByteBufTest : AbstractPooledByteBufTest
    {
        // Delegate allocation to the shared default allocator instance.
        protected override IByteBuffer Alloc(int length) => PooledByteBufferAllocator.Default.Buffer(length);
    }
}

Просмотреть файл

@ -3,11 +3,13 @@
namespace DotNetty.Buffers.Tests
{
using DotNetty.Common.Utilities;
using Xunit;
public class PooledBufferAllocatorTests
{
[Theory]
[InlineData(8000, 32000, new[] { 1024, 0, 10 * 1024 })]
[InlineData(16 * 1024, 10, new[] { 16 * 1024 - 100, 8 * 1024 })]
[InlineData(16 * 1024, 0, new[] { 16 * 1024 - 100, 8 * 1024 })]
[InlineData(1024, 2 * 1024, new[] { 16 * 1024 - 100, 8 * 1024 })]
@ -15,7 +17,7 @@ namespace DotNetty.Buffers.Tests
[InlineData(1024, 0, new[] { 1024, 0, 10 * 1024 })]
public void PooledBufferGrowTest(int bufferSize, int startSize, int[] writeSizes)
{
var alloc = new PooledByteBufferAllocator(bufferSize, int.MaxValue);
var alloc = new PooledByteBufferAllocator();
IByteBuffer buffer = alloc.Buffer(startSize);
int wrote = 0;
foreach (int size in writeSizes)

Просмотреть файл

@ -82,8 +82,8 @@ namespace DotNetty.Transport.Tests.Performance.Sockets
this.signal = new ManualResetEventSlimReadFinishedSignal(this.ResetEvent);
// reserve up to 10mb of 16kb buffers on both client and server; we're only sending about 700k worth of messages
this.serverBufferAllocator = new PooledByteBufferAllocator(256, 10 * 1024 * 1024 / Environment.ProcessorCount);
this.clientBufferAllocator = new PooledByteBufferAllocator(256, 10 * 1024 * 1024 / Environment.ProcessorCount);
this.serverBufferAllocator = new PooledByteBufferAllocator();
this.clientBufferAllocator = new PooledByteBufferAllocator();
ServerBootstrap sb = new ServerBootstrap()
.Group(this.ServerGroup, this.WorkerGroup)