Mirror of https://github.com/Azure/DotNetty.git

Merge pull request #101 from nayato/bufferpool

PooledByteBufferAllocator port, Byte Buffers improvements

Commit 45c8c70649
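For orientation, here is a minimal usage sketch (not part of the commit) of the pooled allocator this pull request introduces. It assumes only members that appear in the diff below (PooledByteBufferAllocator.Default, IByteBufferAllocator.Buffer, IByteBuffer.WriteInt and Release); the class and method names are illustrative.

// Hypothetical example, not from the diff: allocate from the pool, use the buffer, release it.
using DotNetty.Buffers;

static class PooledAllocationSketch
{
    static void Run()
    {
        IByteBufferAllocator allocator = PooledByteBufferAllocator.Default;
        IByteBuffer buffer = allocator.Buffer(256, int.MaxValue);   // initial capacity of 256 bytes
        try
        {
            buffer.WriteInt(42);                                    // write into pooled memory
        }
        finally
        {
            buffer.Release();                                       // return the memory to the pool
        }
    }
}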
@@ -1,7 +1,7 @@
 Microsoft Visual Studio Solution File, Format Version 12.00
 # Visual Studio 14
-VisualStudioVersion = 14.0.24720.0
+VisualStudioVersion = 14.0.25123.0
 MinimumVisualStudioVersion = 10.0.40219.1
 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "DotNetty.Common", "src\DotNetty.Common\DotNetty.Common.csproj", "{DE58FE41-5E99-44E5-86BC-FC9ED8761DAF}"
 EndProject

@@ -74,8 +74,8 @@ Global
 {5DE3C557-48BF-4CDB-9F47-474D343DD841}.Debug|Any CPU.Build.0 = Debug|Any CPU
 {5DE3C557-48BF-4CDB-9F47-474D343DD841}.Release|Any CPU.ActiveCfg = Release|Any CPU
 {5DE3C557-48BF-4CDB-9F47-474D343DD841}.Release|Any CPU.Build.0 = Release|Any CPU
-{5DE3C557-48BF-4CDB-9F47-474D343DD841}.Signed|Any CPU.ActiveCfg = Release|Any CPU
-{5DE3C557-48BF-4CDB-9F47-474D343DD841}.Signed|Any CPU.Build.0 = Release|Any CPU
+{5DE3C557-48BF-4CDB-9F47-474D343DD841}.Signed|Any CPU.ActiveCfg = Signed|Any CPU
+{5DE3C557-48BF-4CDB-9F47-474D343DD841}.Signed|Any CPU.Build.0 = Signed|Any CPU
 {8218C9EE-0A4A-432F-A12A-B54202F97B05}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
 {8218C9EE-0A4A-432F-A12A-B54202F97B05}.Debug|Any CPU.Build.0 = Debug|Any CPU
 {8218C9EE-0A4A-432F-A12A-B54202F97B05}.Release|Any CPU.ActiveCfg = Release|Any CPU
@@ -2,8 +2,10 @@
 <s:Boolean x:Key="/Default/CodeInspection/ExcludedFiles/FileMasksToSkip/=_002A_002Emin_002Ejs/@EntryIndexedValue">True</s:Boolean>
 <s:Boolean x:Key="/Default/CodeInspection/Highlighting/IdentifierHighlightingEnabled/@EntryValue">True</s:Boolean>
 <s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=BuiltInTypeReferenceStyle/@EntryIndexedValue">WARNING</s:String>
-<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=ConvertClosureToMethodGroup/@EntryIndexedValue">HINT</s:String>
+<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=ConvertClosureToMethodGroup/@EntryIndexedValue">DO_NOT_SHOW</s:String>
 <s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=ConvertIfStatementToConditionalTernaryExpression/@EntryIndexedValue">HINT</s:String>
+<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=ConvertMethodToExpressionBody/@EntryIndexedValue">SUGGESTION</s:String>
+<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=ConvertToExpressionBodyWhenPossible/@EntryIndexedValue">SUGGESTION</s:String>
 <s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=IntroduceOptionalParameters_002ELocal/@EntryIndexedValue">DO_NOT_SHOW</s:String>
 <s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=LoopCanBeConvertedToQuery/@EntryIndexedValue">DO_NOT_SHOW</s:String>
 <s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=MemberCanBeMadeStatic_002ELocal/@EntryIndexedValue">HINT</s:String>

@@ -19,6 +21,7 @@
 <s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=SuggestVarOrType_005FBuiltInTypes/@EntryIndexedValue">WARNING</s:String>
 <s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=SuggestVarOrType_005FElsewhere/@EntryIndexedValue">WARNING</s:String>
 <s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=SuggestVarOrType_005FSimpleTypes/@EntryIndexedValue">WARNING</s:String>
+<s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=ThreadStaticAtInstanceField/@EntryIndexedValue">ERROR</s:String>
 <s:String x:Key="/Default/CodeInspection/Highlighting/InspectionSeverities/=UseObjectOrCollectionInitializer/@EntryIndexedValue">HINT</s:String>
 <s:String x:Key="/Default/CodeStyle/CodeCleanup/Profiles/=Simple/@EntryIndexedValue"><?xml version="1.0" encoding="utf-16"?><Profile name="Simple"><CSArrangeThisQualifier>True</CSArrangeThisQualifier><CSUseVar><BehavourStyle>CAN_CHANGE_TO_IMPLICIT</BehavourStyle><LocalVariableStyle>IMPLICIT_WHEN_INITIALIZER_HAS_TYPE</LocalVariableStyle><ForeachVariableStyle>IMPLICIT_EXCEPT_PRIMITIVE_TYPES</ForeachVariableStyle></CSUseVar><CSUpdateFileHeader>True</CSUpdateFileHeader><CSOptimizeUsings><OptimizeUsings>True</OptimizeUsings><EmbraceInRegion>False</EmbraceInRegion><RegionName></RegionName></CSOptimizeUsings><CSReformatCode>True</CSReformatCode><StyleCop.Documentation><SA1600ElementsMustBeDocumented>False</SA1600ElementsMustBeDocumented><SA1604ElementDocumentationMustHaveSummary>False</SA1604ElementDocumentationMustHaveSummary><SA1609PropertyDocumentationMustHaveValueDocumented>False</SA1609PropertyDocumentationMustHaveValueDocumented><SA1611ElementParametersMustBeDocumented>False</SA1611ElementParametersMustBeDocumented><SA1615ElementReturnValueMustBeDocumented>False</SA1615ElementReturnValueMustBeDocumented><SA1617VoidReturnValueMustNotBeDocumented>False</SA1617VoidReturnValueMustNotBeDocumented><SA1618GenericTypeParametersMustBeDocumented>False</SA1618GenericTypeParametersMustBeDocumented><SA1626SingleLineCommentsMustNotUseDocumentationStyleSlashes>False</SA1626SingleLineCommentsMustNotUseDocumentationStyleSlashes><SA1628DocumentationTextMustBeginWithACapitalLetter>False</SA1628DocumentationTextMustBeginWithACapitalLetter><SA1629DocumentationTextMustEndWithAPeriod>False</SA1629DocumentationTextMustEndWithAPeriod><SA1633SA1641UpdateFileHeader>ReplaceAll</SA1633SA1641UpdateFileHeader><SA1639FileHeaderMustHaveSummary>False</SA1639FileHeaderMustHaveSummary><SA1642ConstructorSummaryDocumentationMustBeginWithStandardText>False</SA1642ConstructorSummaryDocumentationMustBeginWithStandardText><SA1643DestructorSummaryDocumentationMustBeginWithStandardText>False</SA1643DestructorSummaryDocumentationMustBeginWithStandardText><SA1644DocumentationHeadersMustNotContainBlankLines>False</SA1644DocumentationHeadersMustNotContainBlankLines></StyleCop.Documentation><CSShortenReferences>True</CSShortenReferences><CSFixBuiltinTypeReferences>True</CSFixBuiltinTypeReferences><CSArrangeQualifiers>True</CSArrangeQualifiers><CSEnforceVarKeywordUsageSettings>True</CSEnforceVarKeywordUsageSettings><CSMakeFieldReadonly>True</CSMakeFieldReadonly><CSharpFormatDocComments>True</CSharpFormatDocComments><CSArrangeTypeModifiers>True</CSArrangeTypeModifiers><CSArrangeTypeMemberModifiers>True</CSArrangeTypeMemberModifiers><CSSortModifiers>True</CSSortModifiers><CSCodeStyleAttributes ArrangeTypeAccessModifier="True" ArrangeTypeMemberAccessModifier="True" SortModifiers="True" RemoveRedundantParentheses="False" AddMissingParentheses="True" ArrangeAttributes="False" /></Profile></s:String>
 <s:String x:Key="/Default/CodeStyle/CodeCleanup/RecentlyUsedProfile/@EntryValue">Simple</s:String>

@@ -79,25 +79,13 @@ namespace DotNetty.Buffers
 
         public virtual int MaxWritableBytes => this.MaxCapacity - this.WriterIndex;
 
-        public bool IsReadable()
-        {
-            return this.IsReadable(1);
-        }
+        public bool IsReadable() => this.IsReadable(1);
 
-        public bool IsReadable(int size)
-        {
-            return this.ReadableBytes >= size;
-        }
+        public bool IsReadable(int size) => this.ReadableBytes >= size;
 
-        public bool IsWritable()
-        {
-            return this.IsWritable(1);
-        }
+        public bool IsWritable() => this.IsWritable(1);
 
-        public bool IsWritable(int size)
-        {
-            return this.WritableBytes >= size;
-        }
+        public bool IsWritable(int size) => this.WritableBytes >= size;
 
         public virtual IByteBuffer Clear()
         {

@@ -262,10 +250,7 @@ namespace DotNetty.Buffers
             return Math.Min(newCapacity, maxCapacity);
         }
 
-        public virtual bool GetBoolean(int index)
-        {
-            return this.GetByte(index) != 0;
-        }
+        public virtual bool GetBoolean(int index) => this.GetByte(index) != 0;
 
         public virtual byte GetByte(int index)
         {

@@ -315,15 +300,9 @@ namespace DotNetty.Buffers
 
         protected abstract long _GetLong(int index);
 
-        public virtual char GetChar(int index)
-        {
-            return Convert.ToChar(this.GetShort(index));
-        }
+        public virtual char GetChar(int index) => Convert.ToChar(this.GetShort(index));
 
-        public virtual double GetDouble(int index)
-        {
-            return BitConverter.Int64BitsToDouble(this.GetLong(index));
-        }
+        public virtual double GetDouble(int index) => BitConverter.Int64BitsToDouble(this.GetLong(index));
 
         public virtual IByteBuffer GetBytes(int index, IByteBuffer destination)
         {
@@ -334,6 +313,7 @@ namespace DotNetty.Buffers
         public virtual IByteBuffer GetBytes(int index, IByteBuffer destination, int length)
         {
             this.GetBytes(index, destination, destination.WriterIndex, length);
+            destination.SetWriterIndex(destination.WriterIndex + length);
             return this;
         }
 
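The added SetWriterIndex call fixes GetBytes(index, destination, length) so that the destination's writer index advances by the number of bytes copied. A short sketch of the corrected behavior, using only members shown elsewhere in this diff (the wrapper class is illustrative):

using DotNetty.Buffers;

static class GetBytesSketch
{
    static void Run()
    {
        IByteBuffer source = Unpooled.Buffer(16, 16);
        source.WriteInt(0x0A0B0C0D);

        IByteBuffer destination = Unpooled.Buffer(16, 16);
        source.GetBytes(source.ReaderIndex, destination, 4);

        // Previously the bytes were copied but destination.WriterIndex stayed at 0,
        // so the copied data was not readable; now ReadableBytes reports 4.
        int readable = destination.ReadableBytes;
    }
}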
@@ -452,10 +432,7 @@ namespace DotNetty.Buffers
 
         public abstract Task<int> SetBytesAsync(int index, Stream src, int length, CancellationToken cancellationToken);
 
-        public virtual bool ReadBoolean()
-        {
-            return this.ReadByte() != 0;
-        }
+        public virtual bool ReadBoolean() => this.ReadByte() != 0;
 
         public virtual byte ReadByte()
         {

@@ -506,15 +483,9 @@ namespace DotNetty.Buffers
             return v;
         }
 
-        public virtual char ReadChar()
-        {
-            return (char)this.ReadShort();
-        }
+        public virtual char ReadChar() => (char)this.ReadShort();
 
-        public virtual double ReadDouble()
-        {
-            return BitConverter.Int64BitsToDouble(this.ReadLong());
-        }
+        public virtual double ReadDouble() => BitConverter.Int64BitsToDouble(this.ReadLong());
 
         public IByteBuffer ReadBytes(int length)
         {
@@ -524,7 +495,7 @@ namespace DotNetty.Buffers
                 return Unpooled.Empty;
             }
 
-            IByteBuffer buf = Unpooled.Buffer(length, this.MaxCapacity);
+            IByteBuffer buf = this.Allocator.Buffer(length, this.MaxCapacity);
             buf.WriteBytes(this, this.ReaderIndex, length);
             this.ReaderIndex += length;
             return buf;
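With this change ReadBytes allocates the returned buffer from the source buffer's own allocator instead of always going through Unpooled, so reads from a pooled buffer stay inside the pool. A hedged sketch of the effect (names are illustrative):

using DotNetty.Buffers;

static class ReadBytesSketch
{
    static void Run()
    {
        IByteBuffer source = PooledByteBufferAllocator.Default.Buffer(64, 64);
        source.WriteLong(1234567890L);

        IByteBuffer copy = source.ReadBytes(8);   // now allocated via source.Allocator (the pool)
        // ... use copy ...
        copy.Release();
        source.Release();
    }
}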
@@ -592,6 +563,7 @@ namespace DotNetty.Buffers
 
         public virtual IByteBuffer WriteByte(int value)
         {
+            this.EnsureAccessible();
             this.EnsureWritable(1);
             this.SetByte(this.WriterIndex, value);
             this.WriterIndex += 1;

@@ -600,6 +572,7 @@ namespace DotNetty.Buffers
 
         public virtual IByteBuffer WriteShort(int value)
         {
+            this.EnsureAccessible();
             this.EnsureWritable(2);
             this._SetShort(this.WriterIndex, value);
             this.WriterIndex += 2;

@@ -617,6 +590,7 @@ namespace DotNetty.Buffers
 
         public virtual IByteBuffer WriteInt(int value)
         {
+            this.EnsureAccessible();
             this.EnsureWritable(4);
             this._SetInt(this.WriterIndex, value);
             this.WriterIndex += 4;

@@ -634,6 +608,7 @@ namespace DotNetty.Buffers
 
         public virtual IByteBuffer WriteLong(long value)
         {
+            this.EnsureAccessible();
             this.EnsureWritable(8);
             this._SetLong(this.WriterIndex, value);
             this.WriterIndex += 8;
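The EnsureAccessible calls added to the Write* methods guard against writing into a buffer whose reference count has already reached zero, which matters once released memory is recycled by the pool. A sketch of the behavior being enforced; the assumption that EnsureAccessible surfaces as an IllegalReferenceCountException is not shown in this hunk, although the project does define that exception type:

using DotNetty.Buffers;

static class EnsureAccessibleSketch
{
    static void Run()
    {
        IByteBuffer buffer = PooledByteBufferAllocator.Default.Buffer(16, 16);
        buffer.Release();                 // reference count reaches 0; memory goes back to the pool

        try
        {
            buffer.WriteInt(1);           // rejected instead of corrupting recycled pool memory
        }
        catch (IllegalReferenceCountException)
        {
            // expected: the buffer is no longer accessible
        }
    }
}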
@@ -710,10 +685,7 @@ namespace DotNetty.Buffers
             this.SetWriterIndex(writerIndex + wrote);
         }
 
-        public Task WriteBytesAsync(Stream stream, int length)
-        {
-            return this.WriteBytesAsync(stream, length, CancellationToken.None);
-        }
+        public Task WriteBytesAsync(Stream stream, int length) => this.WriteBytesAsync(stream, length, CancellationToken.None);
 
         public abstract bool HasArray { get; }
 

@@ -726,7 +698,7 @@ namespace DotNetty.Buffers
             int readableBytes = this.ReadableBytes;
             if (readableBytes == 0)
             {
-                return ByteArrayExtensions.Empty;
+                return ArrayExtensions.ZeroBytes;
             }
 
             if (this.HasArray)

@@ -739,10 +711,7 @@ namespace DotNetty.Buffers
             return bytes;
         }
 
-        public virtual IByteBuffer Duplicate()
-        {
-            return new DuplicatedByteBuffer(this);
-        }
+        public virtual IByteBuffer Duplicate() => new DuplicatedByteBuffer(this);
 
         public abstract IByteBuffer Unwrap();
 

@@ -767,10 +736,7 @@ namespace DotNetty.Buffers
         /// Creates a new <see cref="SwappedByteBuffer" /> for this <see cref="IByteBuffer" /> instance.
         /// </summary>
         /// <returns>A <see cref="SwappedByteBuffer" /> for this buffer.</returns>
-        protected SwappedByteBuffer NewSwappedByteBuffer()
-        {
-            return new SwappedByteBuffer(this);
-        }
+        protected SwappedByteBuffer NewSwappedByteBuffer() => new SwappedByteBuffer(this);
 
         protected void AdjustMarkers(int decrement)
         {
@@ -795,6 +761,51 @@ namespace DotNetty.Buffers
             }
         }
 
+        public override int GetHashCode() => ByteBufferUtil.HashCode(this);
+
+        public override bool Equals(object o) => this.Equals(o as IByteBuffer);
+
+        public bool Equals(IByteBuffer buffer)
+        {
+            if (ReferenceEquals(this, buffer))
+            {
+                return true;
+            }
+            if (buffer != null)
+            {
+                return ByteBufferUtil.Equals(this, buffer);
+            }
+            return false;
+        }
+
+        public int CompareTo(IByteBuffer that) => ByteBufferUtil.Compare(this, that);
+
+        public override string ToString()
+        {
+            if (this.ReferenceCount == 0)
+            {
+                return StringUtil.SimpleClassName(this) + "(freed)";
+            }
+
+            StringBuilder buf = new StringBuilder()
+                .Append(StringUtil.SimpleClassName(this))
+                .Append("(ridx: ").Append(this.ReaderIndex)
+                .Append(", widx: ").Append(this.WriterIndex)
+                .Append(", cap: ").Append(this.Capacity);
+            if (this.MaxCapacity != int.MaxValue)
+            {
+                buf.Append('/').Append(this.MaxCapacity);
+            }
+
+            IByteBuffer unwrapped = this.Unwrap();
+            if (unwrapped != null)
+            {
+                buf.Append(", unwrapped: ").Append(unwrapped);
+            }
+            buf.Append(')');
+            return buf.ToString();
+        }
+
         protected void CheckIndex(int index)
         {
             this.EnsureAccessible();
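The new GetHashCode, Equals, and CompareTo overrides give buffers content-based equality backed by ByteBufferUtil. An illustrative sketch, not part of the commit:

using DotNetty.Buffers;

static class BufferEqualitySketch
{
    static void Run()
    {
        IByteBuffer a = Unpooled.Buffer(4, 4).WriteInt(7);
        IByteBuffer b = PooledByteBufferAllocator.Default.Buffer(4, 4).WriteInt(7);

        bool sameContent = a.Equals(b);   // true: identical readable bytes despite different allocators
        int order = a.CompareTo(b);       // 0: neither buffer sorts before the other

        b.Release();
    }
}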
@@ -862,22 +873,13 @@ namespace DotNetty.Buffers
             }
         }
 
-        public IByteBuffer Copy()
-        {
-            return this.Copy(this.ReaderIndex, this.ReadableBytes);
-        }
+        public IByteBuffer Copy() => this.Copy(this.ReaderIndex, this.ReadableBytes);
 
         public abstract IByteBuffer Copy(int index, int length);
 
-        public IByteBuffer Slice()
-        {
-            return this.Slice(this.ReaderIndex, this.ReadableBytes);
-        }
+        public IByteBuffer Slice() => this.Slice(this.ReaderIndex, this.ReadableBytes);
 
-        public virtual IByteBuffer Slice(int index, int length)
-        {
-            return new SlicedByteBuffer(this, index, length);
-        }
+        public virtual IByteBuffer Slice(int index, int length) => new SlicedByteBuffer(this, index, length);
 
         public IByteBuffer ReadSlice(int length)
         {

@@ -905,15 +907,9 @@ namespace DotNetty.Buffers
             this.markedReaderIndex = this.markedWriterIndex = 0;
         }
 
-        public string ToString(Encoding encoding)
-        {
-            return this.ToString(this.ReaderIndex, this.ReadableBytes, encoding);
-        }
+        public string ToString(Encoding encoding) => this.ToString(this.ReaderIndex, this.ReadableBytes, encoding);
 
-        public string ToString(int index, int length, Encoding encoding)
-        {
-            return ByteBufferUtil.DecodeString(this, index, length, encoding);
-        }
+        public string ToString(int index, int length, Encoding encoding) => ByteBufferUtil.DecodeString(this, index, length, encoding);
 
         public int ForEachByte(ByteProcessor processor)
         {

@@ -935,7 +931,7 @@ namespace DotNetty.Buffers
         {
             if (processor == null)
             {
-                throw new NullReferenceException("processor");
+                throw new ArgumentNullException("processor");
             }
 
             if (length == 0)

@@ -945,25 +941,18 @@ namespace DotNetty.Buffers
 
             int endIndex = index + length;
             int i = index;
-            try
-            {
-                do
-                {
-                    if (processor.Process(this._GetByte(i)))
-                    {
-                        i++;
-                    }
-                    else
-                    {
-                        return i;
-                    }
-                }
-                while (i < endIndex);
-            }
-            catch
-            {
-                throw;
-            }
+            do
+            {
+                if (processor.Process(this._GetByte(i)))
+                {
+                    i++;
+                }
+                else
+                {
+                    return i;
+                }
+            }
+            while (i < endIndex);
 
             return -1;
         }

@@ -997,25 +986,18 @@ namespace DotNetty.Buffers
             }
 
             int i = index + length - 1;
-            try
-            {
-                do
-                {
-                    if (processor.Process(this._GetByte(i)))
-                    {
-                        i--;
-                    }
-                    else
-                    {
-                        return i;
-                    }
-                }
-                while (i >= index);
-            }
-            catch
-            {
-                throw;
-            }
+            do
+            {
+                if (processor.Process(this._GetByte(i)))
+                {
+                    i--;
+                }
+                else
+                {
+                    return i;
+                }
+            }
+            while (i >= index);
 
             return -1;
         }
@@ -23,7 +23,7 @@ namespace DotNetty.Buffers
                     leak = AbstractByteBuffer.LeakDetector.Open(buf);
                     if (leak != null)
                     {
-                        buf = new SimpleLeakAwareByteBuf(buf, leak);
+                        buf = new SimpleLeakAwareByteBuffer(buf, leak);
                     }
                     break;
                 case ResourceLeakDetector.DetectionLevel.Advanced:

@@ -31,7 +31,7 @@ namespace DotNetty.Buffers
                     leak = AbstractByteBuffer.LeakDetector.Open(buf);
                     if (leak != null)
                     {
-                        buf = new AdvancedLeakAwareByteBuf(buf, leak);
+                        buf = new AdvancedLeakAwareByteBuffer(buf, leak);
                     }
                     break;
                 case ResourceLeakDetector.DetectionLevel.Disabled:

@@ -71,15 +71,9 @@ namespace DotNetty.Buffers
             return this.NewBuffer(initialCapacity, maxCapacity);
         }
 
-        public CompositeByteBuffer CompositeBuffer()
-        {
-            return this.CompositeBuffer(DefaultMaxComponents);
-        }
+        public CompositeByteBuffer CompositeBuffer() => this.CompositeBuffer(DefaultMaxComponents);
 
-        public CompositeByteBuffer CompositeBuffer(int maxComponents)
-        {
-            return new CompositeByteBuffer(this, maxComponents);
-        }
+        public CompositeByteBuffer CompositeBuffer(int maxComponents) => new CompositeByteBuffer(this, maxComponents);
 
         protected abstract IByteBuffer NewBuffer(int initialCapacity, int maxCapacity);
 

@@ -42,14 +42,8 @@ namespace DotNetty.Buffers
             return this;
         }
 
-        public sealed override bool Release()
-        {
-            return this.Unwrap().Release();
-        }
+        public sealed override bool Release() => this.Unwrap().Release();
 
-        public sealed override bool Release(int decrement)
-        {
-            return this.Unwrap().Release(decrement);
-        }
+        public sealed override bool Release(int decrement) => this.Unwrap().Release(decrement);
     }
 }
@@ -9,26 +9,22 @@ namespace DotNetty.Buffers
 
     public abstract class AbstractReferenceCountedByteBuffer : AbstractByteBuffer
     {
-#pragma warning disable 420
-        volatile int referenceCount = 1;
+        int referenceCount = 1;
 
         protected AbstractReferenceCountedByteBuffer(int maxCapacity)
             : base(maxCapacity)
         {
         }
 
-        public override int ReferenceCount => this.referenceCount;
+        public override int ReferenceCount => Volatile.Read(ref this.referenceCount);
 
-        protected void SetReferenceCount(int value)
-        {
-            this.referenceCount = value;
-        }
+        protected void SetReferenceCount(int value) => Volatile.Write(ref this.referenceCount, value);
 
         public override IReferenceCounted Retain()
         {
             while (true)
             {
-                int refCnt = this.referenceCount;
+                int refCnt = this.ReferenceCount;
                 if (refCnt == 0)
                 {
                     throw new IllegalReferenceCountException(0, 1);

@@ -55,7 +51,7 @@ namespace DotNetty.Buffers
 
             while (true)
             {
-                int refCnt = this.referenceCount;
+                int refCnt = this.ReferenceCount;
                 if (refCnt == 0)
                 {
                     throw new IllegalReferenceCountException(0, increment);

@@ -77,7 +73,7 @@ namespace DotNetty.Buffers
         {
             while (true)
             {
-                int refCnt = this.referenceCount;
+                int refCnt = this.ReferenceCount;
                 if (refCnt == 0)
                 {
                     throw new IllegalReferenceCountException(0, -1);

@@ -104,7 +100,7 @@ namespace DotNetty.Buffers
 
             while (true)
             {
-                int refCnt = this.referenceCount;
+                int refCnt = this.ReferenceCount;
                 if (refCnt < decrement)
                 {
                     throw new IllegalReferenceCountException(refCnt, -decrement);

@@ -122,15 +118,9 @@ namespace DotNetty.Buffers
             }
         }
 
-        public override IReferenceCounted Touch()
-        {
-            return this;
-        }
+        public override IReferenceCounted Touch() => this;
 
-        public override IReferenceCounted Touch(object hint)
-        {
-            return this;
-        }
+        public override IReferenceCounted Touch(object hint) => this;
 
         protected abstract void Deallocate();
     }
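The reference count field is now a plain int read and written through Volatile.Read/Volatile.Write, which removes the need for `#pragma warning disable 420` (the CS0420 warning about passing a volatile field by reference). A minimal sketch of the pattern; the compare-and-swap inside the retry loop is an assumption about the elided loop body, modeled on the while(true) loops visible above, and the class name is illustrative:

using System.Threading;

class RefCountSketch
{
    int referenceCount = 1;

    public int ReferenceCount => Volatile.Read(ref this.referenceCount);

    void SetReferenceCount(int value) => Volatile.Write(ref this.referenceCount, value);

    public bool TryRetain()
    {
        while (true)
        {
            int current = this.ReferenceCount;
            if (current == 0)
            {
                return false;   // already fully released
            }
            // Publish current + 1 only if no other thread changed the count meanwhile.
            if (Interlocked.CompareExchange(ref this.referenceCount, current + 1, current) == current)
            {
                return true;
            }
        }
    }
}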
@@ -11,14 +11,14 @@ namespace DotNetty.Buffers
     using DotNetty.Common.Internal;
     using DotNetty.Common.Internal.Logging;
 
-    class AdvancedLeakAwareByteBuf : WrappedByteBuffer
+    class AdvancedLeakAwareByteBuffer : WrappedByteBuffer
     {
         static readonly string PropAcquireAndReleaseOnly = "io.netty.leakDetection.acquireAndReleaseOnly";
         static readonly bool AcquireAndReleaseOnly;
 
-        static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance<AdvancedLeakAwareByteBuf>();
+        static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance<AdvancedLeakAwareByteBuffer>();
 
-        static AdvancedLeakAwareByteBuf()
+        static AdvancedLeakAwareByteBuffer()
         {
             AcquireAndReleaseOnly = SystemPropertyUtil.GetBoolean(PropAcquireAndReleaseOnly, false);
 

@@ -30,7 +30,7 @@ namespace DotNetty.Buffers
 
         readonly IResourceLeak leak;
 
-        internal AdvancedLeakAwareByteBuf(IByteBuffer buf, IResourceLeak leak)
+        internal AdvancedLeakAwareByteBuffer(IByteBuffer buf, IResourceLeak leak)
             : base(buf)
         {
             this.leak = leak;

@@ -53,32 +53,32 @@ namespace DotNetty.Buffers
             }
             else
             {
-                return new AdvancedLeakAwareByteBuf(base.WithOrder(endianness), this.leak);
+                return new AdvancedLeakAwareByteBuffer(base.WithOrder(endianness), this.leak);
             }
         }
 
         public override IByteBuffer Slice()
         {
             this.RecordLeakNonRefCountingOperation();
-            return new AdvancedLeakAwareByteBuf(base.Slice(), this.leak);
+            return new AdvancedLeakAwareByteBuffer(base.Slice(), this.leak);
         }
 
         public override IByteBuffer Slice(int index, int length)
         {
             this.RecordLeakNonRefCountingOperation();
-            return new AdvancedLeakAwareByteBuf(base.Slice(index, length), this.leak);
+            return new AdvancedLeakAwareByteBuffer(base.Slice(index, length), this.leak);
         }
 
         public override IByteBuffer Duplicate()
         {
             this.RecordLeakNonRefCountingOperation();
-            return new AdvancedLeakAwareByteBuf(base.Duplicate(), this.leak);
+            return new AdvancedLeakAwareByteBuffer(base.Duplicate(), this.leak);
         }
 
         public override IByteBuffer ReadSlice(int length)
         {
             this.RecordLeakNonRefCountingOperation();
-            return new AdvancedLeakAwareByteBuf(base.ReadSlice(length), this.leak);
+            return new AdvancedLeakAwareByteBuffer(base.ReadSlice(length), this.leak);
         }
 
         public override IByteBuffer DiscardReadBytes()
@@ -6,10 +6,14 @@ namespace DotNetty.Buffers
     using System;
     using System.Diagnostics.Contracts;
     using System.Text;
+    using DotNetty.Common.Internal;
+    using DotNetty.Common.Internal.Logging;
     using DotNetty.Common.Utilities;
 
     public static class ByteBufferUtil
     {
+        static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance(typeof(ByteBufferUtil));
+
         static readonly char[] HexdumpTable = new char[256 * 4];
         static readonly string Newline = StringUtil.Newline;
         static readonly string[] Byte2Hex = new string[256];

@@ -18,6 +22,8 @@ namespace DotNetty.Buffers
         static readonly char[] Byte2Char = new char[256];
         static readonly string[] HexDumpRowPrefixes = new string[(int)((uint)65536 >> 4)];
 
+        public static readonly IByteBufferAllocator DefaultAllocator;
+
         static ByteBufferUtil()
         {
             char[] digits = "0123456789abcdef".ToCharArray();
@@ -81,42 +87,35 @@ namespace DotNetty.Buffers
                 HexDumpRowPrefixes[i] = buf.ToString();
             }
 
-            //todo: port
-            //String allocType = SystemPropertyUtil.get(
-            //    "io.netty.allocator.type", PlatformDependent.isAndroid() ? "unpooled" : "pooled");
-            //allocType = allocType.toLowerCase(Locale.US).trim();
+            string allocType = SystemPropertyUtil.Get(
+                "io.netty.allocator.type", "pooled");
+            allocType = allocType.Trim();
 
-            //ByteBufAllocator alloc;
-            //if ("unpooled".equals(allocType))
-            //{
-            //    alloc = UnpooledByteBufAllocator.DEFAULT;
-            //    logger.debug("-Dio.netty.allocator.type: {}", allocType);
-            //}
-            //else if ("pooled".equals(allocType))
-            //{
-            //    alloc = PooledByteBufAllocator.DEFAULT;
-            //    logger.debug("-Dio.netty.allocator.type: {}", allocType);
-            //}
-            //else
-            //{
-            //    alloc = PooledByteBufAllocator.DEFAULT;
-            //    logger.debug("-Dio.netty.allocator.type: pooled (unknown: {})", allocType);
-            //}
+            IByteBufferAllocator alloc;
+            if ("unpooled".Equals(allocType, StringComparison.OrdinalIgnoreCase))
+            {
+                alloc = UnpooledByteBufferAllocator.Default;
+                Logger.Debug("-Dio.netty.allocator.type: {}", allocType);
+            }
+            else if ("pooled".Equals(allocType, StringComparison.OrdinalIgnoreCase))
+            {
+                alloc = PooledByteBufferAllocator.Default;
+                Logger.Debug("-Dio.netty.allocator.type: {}", allocType);
+            }
+            else
+            {
+                alloc = PooledByteBufferAllocator.Default;
+                Logger.Debug("-Dio.netty.allocator.type: pooled (unknown: {})", allocType);
+            }
 
-            //DEFAULT_ALLOCATOR = alloc;
-
-            //THREAD_LOCAL_BUFFER_SIZE = SystemPropertyUtil.getInt("io.netty.threadLocalDirectBufferSize", 64 * 1024);
-            //logger.debug("-Dio.netty.threadLocalDirectBufferSize: {}", THREAD_LOCAL_BUFFER_SIZE);
+            DefaultAllocator = alloc;
         }
 
         /// <summary>
         /// Returns a <a href="http://en.wikipedia.org/wiki/Hex_dump">hex dump</a>
         /// of the specified buffer's sub-region.
         /// </summary>
-        public static string HexDump(IByteBuffer buffer)
-        {
-            return HexDump(buffer, buffer.ReaderIndex, buffer.ReadableBytes);
-        }
+        public static string HexDump(IByteBuffer buffer) => HexDump(buffer, buffer.ReaderIndex, buffer.ReadableBytes);
 
         /// <summary>
         /// Returns a <a href="http://en.wikipedia.org/wiki/Hex_dump">hex dump</a>
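The formerly commented-out Java has been ported: the default allocator is chosen from the io.netty.allocator.type property ("pooled" unless it says "unpooled") and published as ByteBufferUtil.DefaultAllocator. Consumers can simply use that field; a brief sketch with illustrative names:

using DotNetty.Buffers;

static class DefaultAllocatorSketch
{
    static void Run()
    {
        IByteBufferAllocator allocator = ByteBufferUtil.DefaultAllocator;   // pooled unless configured otherwise
        IByteBuffer buffer = allocator.Buffer(128, 1024);
        // ... use buffer ...
        buffer.Release();
    }
}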
@@ -148,10 +147,7 @@ namespace DotNetty.Buffers
         /// Returns a <a href="http://en.wikipedia.org/wiki/Hex_dump">hex dump</a>
         /// of the specified buffer's sub-region.
         /// </summary>
-        public static string HexDump(byte[] array)
-        {
-            return HexDump(array, 0, array.Length);
-        }
+        public static string HexDump(byte[] array) => HexDump(array, 0, array.Length);
 
         /// <summary>
         /// Returns a <a href="http://en.wikipedia.org/wiki/Hex_dump">hex dump</a>
@@ -371,27 +367,20 @@ namespace DotNetty.Buffers
         /// Toggles the endianness of the specified 64-bit long integer.
         /// </summary>
         public static long SwapLong(long value)
-        {
-            return (((long)SwapInt((int)value) & 0xFFFFFFFF) << 32)
-                | ((long)SwapInt((int)(value >> 32)) & 0xFFFFFFFF);
-        }
+            => (((long)SwapInt((int)value) & 0xFFFFFFFF) << 32)
+                | ((long)SwapInt((int)(value >> 32)) & 0xFFFFFFFF);
 
         /// <summary>
         /// Toggles the endianness of the specified 32-bit integer.
         /// </summary>
         public static int SwapInt(int value)
-        {
-            return ((SwapShort((short)value) & 0xFFFF) << 16)
-                | (SwapShort((short)(value >> 16)) & 0xFFFF);
-        }
+            => ((SwapShort((short)value) & 0xFFFF) << 16)
+                | (SwapShort((short)(value >> 16)) & 0xFFFF);
 
         /// <summary>
         /// Toggles the endianness of the specified 16-bit integer.
         /// </summary>
-        public static short SwapShort(short value)
-        {
-            return (short)(((value & 0xFF) << 8) | (value >> 8) & 0xFF);
-        }
+        public static short SwapShort(short value) => (short)(((value & 0xFF) << 8) | (value >> 8) & 0xFF);
 
         /// <summary>
         /// Read the given amount of bytes into a new {@link ByteBuf} that is allocated from the {@link ByteBufAllocator}.
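The Swap* helpers are now expression-bodied but behave exactly as before; a worked example of the byte-order toggling they perform (wrapper class is illustrative, results computed from the formulas above):

using DotNetty.Buffers;

static class SwapSketch
{
    static void Run()
    {
        short s = ByteBufferUtil.SwapShort(0x0102);               // 0x0201
        int i = ByteBufferUtil.SwapInt(0x01020304);               // 0x04030201
        long l = ByteBufferUtil.SwapLong(0x0102030405060708L);    // 0x0807060504030201
    }
}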
@@ -431,10 +420,7 @@ namespace DotNetty.Buffers
         /// <summary>
         /// Returns a multi-line hexadecimal dump of the specified {@link ByteBuf} that is easy to read by humans.
         /// </summary>
-        public static string PrettyHexDump(IByteBuffer buffer)
-        {
-            return PrettyHexDump(buffer, buffer.ReaderIndex, buffer.ReadableBytes);
-        }
+        public static string PrettyHexDump(IByteBuffer buffer) => PrettyHexDump(buffer, buffer.ReaderIndex, buffer.ReadableBytes);
 
         /// <summary>
         /// Returns a multi-line hexadecimal dump of the specified {@link ByteBuf} that is easy to read by humans,

@@ -459,10 +445,7 @@ namespace DotNetty.Buffers
         /// Appends the prettified multi-line hexadecimal dump of the specified {@link ByteBuf} to the specified
         /// {@link StringBuilder} that is easy to read by humans.
         /// </summary>
-        public static void AppendPrettyHexDump(StringBuilder dump, IByteBuffer buf)
-        {
-            AppendPrettyHexDump(dump, buf, buf.ReaderIndex, buf.ReadableBytes);
-        }
+        public static void AppendPrettyHexDump(StringBuilder dump, IByteBuffer buf) => AppendPrettyHexDump(dump, buf, buf.ReaderIndex, buf.ReadableBytes);
 
         /// <summary>
         /// Appends the prettified multi-line hexadecimal dump of the specified {@link ByteBuf} to the specified

@@ -566,10 +549,7 @@ namespace DotNetty.Buffers
         /// <param name="alloc">The <see cref="IByteBufferAllocator" /> to allocate {@link IByteBuffer}.</param>
         /// <param name="src">src The <see cref="string" /> to encode.</param>
         /// <param name="encoding">charset The specified <see cref="Encoding" /></param>
-        public static IByteBuffer EncodeString(IByteBufferAllocator alloc, string src, Encoding encoding)
-        {
-            return EncodeString0(alloc, src, encoding, 0);
-        }
+        public static IByteBuffer EncodeString(IByteBufferAllocator alloc, string src, Encoding encoding) => EncodeString0(alloc, src, encoding, 0);
 
         /// <summary>
         /// Encode the given <see cref="CharBuffer" /> using the given <see cref="Encoding" /> into a new

@@ -580,10 +560,7 @@ namespace DotNetty.Buffers
         /// <param name="src">src The <see cref="string" /> to encode.</param>
         /// <param name="encoding">charset The specified <see cref="Encoding" /></param>
         /// <param name="extraCapacity">the extra capacity to alloc except the space for decoding.</param>
-        public static IByteBuffer EncodeString(IByteBufferAllocator alloc, string src, Encoding encoding, int extraCapacity)
-        {
-            return EncodeString0(alloc, src, encoding, extraCapacity);
-        }
+        public static IByteBuffer EncodeString(IByteBufferAllocator alloc, string src, Encoding encoding, int extraCapacity) => EncodeString0(alloc, src, encoding, extraCapacity);
 
         static IByteBuffer EncodeString0(IByteBufferAllocator alloc, string src, Encoding encoding, int extraCapacity)
         {
@@ -427,7 +427,7 @@ namespace DotNetty.Buffers
             switch (this.components.Count)
             {
                 case 0:
-                    return ByteArrayExtensions.Empty;
+                    return ArrayExtensions.ZeroBytes;
                 case 1:
                     return this.components[0].Buffer.Array;
                 default:
@@ -31,6 +31,15 @@
     <WarningLevel>4</WarningLevel>
     <CheckForOverflowUnderflow>false</CheckForOverflowUnderflow>
   </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)' == 'Signed|AnyCPU'">
+    <OutputPath>bin\Signed\</OutputPath>
+    <DefineConstants>TRACE;NOTEST</DefineConstants>
+    <Optimize>true</Optimize>
+    <DebugType>pdbonly</DebugType>
+    <PlatformTarget>AnyCPU</PlatformTarget>
+    <ErrorReport>prompt</ErrorReport>
+    <CodeAnalysisRuleSet>MinimumRecommendedRules.ruleset</CodeAnalysisRuleSet>
+  </PropertyGroup>
   <ItemGroup>
     <Reference Include="System" />
     <Reference Include="System.Core" />

@@ -40,8 +49,12 @@
     <Compile Include="..\SharedAssemblyInfo.cs">
       <Link>Properties\SharedAssemblyInfo.cs</Link>
     </Compile>
-    <Compile Include="AdvancedLeakAwareByteBuf.cs" />
-    <Compile Include="SimpleLeakAwareByteBuf.cs" />
+    <Compile Include="AdvancedLeakAwareByteBuffer.cs" />
+    <Compile Include="Properties\Friends.cs" />
+    <Compile Include="IPoolArenaMetric.cs" />
+    <Compile Include="IPoolChunkListMetric.cs" />
+    <Compile Include="IPoolSubpageMetric.cs" />
+    <Compile Include="SimpleLeakAwareByteBuffer.cs" />
     <Compile Include="AbstractByteBuffer.cs" />
     <Compile Include="AbstractByteBufferAllocator.cs" />
     <Compile Include="AbstractDerivedByteBuffer.cs" />

@@ -52,13 +65,20 @@
     <Compile Include="CompositeByteBuffer.cs" />
     <Compile Include="DuplicatedByteBuffer.cs" />
     <Compile Include="EmptyByteBuffer.cs" />
+    <Compile Include="IPoolChunkMetric.cs" />
+    <Compile Include="PoolArena.cs" />
+    <Compile Include="PoolChunk.cs" />
+    <Compile Include="PoolChunkList.cs" />
     <Compile Include="PooledByteBuffer.cs" />
+    <Compile Include="PooledByteBufferAllocator.cs" />
+    <Compile Include="PooledHeapByteBuffer.cs" />
+    <Compile Include="PoolSubpage.cs" />
+    <Compile Include="PoolThreadCache.cs" />
     <Compile Include="IByteBuffer.cs" />
     <Compile Include="IByteBufferAllocator.cs" />
     <Compile Include="IllegalReferenceCountException.cs" />
     <Compile Include="Properties\AssemblyInfo.cs" />
     <Compile Include="SlicedByteBuffer.cs" />
-    <Compile Include="PooledByteBufferAllocator.cs" />
     <Compile Include="SwappedByteBuffer.cs" />
     <Compile Include="Unpooled.cs" />
     <Compile Include="UnpooledByteBufferAllocator.cs" />
@@ -459,11 +459,11 @@ namespace DotNetty.Buffers
 
         public bool HasArray => true;
 
-        public byte[] Array => ByteArrayExtensions.Empty;
+        public byte[] Array => ArrayExtensions.ZeroBytes;
 
         public byte[] ToArray()
         {
-            return ByteArrayExtensions.Empty;
+            return ArrayExtensions.ZeroBytes;
         }
 
         public IByteBuffer Duplicate()
@@ -567,6 +567,27 @@ namespace DotNetty.Buffers
             return false;
         }
 
+        public override int GetHashCode()
+        {
+            return 0;
+        }
+
+        public override bool Equals(object obj)
+        {
+            var buffer = obj as IByteBuffer;
+            return this.Equals(buffer);
+        }
+
+        public bool Equals(IByteBuffer buffer)
+        {
+            return buffer != null && !buffer.IsReadable();
+        }
+
+        public int CompareTo(IByteBuffer buffer)
+        {
+            return buffer.IsReadable() ? -1 : 0;
+        }
+
         public override string ToString()
         {
             return this.str;
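The equality members added to the empty buffer treat any buffer with no readable bytes as equal to Unpooled.Empty. Sketch of that semantics (wrapper names are illustrative):

using DotNetty.Buffers;

static class EmptyEqualitySketch
{
    static void Run()
    {
        IByteBuffer fresh = Unpooled.Buffer(8, 8);        // nothing written yet, so not readable
        bool equal = Unpooled.Empty.Equals(fresh);        // true: both report zero readable bytes
        int cmp = Unpooled.Empty.CompareTo(fresh);        // 0 for an unreadable buffer, -1 otherwise
    }
}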
@@ -19,7 +19,7 @@ namespace DotNetty.Buffers
     /// /// <see cref="ReaderIndex" /> LESS THAN OR EQUAL TO <see cref="WriterIndex" /> LESS THAN OR EQUAL TO
     /// <see cref="Capacity" />.
     /// </summary>
-    public interface IByteBuffer : IReferenceCounted
+    public interface IByteBuffer : IReferenceCounted, IComparable<IByteBuffer>, IEquatable<IByteBuffer>
     {
         int Capacity { get; }
 
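Because IByteBuffer now implements IComparable<IByteBuffer> and IEquatable<IByteBuffer>, buffers work with the default comparer and can, for example, be sorted directly. A small sketch (names illustrative):

using System.Collections.Generic;
using DotNetty.Buffers;

static class SortBuffersSketch
{
    static void Run()
    {
        var buffers = new List<IByteBuffer>
        {
            Unpooled.Buffer(4, 4).WriteByte(2),
            Unpooled.Buffer(4, 4).WriteByte(1),
        };
        buffers.Sort();   // uses IComparable<IByteBuffer>, i.e. ByteBufferUtil.Compare in the abstract base
    }
}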
@@ -0,0 +1,79 @@
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+namespace DotNetty.Buffers
+{
+    using System.Collections.Generic;
+
+    public interface IPoolArenaMetric
+    {
+        /// Returns the number of thread caches backed by this arena.
+        int NumThreadCaches { get; }
+
+        /// Returns the number of tiny sub-pages for the arena.
+        int NumTinySubpages { get; }
+
+        /// Returns the number of small sub-pages for the arena.
+        int NumSmallSubpages { get; }
+
+        /// Returns the number of chunk lists for the arena.
+        int NumChunkLists { get; }
+
+        /// Returns an unmodifiable {@link List} which holds {@link PoolSubpageMetric}s for tiny sub-pages.
+        IReadOnlyList<IPoolSubpageMetric> TinySubpages { get; }
+
+        /// Returns an unmodifiable {@link List} which holds {@link PoolSubpageMetric}s for small sub-pages.
+        IReadOnlyList<IPoolSubpageMetric> SmallSubpages { get; }
+
+        /// Returns an unmodifiable {@link List} which holds {@link PoolChunkListMetric}s.
+        IReadOnlyList<IPoolChunkListMetric> ChunkLists { get; }
+
+        /// Return the number of allocations done via the arena. This includes all sizes.
+        long NumAllocations { get; }
+
+        /// Return the number of tiny allocations done via the arena.
+        long NumTinyAllocations { get; }
+
+        /// Return the number of small allocations done via the arena.
+        long NumSmallAllocations { get; }
+
+        /// Return the number of normal allocations done via the arena.
+        long NumNormalAllocations { get; }
+
+        /// Return the number of huge allocations done via the arena.
+        long NumHugeAllocations { get; }
+
+        /// Return the number of deallocations done via the arena. This includes all sizes.
+        long NumDeallocations { get; }
+
+        /// Return the number of tiny deallocations done via the arena.
+        long NumTinyDeallocations { get; }
+
+        /// Return the number of small deallocations done via the arena.
+        long NumSmallDeallocations { get; }
+
+        /// Return the number of normal deallocations done via the arena.
+        long NumNormalDeallocations { get; }
+
+        /// Return the number of huge deallocations done via the arena.
+        long NumHugeDeallocations { get; }
+
+        /// Return the number of currently active allocations.
+        long NumActiveAllocations { get; }
+
+        /// Return the number of currently active tiny allocations.
+        long NumActiveTinyAllocations { get; }
+
+        /// Return the number of currently active small allocations.
+        long NumActiveSmallAllocations { get; }
+
+        /// Return the number of currently active normal allocations.
+        long NumActiveNormalAllocations { get; }
+
+        /// Return the number of currently active huge allocations.
+        long NumActiveHugeAllocations { get; }
+
+        /// Return the number of active bytes that are currently allocated by the arena.
+        long NumActiveBytes { get; }
+    }
+}
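A hypothetical helper (not part of the commit) showing how the counters exposed by IPoolArenaMetric can be aggregated; it touches only members declared in the interface above.

using System.Collections.Generic;
using System.Linq;
using DotNetty.Buffers;

static class PoolMetricsReport
{
    // Produce a one-line summary per arena from the interface's counters.
    public static string Summarize(IEnumerable<IPoolArenaMetric> arenas) =>
        string.Join("; ", arenas.Select(a =>
            $"allocations={a.NumAllocations}, deallocations={a.NumDeallocations}, " +
            $"active={a.NumActiveAllocations}, activeBytes={a.NumActiveBytes}"));
}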
@ -0,0 +1,16 @@
|
||||||
|
// Copyright (c) Microsoft. All rights reserved.
|
||||||
|
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||||
|
|
||||||
|
namespace DotNetty.Buffers
|
||||||
|
{
|
||||||
|
using System.Collections.Generic;
|
||||||
|
|
||||||
|
public interface IPoolChunkListMetric : IEnumerable<IPoolChunkMetric>
|
||||||
|
{
|
||||||
|
/// Return the minimum usage of the chunk list before which chunks are promoted to the previous list.
|
||||||
|
int MinUsage { get; }
|
||||||
|
|
||||||
|
/// Return the maximum usage of the chunk list after which chunks are promoted to the next list.
|
||||||
|
int MaxUsage { get; }
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,17 @@
|
||||||
|
// Copyright (c) Microsoft. All rights reserved.
|
||||||
|
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||||
|
|
||||||
|
namespace DotNetty.Buffers
|
||||||
|
{
|
||||||
|
public interface IPoolChunkMetric
|
||||||
|
{
|
||||||
|
/// Return the percentage of the current usage of the chunk.
|
||||||
|
int Usage { get; }
|
||||||
|
|
||||||
|
/// Return the size of the chunk in bytes; this is the maximum number of bytes that can be served out of the chunk.
|
||||||
|
int ChunkSize { get; }
|
||||||
|
|
||||||
|
/// Return the number of free bytes in the chunk.
|
||||||
|
int FreeBytes { get; }
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,20 @@
|
||||||
|
// Copyright (c) Microsoft. All rights reserved.
|
||||||
|
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||||
|
|
||||||
|
namespace DotNetty.Buffers
|
||||||
|
{
|
||||||
|
public interface IPoolSubpageMetric
|
||||||
|
{
|
||||||
|
/// Return the maximum number of elements that can be allocated out of the sub-page.
|
||||||
|
int MaxNumElements { get; }
|
||||||
|
|
||||||
|
/// Return the number of available elements to be allocated.
|
||||||
|
int NumAvailable { get; }
|
||||||
|
|
||||||
|
/// Return the size (in bytes) of the elements that will be allocated.
|
||||||
|
int ElementSize { get; }
|
||||||
|
|
||||||
|
/// Return the size (in bytes) of this page.
|
||||||
|
int PageSize { get; }
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,648 @@
|
||||||
|
// Copyright (c) Microsoft. All rights reserved.
|
||||||
|
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||||
|
|
||||||
|
namespace DotNetty.Buffers
|
||||||
|
{
|
||||||
|
using System;
|
||||||
|
using System.Collections.Generic;
|
||||||
|
using System.Diagnostics.Contracts;
|
||||||
|
using System.Runtime.CompilerServices;
|
||||||
|
using System.Text;
|
||||||
|
using System.Threading;
|
||||||
|
using DotNetty.Common.Utilities;
|
||||||
|
|
||||||
|
enum SizeClass
|
||||||
|
{
|
||||||
|
Tiny,
|
||||||
|
Small,
|
||||||
|
Normal
|
||||||
|
}
|
||||||
|
|
||||||
|
abstract class PoolArena<T> : IPoolArenaMetric
|
||||||
|
{
|
||||||
|
internal static readonly int NumTinySubpagePools = 512 >> 4;
|
||||||
|
|
||||||
|
internal readonly PooledByteBufferAllocator Parent;
|
||||||
|
|
||||||
|
readonly int maxOrder;
|
||||||
|
internal readonly int PageSize;
|
||||||
|
internal readonly int PageShifts;
|
||||||
|
internal readonly int ChunkSize;
|
||||||
|
internal readonly int SubpageOverflowMask;
|
||||||
|
internal readonly int NumSmallSubpagePools;
|
||||||
|
readonly PoolSubpage<T>[] tinySubpagePools;
|
||||||
|
readonly PoolSubpage<T>[] smallSubpagePools;
|
||||||
|
|
||||||
|
readonly PoolChunkList<T> q050;
|
||||||
|
readonly PoolChunkList<T> q025;
|
||||||
|
readonly PoolChunkList<T> q000;
|
||||||
|
readonly PoolChunkList<T> qInit;
|
||||||
|
readonly PoolChunkList<T> q075;
|
||||||
|
readonly PoolChunkList<T> q100;
|
||||||
|
|
||||||
|
readonly List<IPoolChunkListMetric> chunkListMetrics;
|
||||||
|
|
||||||
|
// Metrics for allocations and deallocations
|
||||||
|
long allocationsTiny;
|
||||||
|
long allocationsSmall;
|
||||||
|
long allocationsNormal;
|
||||||
|
// Not guarded by the synchronized block, so updated with Interlocked and read with Volatile.
|
||||||
|
long allocationsHuge;
|
||||||
|
long activeBytesHuge;
|
||||||
|
|
||||||
|
long deallocationsTiny;
|
||||||
|
long deallocationsSmall;
|
||||||
|
long deallocationsNormal;
|
||||||
|
// Not guarded by the synchronized block, so updated with Interlocked and read with Volatile.
|
||||||
|
long deallocationsHuge;
|
||||||
|
|
||||||
|
// Number of thread caches backed by this arena.
|
||||||
|
int numThreadCaches;
|
||||||
|
|
||||||
|
// TODO: Test if adding padding helps under contention
|
||||||
|
//private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
|
||||||
|
|
||||||
|
protected PoolArena(PooledByteBufferAllocator parent, int pageSize, int maxOrder, int pageShifts, int chunkSize)
|
||||||
|
{
|
||||||
|
this.Parent = parent;
|
||||||
|
this.PageSize = pageSize;
|
||||||
|
this.maxOrder = maxOrder;
|
||||||
|
this.PageShifts = pageShifts;
|
||||||
|
this.ChunkSize = chunkSize;
|
||||||
|
this.SubpageOverflowMask = ~(pageSize - 1);
|
||||||
|
this.tinySubpagePools = this.NewSubpagePoolArray(NumTinySubpagePools);
|
||||||
|
for (int i = 0; i < this.tinySubpagePools.Length; i++)
|
||||||
|
{
|
||||||
|
this.tinySubpagePools[i] = this.NewSubpagePoolHead(pageSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
this.NumSmallSubpagePools = pageShifts - 9;
|
||||||
|
this.smallSubpagePools = this.NewSubpagePoolArray(this.NumSmallSubpagePools);
|
||||||
|
for (int i = 0; i < this.smallSubpagePools.Length; i++)
|
||||||
|
{
|
||||||
|
this.smallSubpagePools[i] = this.NewSubpagePoolHead(pageSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
this.q100 = new PoolChunkList<T>(null, 100, int.MaxValue, chunkSize);
|
||||||
|
this.q075 = new PoolChunkList<T>(this.q100, 75, 100, chunkSize);
|
||||||
|
this.q050 = new PoolChunkList<T>(this.q075, 50, 100, chunkSize);
|
||||||
|
this.q025 = new PoolChunkList<T>(this.q050, 25, 75, chunkSize);
|
||||||
|
this.q000 = new PoolChunkList<T>(this.q025, 1, 50, chunkSize);
|
||||||
|
this.qInit = new PoolChunkList<T>(this.q000, int.MinValue, 25, chunkSize);
|
||||||
|
|
||||||
|
this.q100.PrevList(this.q075);
|
||||||
|
this.q075.PrevList(this.q050);
|
||||||
|
this.q050.PrevList(this.q025);
|
||||||
|
this.q025.PrevList(this.q000);
|
||||||
|
this.q000.PrevList(null);
|
||||||
|
this.qInit.PrevList(this.qInit);
|
||||||
|
|
||||||
|
var metrics = new List<IPoolChunkListMetric>(6);
|
||||||
|
metrics.Add(this.qInit);
|
||||||
|
metrics.Add(this.q000);
|
||||||
|
metrics.Add(this.q025);
|
||||||
|
metrics.Add(this.q050);
|
||||||
|
metrics.Add(this.q075);
|
||||||
|
metrics.Add(this.q100);
|
||||||
|
this.chunkListMetrics = metrics;
|
||||||
|
}
|
||||||
|
|
||||||
|
public int NumThreadCaches => Volatile.Read(ref this.numThreadCaches);
|
||||||
|
|
||||||
|
public void RegisterThreadCache() => Interlocked.Increment(ref this.numThreadCaches);
|
||||||
|
|
||||||
|
public void DeregisterThreadCache() => Interlocked.Decrement(ref this.numThreadCaches);
|
||||||
|
|
||||||
|
PoolSubpage<T> NewSubpagePoolHead(int pageSize)
|
||||||
|
{
|
||||||
|
var head = new PoolSubpage<T>(pageSize);
|
||||||
|
head.Prev = head;
|
||||||
|
head.Next = head;
|
||||||
|
return head;
|
||||||
|
}
|
||||||
|
|
||||||
|
PoolSubpage<T>[] NewSubpagePoolArray(int size) => new PoolSubpage<T>[size];
|
||||||
|
|
||||||
|
internal PooledByteBuffer<T> Allocate(PoolThreadCache<T> cache, int reqCapacity, int maxCapacity)
|
||||||
|
{
|
||||||
|
PooledByteBuffer<T> buf = this.NewByteBuf(maxCapacity);
|
||||||
|
this.Allocate(cache, buf, reqCapacity);
|
||||||
|
return buf;
|
||||||
|
}
|
||||||
|
|
||||||
|
internal static int TinyIdx(int normCapacity) => normCapacity.RightUShift(4);
|
||||||
|
|
||||||
|
internal static int SmallIdx(int normCapacity)
|
||||||
|
{
|
||||||
|
int tableIdx = 0;
|
||||||
|
int i = normCapacity.RightUShift(10);
|
||||||
|
while (i != 0)
|
||||||
|
{
|
||||||
|
i = i.RightUShift(1);
|
||||||
|
tableIdx++;
|
||||||
|
}
|
||||||
|
return tableIdx;
|
||||||
|
}
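
// Worked example (editor's note): SmallIdx maps a normalized "small" capacity to its
// subpage-pool slot by counting how many times 512 doubles into it:
//   SmallIdx(512) == 0, SmallIdx(1024) == 1, SmallIdx(2048) == 2, SmallIdx(4096) == 3.
// TinyIdx simply divides by 16, so TinyIdx(32) == 2 and TinyIdx(496) == 31.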
|
||||||
|
|
||||||
|
// capacity < pageSize
|
||||||
|
internal bool IsTinyOrSmall(int normCapacity) => (normCapacity & this.SubpageOverflowMask) == 0;
|
||||||
|
|
||||||
|
// normCapacity < 512
|
||||||
|
internal static bool IsTiny(int normCapacity) => (normCapacity & 0xFFFFFE00) == 0;
|
||||||
|
|
||||||
|
void Allocate(PoolThreadCache<T> cache, PooledByteBuffer<T> buf, int reqCapacity)
|
||||||
|
{
|
||||||
|
int normCapacity = this.NormalizeCapacity(reqCapacity);
|
||||||
|
if (this.IsTinyOrSmall(normCapacity))
|
||||||
|
{
|
||||||
|
// capacity < pageSize
|
||||||
|
int tableIdx;
|
||||||
|
PoolSubpage<T>[] table;
|
||||||
|
bool tiny = IsTiny(normCapacity);
|
||||||
|
if (tiny)
|
||||||
|
{
|
||||||
|
// < 512
|
||||||
|
if (cache.AllocateTiny(this, buf, reqCapacity, normCapacity))
|
||||||
|
{
|
||||||
|
// was able to allocate out of the cache so move on
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
tableIdx = TinyIdx(normCapacity);
|
||||||
|
table = this.tinySubpagePools;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
if (cache.AllocateSmall(this, buf, reqCapacity, normCapacity))
|
||||||
|
{
|
||||||
|
// was able to allocate out of the cache so move on
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
tableIdx = SmallIdx(normCapacity);
|
||||||
|
table = this.smallSubpagePools;
|
||||||
|
}
|
||||||
|
|
||||||
|
PoolSubpage<T> head = table[tableIdx];
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Synchronize on the head. This is needed as {@link PoolSubpage#allocate()} and
|
||||||
|
* {@link PoolSubpage#free(int)} may modify the doubly linked list as well.
|
||||||
|
*/
|
||||||
|
lock (head)
|
||||||
|
{
|
||||||
|
PoolSubpage<T> s = head.Next;
|
||||||
|
if (s != head)
|
||||||
|
{
|
||||||
|
Contract.Assert(s.DoNotDestroy && s.ElemSize == normCapacity);
|
||||||
|
long handle = s.Allocate();
|
||||||
|
Contract.Assert(handle >= 0);
|
||||||
|
s.Chunk.InitBufWithSubpage(buf, handle, reqCapacity);
|
||||||
|
|
||||||
|
if (tiny)
|
||||||
|
{
|
||||||
|
++this.allocationsTiny;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
++this.allocationsSmall;
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
this.AllocateNormal(buf, reqCapacity, normCapacity);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (normCapacity <= this.ChunkSize)
|
||||||
|
{
|
||||||
|
if (cache.AllocateNormal(this, buf, reqCapacity, normCapacity))
|
||||||
|
{
|
||||||
|
// was able to allocate out of the cache so move on
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
this.AllocateNormal(buf, reqCapacity, normCapacity);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
// Huge allocations are never served via the cache so just call allocateHuge
|
||||||
|
this.AllocateHuge(buf, reqCapacity);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
[MethodImpl(MethodImplOptions.Synchronized)]
|
||||||
|
void AllocateNormal(PooledByteBuffer<T> buf, int reqCapacity, int normCapacity)
|
||||||
|
{
|
||||||
|
if (this.q050.Allocate(buf, reqCapacity, normCapacity) || this.q025.Allocate(buf, reqCapacity, normCapacity)
|
||||||
|
|| this.q000.Allocate(buf, reqCapacity, normCapacity) || this.qInit.Allocate(buf, reqCapacity, normCapacity)
|
||||||
|
|| this.q075.Allocate(buf, reqCapacity, normCapacity))
|
||||||
|
{
|
||||||
|
++this.allocationsNormal;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add a new chunk.
|
||||||
|
PoolChunk<T> c = this.NewChunk(this.PageSize, this.maxOrder, this.PageShifts, this.ChunkSize);
|
||||||
|
long handle = c.Allocate(normCapacity);
|
||||||
|
++this.allocationsNormal;
|
||||||
|
Contract.Assert(handle > 0);
|
||||||
|
c.InitBuf(buf, handle, reqCapacity);
|
||||||
|
this.qInit.Add(c);
|
||||||
|
}
|
||||||
|
|
||||||
|
void AllocateHuge(PooledByteBuffer<T> buf, int reqCapacity)
|
||||||
|
{
|
||||||
|
PoolChunk<T> chunk = this.NewUnpooledChunk(reqCapacity);
|
||||||
|
Interlocked.Add(ref this.activeBytesHuge, chunk.ChunkSize);
|
||||||
|
buf.InitUnpooled(chunk, reqCapacity);
|
||||||
|
Interlocked.Increment(ref this.allocationsHuge);
|
||||||
|
}
|
||||||
|
|
||||||
|
internal void Free(PoolChunk<T> chunk, long handle, int normCapacity, PoolThreadCache<T> cache)
|
||||||
|
{
|
||||||
|
if (chunk.Unpooled)
|
||||||
|
{
|
||||||
|
int size = chunk.ChunkSize;
|
||||||
|
this.DestroyChunk(chunk);
|
||||||
|
Interlocked.Add(ref this.activeBytesHuge, -size);
|
||||||
|
Interlocked.Increment(ref this.deallocationsHuge);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
SizeClass sc = this.SizeClass(normCapacity);
|
||||||
|
if (cache != null && cache.Add(this, chunk, handle, normCapacity, sc))
|
||||||
|
{
|
||||||
|
// cached, so do not free it.
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
this.FreeChunk(chunk, handle, sc);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
SizeClass SizeClass(int normCapacity)
|
||||||
|
{
|
||||||
|
if (!this.IsTinyOrSmall(normCapacity))
|
||||||
|
{
|
||||||
|
return Buffers.SizeClass.Normal;
|
||||||
|
}
|
||||||
|
return IsTiny(normCapacity) ? Buffers.SizeClass.Tiny : Buffers.SizeClass.Small;
|
||||||
|
}
|
||||||
|
|
||||||
|
internal void FreeChunk(PoolChunk<T> chunk, long handle, SizeClass sizeClass)
|
||||||
|
{
|
||||||
|
bool mustDestroyChunk;
|
||||||
|
lock (this)
|
||||||
|
{
|
||||||
|
switch (sizeClass)
|
||||||
|
{
|
||||||
|
case Buffers.SizeClass.Normal:
|
||||||
|
++this.deallocationsNormal;
|
||||||
|
break;
|
||||||
|
case Buffers.SizeClass.Small:
|
||||||
|
++this.deallocationsSmall;
|
||||||
|
break;
|
||||||
|
case Buffers.SizeClass.Tiny:
|
||||||
|
++this.deallocationsTiny;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
throw new ArgumentOutOfRangeException();
|
||||||
|
}
|
||||||
|
mustDestroyChunk = !chunk.Parent.Free(chunk, handle);
|
||||||
|
}
|
||||||
|
if (mustDestroyChunk)
|
||||||
|
{
|
||||||
|
// DestroyChunk does not need to be called while holding the synchronized lock.
|
||||||
|
this.DestroyChunk(chunk);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
internal PoolSubpage<T> FindSubpagePoolHead(int elemSize)
|
||||||
|
{
|
||||||
|
int tableIdx;
|
||||||
|
PoolSubpage<T>[] table;
|
||||||
|
if (IsTiny(elemSize))
|
||||||
|
{
|
||||||
|
// < 512
|
||||||
|
tableIdx = elemSize.RightUShift(4);
|
||||||
|
table = this.tinySubpagePools;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
tableIdx = 0;
|
||||||
|
elemSize = elemSize.RightUShift(10);
|
||||||
|
while (elemSize != 0)
|
||||||
|
{
|
||||||
|
elemSize = elemSize.RightUShift(1);
|
||||||
|
tableIdx++;
|
||||||
|
}
|
||||||
|
table = this.smallSubpagePools;
|
||||||
|
}
|
||||||
|
|
||||||
|
return table[tableIdx];
|
||||||
|
}
|
||||||
|
|
||||||
|
internal int NormalizeCapacity(int reqCapacity)
|
||||||
|
{
|
||||||
|
Contract.Requires(reqCapacity >= 0);
|
||||||
|
|
||||||
|
if (reqCapacity >= this.ChunkSize)
|
||||||
|
{
|
||||||
|
return reqCapacity;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!IsTiny(reqCapacity))
|
||||||
|
{
|
||||||
|
// >= 512
|
||||||
|
// Doubled
|
||||||
|
|
||||||
|
int normalizedCapacity = reqCapacity;
|
||||||
|
normalizedCapacity--;
|
||||||
|
normalizedCapacity |= normalizedCapacity.RightUShift(1);
|
||||||
|
normalizedCapacity |= normalizedCapacity.RightUShift(2);
|
||||||
|
normalizedCapacity |= normalizedCapacity.RightUShift(4);
|
||||||
|
normalizedCapacity |= normalizedCapacity.RightUShift(8);
|
||||||
|
normalizedCapacity |= normalizedCapacity.RightUShift(16);
|
||||||
|
normalizedCapacity++;
|
||||||
|
|
||||||
|
if (normalizedCapacity < 0)
|
||||||
|
{
|
||||||
|
normalizedCapacity = normalizedCapacity.RightUShift(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
return normalizedCapacity;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Quantum-spaced
|
||||||
|
if ((reqCapacity & 15) == 0)
|
||||||
|
{
|
||||||
|
return reqCapacity;
|
||||||
|
}
|
||||||
|
|
||||||
|
return (reqCapacity & ~15) + 16;
|
||||||
|
}
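
// Worked example (editor's note) for the rounding above:
//   reqCapacity 19   -> 32    (< 512: round up to the next multiple of 16)
//   reqCapacity 1000 -> 1024  (>= 512: round up to the next power of two)
//   reqCapacity 5000 -> 8192
//   reqCapacity >= ChunkSize is returned unchanged and later served as a huge allocation.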
|
||||||
|
|
||||||
|
internal void Reallocate(PooledByteBuffer<T> buf, int newCapacity, bool freeOldMemory)
|
||||||
|
{
|
||||||
|
Contract.Requires(newCapacity >= 0 && newCapacity <= buf.MaxCapacity);
|
||||||
|
|
||||||
|
int oldCapacity = buf.Length;
|
||||||
|
if (oldCapacity == newCapacity)
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
PoolChunk<T> oldChunk = buf.Chunk;
|
||||||
|
long oldHandle = buf.Handle;
|
||||||
|
T oldMemory = buf.Memory;
|
||||||
|
int oldOffset = buf.Offset;
|
||||||
|
int oldMaxLength = buf.MaxLength;
|
||||||
|
int readerIndex = buf.ReaderIndex;
|
||||||
|
int writerIndex = buf.WriterIndex;
|
||||||
|
|
||||||
|
this.Allocate(this.Parent.ThreadCache<T>(), buf, newCapacity);
|
||||||
|
if (newCapacity > oldCapacity)
|
||||||
|
{
|
||||||
|
this.MemoryCopy(
|
||||||
|
oldMemory, oldOffset,
|
||||||
|
buf.Memory, buf.Offset, oldCapacity);
|
||||||
|
}
|
||||||
|
else if (newCapacity < oldCapacity)
|
||||||
|
{
|
||||||
|
if (readerIndex < newCapacity)
|
||||||
|
{
|
||||||
|
if (writerIndex > newCapacity)
|
||||||
|
{
|
||||||
|
writerIndex = newCapacity;
|
||||||
|
}
|
||||||
|
this.MemoryCopy(
|
||||||
|
oldMemory, oldOffset + readerIndex,
|
||||||
|
buf.Memory, buf.Offset + readerIndex, writerIndex - readerIndex);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
readerIndex = writerIndex = newCapacity;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.SetIndex(readerIndex, writerIndex);
|
||||||
|
|
||||||
|
if (freeOldMemory)
|
||||||
|
{
|
||||||
|
this.Free(oldChunk, oldHandle, oldMaxLength, buf.Cache);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public int NumTinySubpages => this.tinySubpagePools.Length;
|
||||||
|
|
||||||
|
public int NumSmallSubpages => this.smallSubpagePools.Length;
|
||||||
|
|
||||||
|
public int NumChunkLists => this.chunkListMetrics.Count;
|
||||||
|
|
||||||
|
public IReadOnlyList<IPoolSubpageMetric> TinySubpages => SubPageMetricList(this.tinySubpagePools);
|
||||||
|
|
||||||
|
public IReadOnlyList<IPoolSubpageMetric> SmallSubpages => SubPageMetricList(this.smallSubpagePools);
|
||||||
|
|
||||||
|
public IReadOnlyList<IPoolChunkListMetric> ChunkLists => this.chunkListMetrics;
|
||||||
|
|
||||||
|
static List<IPoolSubpageMetric> SubPageMetricList(PoolSubpage<T>[] pages)
|
||||||
|
{
|
||||||
|
var metrics = new List<IPoolSubpageMetric>();
|
||||||
|
for (int i = 1; i < pages.Length; i++)
|
||||||
|
{
|
||||||
|
PoolSubpage<T> head = pages[i];
|
||||||
|
if (head.Next == head)
|
||||||
|
{
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
PoolSubpage<T> s = head.Next;
|
||||||
|
for (;;)
|
||||||
|
{
|
||||||
|
metrics.Add(s);
|
||||||
|
s = s.Next;
|
||||||
|
if (s == head)
|
||||||
|
{
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return metrics;
|
||||||
|
}
|
||||||
|
|
||||||
|
public long NumAllocations => this.allocationsTiny + this.allocationsSmall + this.allocationsNormal + this.NumHugeAllocations;
|
||||||
|
|
||||||
|
public long NumTinyAllocations => this.allocationsTiny;
|
||||||
|
|
||||||
|
public long NumSmallAllocations => this.allocationsSmall;
|
||||||
|
|
||||||
|
public long NumNormalAllocations => this.allocationsNormal;
|
||||||
|
|
||||||
|
public long NumDeallocations => this.deallocationsTiny + this.deallocationsSmall + this.deallocationsNormal + this.NumHugeDeallocations;
|
||||||
|
|
||||||
|
public long NumTinyDeallocations => this.deallocationsTiny;
|
||||||
|
|
||||||
|
public long NumSmallDeallocations => this.deallocationsSmall;
|
||||||
|
|
||||||
|
public long NumNormalDeallocations => this.deallocationsNormal;
|
||||||
|
|
||||||
|
public long NumHugeAllocations => Volatile.Read(ref this.allocationsHuge);
|
||||||
|
|
||||||
|
public long NumHugeDeallocations => Volatile.Read(ref this.deallocationsHuge);
|
||||||
|
|
||||||
|
public long NumActiveAllocations => Math.Max(this.NumAllocations - this.NumDeallocations, 0);
|
||||||
|
|
||||||
|
public long NumActiveTinyAllocations => Math.Max(this.NumTinyAllocations - this.NumTinyDeallocations, 0);
|
||||||
|
|
||||||
|
public long NumActiveSmallAllocations => Math.Max(this.NumSmallAllocations - this.NumSmallDeallocations, 0);
|
||||||
|
|
||||||
|
public long NumActiveNormalAllocations
|
||||||
|
{
|
||||||
|
get
|
||||||
|
{
|
||||||
|
long val;
|
||||||
|
lock (this)
|
||||||
|
{
|
||||||
|
val = this.NumNormalAllocations - this.NumNormalDeallocations;
|
||||||
|
}
|
||||||
|
return Math.Max(val, 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public long NumActiveHugeAllocations => Math.Max(this.NumHugeAllocations - this.NumHugeDeallocations, 0);
|
||||||
|
|
||||||
|
public long NumActiveBytes
|
||||||
|
{
|
||||||
|
get
|
||||||
|
{
|
||||||
|
long val = Volatile.Read(ref this.activeBytesHuge);
|
||||||
|
lock (this)
|
||||||
|
{
|
||||||
|
for (int i = 0; i < this.chunkListMetrics.Count; i++)
|
||||||
|
{
|
||||||
|
foreach (IPoolChunkMetric m in this.chunkListMetrics[i])
|
||||||
|
{
|
||||||
|
val += m.ChunkSize;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Math.Max(0, val);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
protected abstract PoolChunk<T> NewChunk(int pageSize, int maxOrder, int pageShifts, int chunkSize);
|
||||||
|
|
||||||
|
protected abstract PoolChunk<T> NewUnpooledChunk(int capacity);
|
||||||
|
|
||||||
|
protected abstract PooledByteBuffer<T> NewByteBuf(int maxCapacity);
|
||||||
|
|
||||||
|
protected abstract void MemoryCopy(T src, int srcOffset, T dst, int dstOffset, int length);
|
||||||
|
|
||||||
|
protected abstract void DestroyChunk(PoolChunk<T> chunk);
|
||||||
|
|
||||||
|
[MethodImpl(MethodImplOptions.Synchronized)]
|
||||||
|
public override string ToString()
|
||||||
|
{
|
||||||
|
StringBuilder buf = new StringBuilder()
|
||||||
|
.Append("Chunk(s) at 0~25%:")
|
||||||
|
.Append(StringUtil.Newline)
|
||||||
|
.Append(this.qInit)
|
||||||
|
.Append(StringUtil.Newline)
|
||||||
|
.Append("Chunk(s) at 0~50%:")
|
||||||
|
.Append(StringUtil.Newline)
|
||||||
|
.Append(this.q000)
|
||||||
|
.Append(StringUtil.Newline)
|
||||||
|
.Append("Chunk(s) at 25~75%:")
|
||||||
|
.Append(StringUtil.Newline)
|
||||||
|
.Append(this.q025)
|
||||||
|
.Append(StringUtil.Newline)
|
||||||
|
.Append("Chunk(s) at 50~100%:")
|
||||||
|
.Append(StringUtil.Newline)
|
||||||
|
.Append(this.q050)
|
||||||
|
.Append(StringUtil.Newline)
|
||||||
|
.Append("Chunk(s) at 75~100%:")
|
||||||
|
.Append(StringUtil.Newline)
|
||||||
|
.Append(this.q075)
|
||||||
|
.Append(StringUtil.Newline)
|
||||||
|
.Append("Chunk(s) at 100%:")
|
||||||
|
.Append(StringUtil.Newline)
|
||||||
|
.Append(this.q100)
|
||||||
|
.Append(StringUtil.Newline)
|
||||||
|
.Append("tiny subpages:");
|
||||||
|
for (int i = 1; i < this.tinySubpagePools.Length; i++)
|
||||||
|
{
|
||||||
|
PoolSubpage<T> head = this.tinySubpagePools[i];
|
||||||
|
if (head.Next == head)
|
||||||
|
{
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.Append(StringUtil.Newline)
|
||||||
|
.Append(i)
|
||||||
|
.Append(": ");
|
||||||
|
PoolSubpage<T> s = head.Next;
|
||||||
|
for (;;)
|
||||||
|
{
|
||||||
|
buf.Append(s);
|
||||||
|
s = s.Next;
|
||||||
|
if (s == head)
|
||||||
|
{
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
buf.Append(StringUtil.Newline)
|
||||||
|
.Append("small subpages:");
|
||||||
|
for (int i = 1; i < this.smallSubpagePools.Length; i++)
|
||||||
|
{
|
||||||
|
PoolSubpage<T> head = this.smallSubpagePools[i];
|
||||||
|
if (head.Next == head)
|
||||||
|
{
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.Append(StringUtil.Newline)
|
||||||
|
.Append(i)
|
||||||
|
.Append(": ");
|
||||||
|
PoolSubpage<T> s = head.Next;
|
||||||
|
for (;;)
|
||||||
|
{
|
||||||
|
buf.Append(s);
|
||||||
|
s = s.Next;
|
||||||
|
if (s == head)
|
||||||
|
{
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
buf.Append(StringUtil.Newline);
|
||||||
|
|
||||||
|
return buf.ToString();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sealed class HeapArena : PoolArena<byte[]>
|
||||||
|
{
|
||||||
|
public HeapArena(PooledByteBufferAllocator parent, int pageSize, int maxOrder, int pageShifts, int chunkSize)
|
||||||
|
: base(parent, pageSize, maxOrder, pageShifts, chunkSize)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
protected override PoolChunk<byte[]> NewChunk(int pageSize, int maxOrder, int pageShifts, int chunkSize) => new PoolChunk<byte[]>(this, new byte[chunkSize], pageSize, maxOrder, pageShifts, chunkSize);
|
||||||
|
|
||||||
|
protected override PoolChunk<byte[]> NewUnpooledChunk(int capacity) => new PoolChunk<byte[]>(this, new byte[capacity], capacity);
|
||||||
|
|
||||||
|
protected override void DestroyChunk(PoolChunk<byte[]> chunk)
|
||||||
|
{
|
||||||
|
// Rely on GC.
|
||||||
|
}
|
||||||
|
|
||||||
|
protected override PooledByteBuffer<byte[]> NewByteBuf(int maxCapacity) => PooledHeapByteBuffer.NewInstance(maxCapacity);
|
||||||
|
|
||||||
|
protected override void MemoryCopy(byte[] src, int srcOffset, byte[] dst, int dstOffset, int length)
|
||||||
|
{
|
||||||
|
if (length == 0)
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
Array.Copy(src, srcOffset, dst, dstOffset, length);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
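
For orientation, a standalone sketch of the size-class decision that Allocate makes above, assuming an 8192-byte page and a 16 MiB chunk (hypothetical defaults chosen only for this sketch):

static string Classify(int normCapacity, int pageSize = 8192, int chunkSize = 16 * 1024 * 1024)
{
    if (normCapacity > chunkSize) return "Huge";     // served by an unpooled chunk, never cached
    if (normCapacity >= pageSize) return "Normal";   // run of one or more pages from a PoolChunk
    return normCapacity < 512 ? "Tiny" : "Small";    // served from the tiny/small subpage pools
}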
|
|
@ -0,0 +1,457 @@
|
||||||
|
// Copyright (c) Microsoft. All rights reserved.
|
||||||
|
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||||
|
|
||||||
|
namespace DotNetty.Buffers
|
||||||
|
{
|
||||||
|
using System.Diagnostics.Contracts;
|
||||||
|
using System.Runtime.CompilerServices;
|
||||||
|
using System.Text;
|
||||||
|
using DotNetty.Common.Utilities;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Description of algorithm for PageRun/PoolSubpage allocation from PoolChunk
|
||||||
|
/// Notation: The following terms are important to understand the code
|
||||||
|
/// > page - a page is the smallest unit of memory chunk that can be allocated
|
||||||
|
/// > chunk - a chunk is a collection of pages
|
||||||
|
/// > in this code chunkSize = 2^{maxOrder} * pageSize
|
||||||
|
/// To begin we allocate a byte array of size = chunkSize
|
||||||
|
/// Whenever a ByteBuf of given size needs to be created we search for the first position
|
||||||
|
/// in the byte array that has enough empty space to accommodate the requested size and
|
||||||
|
/// return a (long) handle that encodes this offset information, (this memory segment is then
|
||||||
|
/// marked as reserved so it is always used by exactly one ByteBuf and no more)
|
||||||
|
/// For simplicity all sizes are normalized according to PoolArena#normalizeCapacity method
|
||||||
|
/// This ensures that when we request for memory segments of size >= pageSize the normalizedCapacity
|
||||||
|
/// equals the next nearest power of 2
|
||||||
|
/// To search for the first offset in chunk that has at least requested size available we construct a
|
||||||
|
/// complete balanced binary tree and store it in an array (just like heaps) - memoryMap
|
||||||
|
/// The tree looks like this (the size of each node being mentioned in the parenthesis)
|
||||||
|
/// depth=0 1 node (chunkSize)
|
||||||
|
/// depth=1 2 nodes (chunkSize/2)
|
||||||
|
/// ..
|
||||||
|
/// ..
|
||||||
|
/// depth=d 2^d nodes (chunkSize/2^d)
|
||||||
|
/// ..
|
||||||
|
/// depth=maxOrder 2^maxOrder nodes (chunkSize/2^{maxOrder} = pageSize)
|
||||||
|
/// depth=maxOrder is the last level and the leaves consist of pages
|
||||||
|
/// With this tree available searching in chunkArray translates like this:
|
||||||
|
/// To allocate a memory segment of size chunkSize/2^k we search for the first node (from left) at height k
|
||||||
|
/// which is unused
|
||||||
|
/// Algorithm:
|
||||||
|
/// ----------
|
||||||
|
/// Encode the tree in memoryMap with the notation
|
||||||
|
/// memoryMap[id] = x => in the subtree rooted at id, the first node that is free to be allocated
|
||||||
|
/// is at depth x (counted from depth=0) i.e., at depths [depth_of_id, x), there is no node that is free
|
||||||
|
/// As we allocate & free nodes, we update values stored in memoryMap so that the property is maintained
|
||||||
|
/// Initialization -
|
||||||
|
/// In the beginning we construct the memoryMap array by storing the depth of a node at each node
|
||||||
|
/// i.e., memoryMap[id] = depth_of_id
|
||||||
|
/// Observations:
|
||||||
|
/// -------------
|
||||||
|
/// 1) memoryMap[id] = depth_of_id => it is free / unallocated
|
||||||
|
/// 2) memoryMap[id] > depth_of_id => at least one of its child nodes is allocated, so we cannot allocate it, but
|
||||||
|
/// some of its children can still be allocated based on their availability
|
||||||
|
/// 3) memoryMap[id] = maxOrder + 1 => the node is fully allocated & thus none of its children can be allocated, it
|
||||||
|
/// is thus marked as unusable
|
||||||
|
/// Algorithm: [allocateNode(d) => we want to find the first node (from left) at height h that can be allocated]
|
||||||
|
/// ----------
|
||||||
|
/// 1) start at root (i.e., depth = 0 or id = 1)
|
||||||
|
/// 2) if memoryMap[1] > d => cannot be allocated from this chunk
|
||||||
|
/// 3) if left node value <= h; we can allocate from left subtree so move to left and repeat until found
|
||||||
|
/// 4) else try in right subtree
|
||||||
|
/// Algorithm: [allocateRun(size)]
|
||||||
|
/// ----------
|
||||||
|
/// 1) Compute d = log_2(chunkSize/size)
|
||||||
|
/// 2) Return allocateNode(d)
|
||||||
|
/// Algorithm: [allocateSubpage(size)]
|
||||||
|
/// ----------
|
||||||
|
/// 1) use allocateNode(maxOrder) to find an empty (i.e., unused) leaf (i.e., page)
|
||||||
|
/// 2) use this handle to construct the PoolSubpage object or if it already exists just call init(normCapacity)
|
||||||
|
/// note that this PoolSubpage object is added to subpagesPool in the PoolArena when we init() it
|
||||||
|
/// Note:
|
||||||
|
/// -----
|
||||||
|
/// In the implementation for improving cache coherence,
|
||||||
|
/// we store 2 pieces of information (i.e., 2 byte values) as a short value in memoryMap
|
||||||
|
/// memoryMap[id]= (depth_of_id, x)
|
||||||
|
/// where as per convention defined above
|
||||||
|
/// the second value (i.e, x) indicates that the first node which is free to be allocated is at depth x (from root)
|
||||||
|
/// </summary>
|
||||||
|
sealed class PoolChunk<T> : IPoolChunkMetric
|
||||||
|
{
|
||||||
|
internal readonly PoolArena<T> Arena;
|
||||||
|
internal readonly T Memory;
|
||||||
|
internal readonly bool Unpooled;
|
||||||
|
|
||||||
|
readonly sbyte[] memoryMap;
|
||||||
|
readonly sbyte[] depthMap;
|
||||||
|
readonly PoolSubpage<T>[] subpages;
|
||||||
|
/** Used to determine if the requested capacity is equal to or greater than pageSize. */
|
||||||
|
readonly int subpageOverflowMask;
|
||||||
|
readonly int pageSize;
|
||||||
|
readonly int pageShifts;
|
||||||
|
readonly int maxOrder;
|
||||||
|
readonly int chunkSize;
|
||||||
|
readonly int log2ChunkSize;
|
||||||
|
readonly int maxSubpageAllocs;
|
||||||
|
/** Used to mark memory as unusable */
|
||||||
|
readonly sbyte unusable;
|
||||||
|
|
||||||
|
int freeBytes;
|
||||||
|
|
||||||
|
internal PoolChunkList<T> Parent;
|
||||||
|
internal PoolChunk<T> Prev;
|
||||||
|
internal PoolChunk<T> Next;
|
||||||
|
|
||||||
|
// TODO: Test if adding padding helps under contention
|
||||||
|
//private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
|
||||||
|
|
||||||
|
internal PoolChunk(PoolArena<T> arena, T memory, int pageSize, int maxOrder, int pageShifts, int chunkSize)
|
||||||
|
{
|
||||||
|
Contract.Requires(maxOrder < 30, "maxOrder should be < 30, but is: " + maxOrder);
|
||||||
|
|
||||||
|
this.Unpooled = false;
|
||||||
|
this.Arena = arena;
|
||||||
|
this.Memory = memory;
|
||||||
|
this.pageSize = pageSize;
|
||||||
|
this.pageShifts = pageShifts;
|
||||||
|
this.maxOrder = maxOrder;
|
||||||
|
this.chunkSize = chunkSize;
|
||||||
|
this.unusable = (sbyte)(maxOrder + 1);
|
||||||
|
this.log2ChunkSize = IntegerExtensions.Log2(chunkSize);
|
||||||
|
this.subpageOverflowMask = ~(pageSize - 1);
|
||||||
|
this.freeBytes = chunkSize;
|
||||||
|
|
||||||
|
Contract.Assert(maxOrder < 30, "maxOrder should be < 30, but is: " + maxOrder);
|
||||||
|
this.maxSubpageAllocs = 1 << maxOrder;
|
||||||
|
|
||||||
|
// Generate the memory map.
|
||||||
|
this.memoryMap = new sbyte[this.maxSubpageAllocs << 1];
|
||||||
|
this.depthMap = new sbyte[this.memoryMap.Length];
|
||||||
|
int memoryMapIndex = 1;
|
||||||
|
for (int d = 0; d <= maxOrder; ++d)
|
||||||
|
{
|
||||||
|
// move down the tree one level at a time
|
||||||
|
int depth = 1 << d;
|
||||||
|
for (int p = 0; p < depth; ++p)
|
||||||
|
{
|
||||||
|
// in each level traverse left to right and set value to the depth of subtree
|
||||||
|
this.memoryMap[memoryMapIndex] = (sbyte)d;
|
||||||
|
this.depthMap[memoryMapIndex] = (sbyte)d;
|
||||||
|
memoryMapIndex++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
this.subpages = this.NewSubpageArray(this.maxSubpageAllocs);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Creates a special chunk that is not pooled. */
|
||||||
|
|
||||||
|
internal PoolChunk(PoolArena<T> arena, T memory, int size)
|
||||||
|
{
|
||||||
|
this.Unpooled = true;
|
||||||
|
this.Arena = arena;
|
||||||
|
this.Memory = memory;
|
||||||
|
this.memoryMap = null;
|
||||||
|
this.depthMap = null;
|
||||||
|
this.subpages = null;
|
||||||
|
this.subpageOverflowMask = 0;
|
||||||
|
this.pageSize = 0;
|
||||||
|
this.pageShifts = 0;
|
||||||
|
this.maxOrder = 0;
|
||||||
|
this.unusable = (sbyte)(this.maxOrder + 1);
|
||||||
|
this.chunkSize = size;
|
||||||
|
this.log2ChunkSize = IntegerExtensions.Log2(this.chunkSize);
|
||||||
|
this.maxSubpageAllocs = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
PoolSubpage<T>[] NewSubpageArray(int size) => new PoolSubpage<T>[size];
|
||||||
|
|
||||||
|
public int Usage
|
||||||
|
{
|
||||||
|
get
|
||||||
|
{
|
||||||
|
int freeBytes = this.freeBytes;
|
||||||
|
if (freeBytes == 0)
|
||||||
|
{
|
||||||
|
return 100;
|
||||||
|
}
|
||||||
|
|
||||||
|
int freePercentage = (int)(freeBytes * 100L / this.chunkSize);
|
||||||
|
if (freePercentage == 0)
|
||||||
|
{
|
||||||
|
return 99;
|
||||||
|
}
|
||||||
|
return 100 - freePercentage;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
internal long Allocate(int normCapacity)
|
||||||
|
{
|
||||||
|
if ((normCapacity & this.subpageOverflowMask) != 0)
|
||||||
|
{
|
||||||
|
// >= pageSize
|
||||||
|
return this.AllocateRun(normCapacity);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
return this.AllocateSubpage(normCapacity);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update method used by allocate.
* This is triggered only when a successor is allocated and all its predecessors
* need to update their state. The value stored is the minimal depth at which the
* subtree rooted at id still has some free space.
|
||||||
|
*
|
||||||
|
* @param id id
|
||||||
|
*/
|
||||||
|
|
||||||
|
void UpdateParentsAlloc(int id)
|
||||||
|
{
|
||||||
|
while (id > 1)
|
||||||
|
{
|
||||||
|
int parentId = id.RightUShift(1);
|
||||||
|
sbyte val1 = this.Value(id);
|
||||||
|
sbyte val2 = this.Value(id ^ 1);
|
||||||
|
sbyte val = val1 < val2 ? val1 : val2;
|
||||||
|
this.SetValue(parentId, val);
|
||||||
|
id = parentId;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Update method used by free.
* This needs to handle the special case when both children are completely free,
* in which case the parent can be directly allocated on a request of size = child-size * 2.
|
||||||
|
*
|
||||||
|
* @param id id
|
||||||
|
*/
|
||||||
|
|
||||||
|
void UpdateParentsFree(int id)
|
||||||
|
{
|
||||||
|
int logChild = this.Depth(id) + 1;
|
||||||
|
while (id > 1)
|
||||||
|
{
|
||||||
|
int parentId = id.RightUShift(1);
|
||||||
|
sbyte val1 = this.Value(id);
|
||||||
|
sbyte val2 = this.Value(id ^ 1);
|
||||||
|
logChild -= 1; // in first iteration equals log, subsequently reduce 1 from logChild as we traverse up
|
||||||
|
|
||||||
|
if (val1 == logChild && val2 == logChild)
|
||||||
|
{
|
||||||
|
this.SetValue(parentId, (sbyte)(logChild - 1));
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
sbyte val = val1 < val2 ? val1 : val2;
|
||||||
|
this.SetValue(parentId, val);
|
||||||
|
}
|
||||||
|
|
||||||
|
id = parentId;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Algorithm to allocate an index in memoryMap when we query for a free node
|
||||||
|
* at depth d
|
||||||
|
*
|
||||||
|
* @param d depth
|
||||||
|
* @return index in memoryMap
|
||||||
|
*/
|
||||||
|
|
||||||
|
int AllocateNode(int d)
|
||||||
|
{
|
||||||
|
int id = 1;
|
||||||
|
int initial = -(1 << d); // has last d bits = 0 and rest all = 1
|
||||||
|
sbyte val = this.Value(id);
|
||||||
|
if (val > d)
|
||||||
|
{
|
||||||
|
// unusable
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
while (val < d || (id & initial) == 0)
|
||||||
|
{
|
||||||
|
// id & initial == 1 << d for all ids at depth d, for < d it is 0
|
||||||
|
id <<= 1;
|
||||||
|
val = this.Value(id);
|
||||||
|
if (val > d)
|
||||||
|
{
|
||||||
|
id ^= 1;
|
||||||
|
val = this.Value(id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sbyte value = this.Value(id);
|
||||||
|
Contract.Assert(value == d && (id & initial) == 1 << d, $"val = {value}, id & initial = {id & initial}, d = {d}");
|
||||||
|
this.SetValue(id, this.unusable); // mark as unusable
|
||||||
|
this.UpdateParentsAlloc(id);
|
||||||
|
return id;
|
||||||
|
}
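
// Worked example (editor's note): with maxOrder == 2 the tree has ids 1..7 and
// memoryMap starts as [_,0,1,1,2,2,2,2] (value == depth means the node is free).
// AllocateNode(1) walks: id=1 (val 0 < 1, descend) -> id=2 (val 1, at depth 1, stop),
// marks memoryMap[2] = unusable (3) and updates the parent to
// memoryMap[1] = min(memoryMap[2], memoryMap[3]) = 1, i.e. the smallest block still
// free under the root now sits at depth 1.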
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Allocate a run of pages (>=1)
|
||||||
|
*
|
||||||
|
* @param normCapacity normalized capacity
|
||||||
|
* @return index in memoryMap
|
||||||
|
*/
|
||||||
|
|
||||||
|
long AllocateRun(int normCapacity)
|
||||||
|
{
|
||||||
|
int d = this.maxOrder - (IntegerExtensions.Log2(normCapacity) - this.pageShifts);
|
||||||
|
int id = this.AllocateNode(d);
|
||||||
|
if (id < 0)
|
||||||
|
{
|
||||||
|
return id;
|
||||||
|
}
|
||||||
|
this.freeBytes -= this.RunLength(id);
|
||||||
|
return id;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create/ initialize a new PoolSubpage of normCapacity
|
||||||
|
* Any PoolSubpage created/ initialized here is added to subpage pool in the PoolArena that owns this PoolChunk
|
||||||
|
*
|
||||||
|
* @param normCapacity normalized capacity
|
||||||
|
* @return index in memoryMap
|
||||||
|
*/
|
||||||
|
|
||||||
|
long AllocateSubpage(int normCapacity)
|
||||||
|
{
|
||||||
|
int d = this.maxOrder; // subpages can only be allocated from pages, i.e., leaves
|
||||||
|
int id = this.AllocateNode(d);
|
||||||
|
if (id < 0)
|
||||||
|
{
|
||||||
|
return id;
|
||||||
|
}
|
||||||
|
|
||||||
|
PoolSubpage<T>[] subpages = this.subpages;
|
||||||
|
int pageSize = this.pageSize;
|
||||||
|
|
||||||
|
this.freeBytes -= pageSize;
|
||||||
|
|
||||||
|
int subpageIdx = this.SubpageIdx(id);
|
||||||
|
PoolSubpage<T> subpage = subpages[subpageIdx];
|
||||||
|
if (subpage == null)
|
||||||
|
{
|
||||||
|
subpage = new PoolSubpage<T>(this, id, this.RunOffset(id), pageSize, normCapacity);
|
||||||
|
subpages[subpageIdx] = subpage;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
subpage.Init(normCapacity);
|
||||||
|
}
|
||||||
|
return subpage.Allocate();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Free a subpage or a run of pages
|
||||||
|
* When a subpage is freed from PoolSubpage, it might be added back to subpage pool of the owning PoolArena
|
||||||
|
* If the subpage pool in PoolArena has at least one other PoolSubpage of given elemSize, we can
|
||||||
|
* completely free the owning Page so it is available for subsequent allocations
|
||||||
|
*
|
||||||
|
* @param handle handle to free
|
||||||
|
*/
|
||||||
|
|
||||||
|
internal void Free(long handle)
|
||||||
|
{
|
||||||
|
int memoryMapIdx = MemoryMapIdx(handle);
|
||||||
|
int bitmapIdx = BitmapIdx(handle);
|
||||||
|
|
||||||
|
if (bitmapIdx != 0)
|
||||||
|
{
|
||||||
|
// free a subpage
|
||||||
|
PoolSubpage<T> subpage = this.subpages[this.SubpageIdx(memoryMapIdx)];
|
||||||
|
Contract.Assert(subpage != null && subpage.DoNotDestroy);
|
||||||
|
|
||||||
|
// Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it.
|
||||||
|
// This is needed as we may add it back and so alter the linked-list structure.
|
||||||
|
PoolSubpage<T> head = this.Arena.FindSubpagePoolHead(subpage.ElemSize);
|
||||||
|
lock (head)
|
||||||
|
{
|
||||||
|
if (subpage.Free(bitmapIdx & 0x3FFFFFFF))
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
this.freeBytes += this.RunLength(memoryMapIdx);
|
||||||
|
this.SetValue(memoryMapIdx, this.Depth(memoryMapIdx));
|
||||||
|
this.UpdateParentsFree(memoryMapIdx);
|
||||||
|
}
|
||||||
|
|
||||||
|
internal void InitBuf(PooledByteBuffer<T> buf, long handle, int reqCapacity)
|
||||||
|
{
|
||||||
|
int memoryMapIdx = MemoryMapIdx(handle);
|
||||||
|
int bitmapIdx = BitmapIdx(handle);
|
||||||
|
if (bitmapIdx == 0)
|
||||||
|
{
|
||||||
|
sbyte val = this.Value(memoryMapIdx);
|
||||||
|
Contract.Assert(val == this.unusable, val.ToString());
|
||||||
|
buf.Init(this, handle, this.RunOffset(memoryMapIdx), reqCapacity, this.RunLength(memoryMapIdx),
|
||||||
|
this.Arena.Parent.ThreadCache<T>());
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
this.InitBufWithSubpage(buf, handle, bitmapIdx, reqCapacity);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
internal void InitBufWithSubpage(PooledByteBuffer<T> buf, long handle, int reqCapacity) => this.InitBufWithSubpage(buf, handle, BitmapIdx(handle), reqCapacity);
|
||||||
|
|
||||||
|
void InitBufWithSubpage(PooledByteBuffer<T> buf, long handle, int bitmapIdx, int reqCapacity)
|
||||||
|
{
|
||||||
|
Contract.Assert(bitmapIdx != 0);
|
||||||
|
|
||||||
|
int memoryMapIdx = MemoryMapIdx(handle);
|
||||||
|
|
||||||
|
PoolSubpage<T> subpage = this.subpages[this.SubpageIdx(memoryMapIdx)];
|
||||||
|
Contract.Assert(subpage.DoNotDestroy);
|
||||||
|
Contract.Assert(reqCapacity <= subpage.ElemSize);
|
||||||
|
|
||||||
|
buf.Init(
|
||||||
|
this, handle,
|
||||||
|
this.RunOffset(memoryMapIdx) + (bitmapIdx & 0x3FFFFFFF) * subpage.ElemSize, reqCapacity, subpage.ElemSize,
|
||||||
|
this.Arena.Parent.ThreadCache<T>());
|
||||||
|
}
|
||||||
|
|
||||||
|
sbyte Value(int id) => this.memoryMap[id];
|
||||||
|
|
||||||
|
void SetValue(int id, sbyte val) => this.memoryMap[id] = val;
|
||||||
|
|
||||||
|
sbyte Depth(int id) => this.depthMap[id];
|
||||||
|
|
||||||
|
/// represents the size in #bytes supported by node 'id' in the tree
|
||||||
|
int RunLength(int id) => 1 << this.log2ChunkSize - this.Depth(id);
|
||||||
|
|
||||||
|
int RunOffset(int id)
|
||||||
|
{
|
||||||
|
// represents the 0-based offset in #bytes from start of the byte-array chunk
|
||||||
|
int shift = id ^ 1 << this.Depth(id);
|
||||||
|
return shift * this.RunLength(id);
|
||||||
|
}
|
||||||
|
|
||||||
|
int SubpageIdx(int memoryMapIdx) => memoryMapIdx ^ this.maxSubpageAllocs; // remove highest set bit, to get offset
|
||||||
|
|
||||||
|
static int MemoryMapIdx(long handle) => (int)handle;
|
||||||
|
|
||||||
|
static int BitmapIdx(long handle) => (int)handle.RightUShift(IntegerExtensions.SizeInBits);
|
||||||
|
|
||||||
|
public int ChunkSize => this.chunkSize;
|
||||||
|
|
||||||
|
public int FreeBytes => this.freeBytes;
|
||||||
|
|
||||||
|
public override string ToString()
|
||||||
|
{
|
||||||
|
return new StringBuilder()
|
||||||
|
.Append("Chunk(")
|
||||||
|
.Append(RuntimeHelpers.GetHashCode(this).ToString("X"))
|
||||||
|
.Append(": ")
|
||||||
|
.Append(this.Usage)
|
||||||
|
.Append("%, ")
|
||||||
|
.Append(this.chunkSize - this.freeBytes)
|
||||||
|
.Append('/')
|
||||||
|
.Append(this.chunkSize)
|
||||||
|
.Append(')')
|
||||||
|
.ToString();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
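
A small decoding sketch of the long handle used throughout PoolChunk: the low 32 bits carry the memoryMap node id, and for subpage allocations the high 32 bits carry the bitmap index plus the 0x40000000 marker written by PoolSubpage.ToHandle. The DecodeHandle helper is hypothetical.

static void DecodeHandle(long handle)
{
    int memoryMapIdx = (int)handle;                 // node (leaf page) in the chunk's buddy tree
    int bitmapIdx = (int)((ulong)handle >> 32);     // 0 for a page run; marker | element index for a subpage
    int elementIdx = bitmapIdx & 0x3FFFFFFF;        // strip the marker bit, as InitBufWithSubpage does
    System.Console.WriteLine($"node {memoryMapIdx}, subpage element {elementIdx}");
}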
|
|
@ -0,0 +1,230 @@
|
||||||
|
// Copyright (c) Microsoft. All rights reserved.
|
||||||
|
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||||
|
|
||||||
|
namespace DotNetty.Buffers
|
||||||
|
{
|
||||||
|
using System;
|
||||||
|
using System.Collections;
|
||||||
|
using System.Collections.Generic;
|
||||||
|
using System.Diagnostics.Contracts;
|
||||||
|
using System.Linq;
|
||||||
|
using System.Text;
|
||||||
|
|
||||||
|
sealed class PoolChunkList<T> : IPoolChunkListMetric
|
||||||
|
{
|
||||||
|
static readonly IEnumerable<IPoolChunkMetric> EMPTY_METRICS = Enumerable.Empty<IPoolChunkMetric>();
|
||||||
|
readonly PoolChunkList<T> nextList;
|
||||||
|
readonly int minUsage;
|
||||||
|
readonly int maxUsage;
|
||||||
|
readonly int maxCapacity;
|
||||||
|
|
||||||
|
PoolChunk<T> head;
|
||||||
|
|
||||||
|
// This is only updated once, when the linked list of PoolChunkList instances is created in the PoolArena constructor.
|
||||||
|
PoolChunkList<T> prevList;
|
||||||
|
|
||||||
|
// TODO: Test if adding padding helps under contention
|
||||||
|
//private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
|
||||||
|
|
||||||
|
public PoolChunkList(PoolChunkList<T> nextList, int minUsage, int maxUsage, int chunkSize)
|
||||||
|
{
|
||||||
|
Contract.Assert(minUsage <= maxUsage);
|
||||||
|
|
||||||
|
this.nextList = nextList;
|
||||||
|
this.minUsage = minUsage;
|
||||||
|
this.maxUsage = maxUsage;
|
||||||
|
this.maxCapacity = CalculateMaxCapacity(minUsage, chunkSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Calculates the maximum capacity of a buffer that will ever be possible to allocate out of the {@link PoolChunk}s
|
||||||
|
/// that belong to the {@link PoolChunkList} with the given {@code minUsage} and {@code maxUsage} settings.
|
||||||
|
static int CalculateMaxCapacity(int minUsage, int chunkSize)
|
||||||
|
{
|
||||||
|
minUsage = MinUsage0(minUsage);
|
||||||
|
|
||||||
|
if (minUsage == 100)
|
||||||
|
{
|
||||||
|
// If the minUsage is 100 we can not allocate anything out of this list.
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate the maximum amount of bytes that can be allocated from a PoolChunk in this PoolChunkList.
|
||||||
|
//
|
||||||
|
// As an example:
|
||||||
|
// - If a PoolChunkList has minUsage == 25 we are allowed to allocate at most 75% of the chunkSize because
|
||||||
|
// this is the maximum amount available in any PoolChunk in this PoolChunkList.
|
||||||
|
return (int)(chunkSize * (100L - minUsage) / 100L);
|
||||||
|
}
|
||||||
|
|
||||||
|
internal void PrevList(PoolChunkList<T> prevList)
|
||||||
|
{
|
||||||
|
Contract.Requires(this.prevList == null);
|
||||||
|
this.prevList = prevList;
|
||||||
|
}
|
||||||
|
|
||||||
|
internal bool Allocate(PooledByteBuffer<T> buf, int reqCapacity, int normCapacity)
|
||||||
|
{
|
||||||
|
if (this.head == null || normCapacity > this.maxCapacity)
|
||||||
|
{
|
||||||
|
// Either this PoolChunkList is empty or the requested capacity is larger than the capacity which can
|
||||||
|
// be handled by the PoolChunks that are contained in this PoolChunkList.
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (PoolChunk<T> cur = this.head;;)
|
||||||
|
{
|
||||||
|
long handle = cur.Allocate(normCapacity);
|
||||||
|
if (handle < 0)
|
||||||
|
{
|
||||||
|
cur = cur.Next;
|
||||||
|
if (cur == null)
|
||||||
|
{
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
cur.InitBuf(buf, handle, reqCapacity);
|
||||||
|
if (cur.Usage >= this.maxUsage)
|
||||||
|
{
|
||||||
|
this.Remove(cur);
|
||||||
|
this.nextList.Add(cur);
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
internal bool Free(PoolChunk<T> chunk, long handle)
|
||||||
|
{
|
||||||
|
chunk.Free(handle);
|
||||||
|
if (chunk.Usage < this.minUsage)
|
||||||
|
{
|
||||||
|
this.Remove(chunk);
|
||||||
|
// Move the PoolChunk down the PoolChunkList linked-list.
|
||||||
|
return this.Move0(chunk);
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool Move(PoolChunk<T> chunk)
|
||||||
|
{
|
||||||
|
Contract.Assert(chunk.Usage < this.maxUsage);
|
||||||
|
|
||||||
|
if (chunk.Usage < this.minUsage)
|
||||||
|
{
|
||||||
|
// Move the PoolChunk down the PoolChunkList linked-list.
|
||||||
|
return this.Move0(chunk);
|
||||||
|
}
|
||||||
|
|
||||||
|
// PoolChunk fits into this PoolChunkList, adding it here.
|
||||||
|
this.Add0(chunk);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Moves the {@link PoolChunk} down the {@link PoolChunkList} linked-list so it will end up in the right
|
||||||
|
/// {@link PoolChunkList} that has the correct minUsage / maxUsage with respect to {@link PoolChunk#usage()}.
|
||||||
|
bool Move0(PoolChunk<T> chunk)
|
||||||
|
{
|
||||||
|
if (this.prevList == null)
|
||||||
|
{
|
||||||
|
// There is no previous PoolChunkList, so return false; the PoolChunk will be destroyed and
// all memory associated with it released.
|
||||||
|
Contract.Assert(chunk.Usage == 0);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return this.prevList.Move(chunk);
|
||||||
|
}
|
||||||
|
|
||||||
|
internal void Add(PoolChunk<T> chunk)
|
||||||
|
{
|
||||||
|
if (chunk.Usage >= this.maxUsage)
|
||||||
|
{
|
||||||
|
this.nextList.Add(chunk);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
this.Add0(chunk);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Adds the {@link PoolChunk} to this {@link PoolChunkList}.
|
||||||
|
void Add0(PoolChunk<T> chunk)
|
||||||
|
{
|
||||||
|
chunk.Parent = this;
|
||||||
|
if (this.head == null)
|
||||||
|
{
|
||||||
|
this.head = chunk;
|
||||||
|
chunk.Prev = null;
|
||||||
|
chunk.Next = null;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
chunk.Prev = null;
|
||||||
|
chunk.Next = this.head;
|
||||||
|
this.head.Prev = chunk;
|
||||||
|
this.head = chunk;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void Remove(PoolChunk<T> cur)
|
||||||
|
{
|
||||||
|
if (cur == this.head)
|
||||||
|
{
|
||||||
|
this.head = cur.Next;
|
||||||
|
if (this.head != null)
|
||||||
|
{
|
||||||
|
this.head.Prev = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
PoolChunk<T> next = cur.Next;
|
||||||
|
cur.Prev.Next = next;
|
||||||
|
if (next != null)
|
||||||
|
{
|
||||||
|
next.Prev = cur.Prev;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public int MinUsage => MinUsage0(this.minUsage);
|
||||||
|
|
||||||
|
public int MaxUsage => Math.Min(this.maxUsage, 100);
|
||||||
|
|
||||||
|
static int MinUsage0(int value) => Math.Max(1, value);
|
||||||
|
|
||||||
|
public IEnumerator<IPoolChunkMetric> GetEnumerator() => this.head == null ? EMPTY_METRICS.GetEnumerator() : this.GetEnumeratorInternal();
|
||||||
|
|
||||||
|
IEnumerator IEnumerable.GetEnumerator() => this.GetEnumerator();
|
||||||
|
|
||||||
|
IEnumerator<IPoolChunkMetric> GetEnumeratorInternal()
|
||||||
|
{
|
||||||
|
for (PoolChunk<T> cur = this.head; cur != null;)
|
||||||
|
{
|
||||||
|
yield return cur;
|
||||||
|
cur = cur.Next;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public override string ToString()
|
||||||
|
{
|
||||||
|
if (this.head == null)
|
||||||
|
{
|
||||||
|
return "none";
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf = new StringBuilder();
|
||||||
|
for (PoolChunk<T> cur = this.head;;)
|
||||||
|
{
|
||||||
|
buf.Append(cur);
|
||||||
|
cur = cur.Next;
|
||||||
|
if (cur == null)
|
||||||
|
{
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
buf.Append(Environment.NewLine); // todo: StringUtil.NEWLINE
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.ToString();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
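
A worked example of the maxCapacity guard computed by CalculateMaxCapacity above, using an assumed 16 MiB chunk size; the MaxCapacityFor helper is hypothetical.

static int MaxCapacityFor(int minUsage, int chunkSize) =>
    (int)(chunkSize * (100L - System.Math.Max(1, minUsage)) / 100L);

// MaxCapacityFor(75, 16 * 1024 * 1024) == 4 MiB     -> q075 only serves requests up to 25% of a chunk
// MaxCapacityFor(1,  16 * 1024 * 1024) == ~15.8 MiB -> q000 can serve nearly a whole chunk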
|
|
@ -0,0 +1,267 @@
|
||||||
|
// Copyright (c) Microsoft. All rights reserved.
|
||||||
|
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||||
|
|
||||||
|
namespace DotNetty.Buffers
|
||||||
|
{
|
||||||
|
using System.Diagnostics.Contracts;
|
||||||
|
using DotNetty.Common.Utilities;
|
||||||
|
|
||||||
|
sealed class PoolSubpage<T> : IPoolSubpageMetric
|
||||||
|
{
|
||||||
|
internal readonly PoolChunk<T> Chunk;
|
||||||
|
readonly int memoryMapIdx;
|
||||||
|
readonly int runOffset;
|
||||||
|
readonly int pageSize;
|
||||||
|
readonly long[] bitmap;
|
||||||
|
|
||||||
|
internal PoolSubpage<T> Prev;
|
||||||
|
internal PoolSubpage<T> Next;
|
||||||
|
|
||||||
|
internal bool DoNotDestroy;
|
||||||
|
internal int ElemSize;
|
||||||
|
int maxNumElems;
|
||||||
|
int bitmapLength;
|
||||||
|
int nextAvail;
|
||||||
|
int numAvail;
|
||||||
|
|
||||||
|
// TODO: Test if adding padding helps under contention
|
||||||
|
//private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
|
||||||
|
|
||||||
|
/** Special constructor that creates a linked list head */
|
||||||
|
|
||||||
|
public PoolSubpage(int pageSize)
|
||||||
|
{
|
||||||
|
this.Chunk = null;
|
||||||
|
this.memoryMapIdx = -1;
|
||||||
|
this.runOffset = -1;
|
||||||
|
this.ElemSize = -1;
|
||||||
|
this.pageSize = pageSize;
|
||||||
|
this.bitmap = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
public PoolSubpage(PoolChunk<T> chunk, int memoryMapIdx, int runOffset, int pageSize, int elemSize)
|
||||||
|
{
|
||||||
|
this.Chunk = chunk;
|
||||||
|
this.memoryMapIdx = memoryMapIdx;
|
||||||
|
this.runOffset = runOffset;
|
||||||
|
this.pageSize = pageSize;
|
||||||
|
this.bitmap = new long[pageSize.RightUShift(10)]; // pageSize / 16 / 64
|
||||||
|
this.Init(elemSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void Init(int elemSize)
|
||||||
|
{
|
||||||
|
this.DoNotDestroy = true;
|
||||||
|
this.ElemSize = elemSize;
|
||||||
|
if (elemSize != 0)
|
||||||
|
{
|
||||||
|
this.maxNumElems = this.numAvail = this.pageSize / elemSize;
|
||||||
|
this.nextAvail = 0;
|
||||||
|
this.bitmapLength = this.maxNumElems.RightUShift(6);
|
||||||
|
if ((this.maxNumElems & 63) != 0)
|
||||||
|
{
|
||||||
|
this.bitmapLength++;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (int i = 0; i < this.bitmapLength; i++)
|
||||||
|
{
|
||||||
|
this.bitmap[i] = 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
PoolSubpage<T> head = this.Chunk.Arena.FindSubpagePoolHead(elemSize);
|
||||||
|
lock (head)
|
||||||
|
{
|
||||||
|
this.AddToPool(head);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the bitmap index of the subpage allocation.
|
||||||
|
*/
|
||||||
|
|
||||||
|
internal long Allocate()
|
||||||
|
{
|
||||||
|
if (this.ElemSize == 0)
|
||||||
|
{
|
||||||
|
return this.ToHandle(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Synchronize on the head of the SubpagePool stored in the {@link PoolArena}. This is needed as we synchronize
* on it when calling {@link PoolArena#allocate(PoolThreadCache, int, int)} and try to allocate out of the
|
||||||
|
* {@link PoolSubpage} pool for a given size.
|
||||||
|
*/
|
||||||
|
PoolSubpage<T> head = this.Chunk.Arena.FindSubpagePoolHead(this.ElemSize);
|
||||||
|
lock (head)
|
||||||
|
{
|
||||||
|
if (this.numAvail == 0 || !this.DoNotDestroy)
|
||||||
|
{
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
int bitmapIdx = this.GetNextAvail();
|
||||||
|
int q = bitmapIdx.RightUShift(6);
|
||||||
|
int r = bitmapIdx & 63;
|
||||||
|
Contract.Assert((this.bitmap[q].RightUShift(r) & 1) == 0);
|
||||||
|
this.bitmap[q] |= 1L << r;
|
||||||
|
|
||||||
|
if (--this.numAvail == 0)
|
||||||
|
{
|
||||||
|
this.RemoveFromPool();
|
||||||
|
}
|
||||||
|
|
||||||
|
return this.ToHandle(bitmapIdx);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return {@code true} if this subpage is in use.
|
||||||
|
* {@code false} if this subpage is not used by its chunk and thus it's OK to be released.
|
||||||
|
*/
|
||||||
|
|
||||||
|
internal bool Free(int bitmapIdx)
|
||||||
|
{
|
||||||
|
if (this.ElemSize == 0)
|
||||||
|
{
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Synchronize on the head of the SubpagePool stored in the {@link PoolArena}. This is needed as we synchronize
* on it when calling {@link PoolArena#allocate(PoolThreadCache, int, int)} and try to allocate out of the
|
||||||
|
* {@link PoolSubpage} pool for a given size.
|
||||||
|
*/
|
||||||
|
PoolSubpage<T> head = this.Chunk.Arena.FindSubpagePoolHead(this.ElemSize);
|
||||||
|
|
||||||
|
lock (head)
|
||||||
|
{
|
||||||
|
int q = bitmapIdx.RightUShift(6);
|
||||||
|
int r = bitmapIdx & 63;
|
||||||
|
Contract.Assert((this.bitmap[q].RightUShift(r) & 1) != 0);
|
||||||
|
this.bitmap[q] ^= 1L << r;
|
||||||
|
|
||||||
|
this.SetNextAvail(bitmapIdx);
|
||||||
|
|
||||||
|
if (this.numAvail++ == 0)
|
||||||
|
{
|
||||||
|
this.AddToPool(head);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (this.numAvail != this.maxNumElems)
|
||||||
|
{
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
// Subpage not in use (numAvail == maxNumElems)
|
||||||
|
if (this.Prev == this.Next)
|
||||||
|
{
|
||||||
|
// Do not remove if this subpage is the only one left in the pool.
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove this subpage from the pool if there are other subpages left in the pool.
|
||||||
|
this.DoNotDestroy = false;
|
||||||
|
this.RemoveFromPool();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void AddToPool(PoolSubpage<T> head)
|
||||||
|
{
|
||||||
|
Contract.Assert(this.Prev == null && this.Next == null);
|
||||||
|
|
||||||
|
this.Prev = head;
|
||||||
|
this.Next = head.Next;
|
||||||
|
this.Next.Prev = this;
|
||||||
|
head.Next = this;
|
||||||
|
}
|
||||||
|
|
||||||
|
void RemoveFromPool()
|
||||||
|
{
|
||||||
|
Contract.Assert(this.Prev != null && this.Next != null);
|
||||||
|
|
||||||
|
this.Prev.Next = this.Next;
|
||||||
|
this.Next.Prev = this.Prev;
|
||||||
|
this.Next = null;
|
||||||
|
this.Prev = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
void SetNextAvail(int bitmapIdx) => this.nextAvail = bitmapIdx;
|
||||||
|
|
||||||
|
int GetNextAvail()
|
||||||
|
{
|
||||||
|
int nextAvail = this.nextAvail;
|
||||||
|
if (nextAvail >= 0)
|
||||||
|
{
|
||||||
|
this.nextAvail = -1;
|
||||||
|
return nextAvail;
|
||||||
|
}
|
||||||
|
return this.FindNextAvail();
|
||||||
|
}
|
||||||
|
|
||||||
|
int FindNextAvail()
|
||||||
|
{
|
||||||
|
long[] bitmap = this.bitmap;
|
||||||
|
int bitmapLength = this.bitmapLength;
|
||||||
|
for (int i = 0; i < bitmapLength; i++)
|
||||||
|
{
|
||||||
|
long bits = bitmap[i];
|
||||||
|
if (~bits != 0)
|
||||||
|
{
|
||||||
|
return this.FindNextAvail0(i, bits);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
int FindNextAvail0(int i, long bits)
|
||||||
|
{
|
||||||
|
int maxNumElems = this.maxNumElems;
|
||||||
|
int baseVal = i << 6;
|
||||||
|
|
||||||
|
for (int j = 0; j < 64; j++)
|
||||||
|
{
|
||||||
|
if ((bits & 1) == 0)
|
||||||
|
{
|
||||||
|
int val = baseVal | j;
|
||||||
|
if (val < maxNumElems)
|
||||||
|
{
|
||||||
|
return val;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
bits = bits.RightUShift(1);
|
||||||
|
}
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
long ToHandle(int bitmapIdx) => 0x4000000000000000L | (long)bitmapIdx << 32 | this.memoryMapIdx;
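// Illustrative sketch (not part of the port): ToHandle packs a subpage marker bit, the bitmap
// index (upper 32 bits) and the memory map index (lower 32 bits) into a single long handle.
// Decoding is the reverse shift/mask. Plain C#, no DotNetty types assumed.
static class SubpageHandleDemo
{
    const long SubpageMarker = 0x4000000000000000L;

    public static long Encode(int bitmapIdx, int memoryMapIdx) =>
        SubpageMarker | (long)bitmapIdx << 32 | (long)memoryMapIdx;

    public static int DecodeBitmapIdx(long handle) => (int)((handle >> 32) & 0x3FFFFFFF);

    public static int DecodeMemoryMapIdx(long handle) => (int)handle;
}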
|
||||||
|
|
||||||
|
public override string ToString()
|
||||||
|
{
|
||||||
|
if (!this.DoNotDestroy)
|
||||||
|
{
|
||||||
|
return "(" + this.memoryMapIdx + ": not in use)";
|
||||||
|
}
|
||||||
|
|
||||||
|
return "(" + this.memoryMapIdx + ": " + (this.maxNumElems - this.numAvail) + '/' + this.maxNumElems +
|
||||||
|
", offset: " + this.runOffset + ", length: " + this.pageSize + ", elemSize: " + this.ElemSize + ')';
|
||||||
|
}
|
||||||
|
|
||||||
|
public int MaxNumElements => this.maxNumElems;
|
||||||
|
|
||||||
|
public int NumAvailable => this.numAvail;
|
||||||
|
|
||||||
|
public int ElementSize => this.ElemSize;
|
||||||
|
|
||||||
|
public int PageSize => this.pageSize;
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,469 @@
|
||||||
|
// Copyright (c) Microsoft. All rights reserved.
|
||||||
|
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||||
|
|
||||||
|
namespace DotNetty.Buffers
|
||||||
|
{
|
||||||
|
using System;
|
||||||
|
using System.Diagnostics.Contracts;
|
||||||
|
using System.Threading;
|
||||||
|
using DotNetty.Common;
|
||||||
|
using DotNetty.Common.Internal;
|
||||||
|
using DotNetty.Common.Internal.Logging;
|
||||||
|
using DotNetty.Common.Utilities;
|
||||||
|
|
||||||
|
/// <summary>
|
||||||
|
/// Acts as a thread cache for allocations. This implementation is modeled after
/// <a href="http://people.freebsd.org/~jasone/jemalloc/bsdcan2006/jemalloc.pdf">jemalloc</a> and the techniques
/// described in
/// <a href="https://www.facebook.com/notes/facebook-engineering/scalable-memory-allocation-using-jemalloc/480222803919">
/// Scalable memory allocation using jemalloc</a>.
/// </summary>
|
||||||
|
sealed class PoolThreadCache<T>
|
||||||
|
{
|
||||||
|
static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance<PoolThreadCache<T>>();
|
||||||
|
|
||||||
|
internal readonly PoolArena<T> HeapArena;
|
||||||
|
|
||||||
|
// Hold the caches for the different size classes, which are tiny, small and normal.
|
||||||
|
readonly MemoryRegionCache[] tinySubPageHeapCaches;
|
||||||
|
readonly MemoryRegionCache[] smallSubPageHeapCaches;
|
||||||
|
readonly MemoryRegionCache[] normalHeapCaches;
|
||||||
|
|
||||||
|
// Used for bit shifting when calculating the index of the normal caches later
|
||||||
|
readonly int numShiftsNormalHeap;
|
||||||
|
readonly int freeSweepAllocationThreshold;
|
||||||
|
|
||||||
|
int allocations;
|
||||||
|
|
||||||
|
readonly Thread thread = Thread.CurrentThread;
|
||||||
|
readonly Action freeTask;
|
||||||
|
|
||||||
|
// TODO: Test if adding padding helps under contention
|
||||||
|
//private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7;
|
||||||
|
|
||||||
|
internal PoolThreadCache(PoolArena<T> heapArena,
|
||||||
|
int tinyCacheSize, int smallCacheSize, int normalCacheSize,
|
||||||
|
int maxCachedBufferCapacity, int freeSweepAllocationThreshold)
|
||||||
|
{
|
||||||
|
Contract.Requires(maxCachedBufferCapacity >= 0);
|
||||||
|
Contract.Requires(freeSweepAllocationThreshold > 0);
|
||||||
|
|
||||||
|
this.freeTask = this.Free0;
|
||||||
|
|
||||||
|
this.freeSweepAllocationThreshold = freeSweepAllocationThreshold;
|
||||||
|
this.HeapArena = heapArena;
|
||||||
|
if (heapArena != null)
|
||||||
|
{
|
||||||
|
// Create the caches for the heap allocations
|
||||||
|
this.tinySubPageHeapCaches = CreateSubPageCaches(
|
||||||
|
tinyCacheSize, PoolArena<T>.NumTinySubpagePools, SizeClass.Tiny);
|
||||||
|
this.smallSubPageHeapCaches = CreateSubPageCaches(
|
||||||
|
smallCacheSize, heapArena.NumSmallSubpagePools, SizeClass.Small);
|
||||||
|
|
||||||
|
this.numShiftsNormalHeap = Log2(heapArena.PageSize);
|
||||||
|
this.normalHeapCaches = CreateNormalCaches(
|
||||||
|
normalCacheSize, maxCachedBufferCapacity, heapArena);
|
||||||
|
|
||||||
|
heapArena.RegisterThreadCache();
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
// No heapArena is configured so just null out all caches
|
||||||
|
this.tinySubPageHeapCaches = null;
|
||||||
|
this.smallSubPageHeapCaches = null;
|
||||||
|
this.normalHeapCaches = null;
|
||||||
|
this.numShiftsNormalHeap = -1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// The thread-local cache will keep a list of pooled buffers which must be returned to
|
||||||
|
// the pool when the thread is not alive anymore.
|
||||||
|
ThreadDeathWatcher.Watch(this.thread, this.freeTask);
|
||||||
|
}
|
||||||
|
|
||||||
|
static MemoryRegionCache[] CreateSubPageCaches(
|
||||||
|
int cacheSize, int numCaches, SizeClass sizeClass)
|
||||||
|
{
|
||||||
|
if (cacheSize > 0)
|
||||||
|
{
|
||||||
|
var cache = new MemoryRegionCache[numCaches];
|
||||||
|
for (int i = 0; i < cache.Length; i++)
|
||||||
|
{
|
||||||
|
// TODO: maybe use cacheSize / cache.length
|
||||||
|
cache[i] = new SubPageMemoryRegionCache(cacheSize, sizeClass);
|
||||||
|
}
|
||||||
|
return cache;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static MemoryRegionCache[] CreateNormalCaches(
|
||||||
|
int cacheSize, int maxCachedBufferCapacity, PoolArena<T> area)
|
||||||
|
{
|
||||||
|
if (cacheSize > 0)
|
||||||
|
{
|
||||||
|
int max = Math.Min(area.ChunkSize, maxCachedBufferCapacity);
|
||||||
|
int arraySize = Math.Max(1, Log2(max / area.PageSize) + 1);
|
||||||
|
|
||||||
|
var cache = new MemoryRegionCache[arraySize];
|
||||||
|
for (int i = 0; i < cache.Length; i++)
|
||||||
|
{
|
||||||
|
cache[i] = new NormalMemoryRegionCache(cacheSize);
|
||||||
|
}
|
||||||
|
return cache;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static int Log2(int val)
|
||||||
|
{
|
||||||
|
// todo: revisit this vs IntegerExtensions.(Ceil/Floor)Log2
|
||||||
|
int res = 0;
|
||||||
|
while (val > 1)
|
||||||
|
{
|
||||||
|
val >>= 1;
|
||||||
|
res++;
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
}
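// Note (illustrative): this is floor(log2). For the default 8 KiB page size, Log2(8192) == 13,
// which becomes numShiftsNormalHeap and is used below to index the normal caches; Log2(3) == 1.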
|
||||||
|
|
||||||
|
/**
 * Try to allocate a tiny buffer out of the cache. Returns {@code true} if successful, {@code false} otherwise.
 */
|
||||||
|
|
||||||
|
internal bool AllocateTiny(PoolArena<T> area, PooledByteBuffer<T> buf, int reqCapacity, int normCapacity) => this.Allocate(this.CacheForTiny(area, normCapacity), buf, reqCapacity);
|
||||||
|
|
||||||
|
/**
 * Try to allocate a small buffer out of the cache. Returns {@code true} if successful, {@code false} otherwise.
 */
|
||||||
|
|
||||||
|
internal bool AllocateSmall(PoolArena<T> area, PooledByteBuffer<T> buf, int reqCapacity, int normCapacity) => this.Allocate(this.CacheForSmall(area, normCapacity), buf, reqCapacity);
|
||||||
|
|
||||||
|
/**
 * Try to allocate a normal buffer out of the cache. Returns {@code true} if successful, {@code false} otherwise.
 */
|
||||||
|
|
||||||
|
internal bool AllocateNormal(PoolArena<T> area, PooledByteBuffer<T> buf, int reqCapacity, int normCapacity) => this.Allocate(this.CacheForNormal(area, normCapacity), buf, reqCapacity);
|
||||||
|
|
||||||
|
bool Allocate(MemoryRegionCache cache, PooledByteBuffer<T> buf, int reqCapacity)
|
||||||
|
{
|
||||||
|
if (cache == null)
|
||||||
|
{
|
||||||
|
// no cache found so just return false here
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
bool allocated = cache.Allocate(buf, reqCapacity);
|
||||||
|
if (++this.allocations >= this.freeSweepAllocationThreshold)
|
||||||
|
{
|
||||||
|
this.allocations = 0;
|
||||||
|
this.Trim();
|
||||||
|
}
|
||||||
|
return allocated;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add {@link PoolChunk} and {@code handle} to the cache if there is enough room.
* Returns {@code true} if it fits into the cache, {@code false} otherwise.
*/
|
||||||
|
|
||||||
|
internal bool Add(PoolArena<T> area, PoolChunk<T> chunk, long handle, int normCapacity, SizeClass sizeClass)
|
||||||
|
{
|
||||||
|
MemoryRegionCache c = this.Cache(area, normCapacity, sizeClass);
|
||||||
|
if (c == null)
|
||||||
|
{
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return c.Add(chunk, handle);
|
||||||
|
}
|
||||||
|
|
||||||
|
MemoryRegionCache Cache(PoolArena<T> area, int normCapacity, SizeClass sizeClass)
|
||||||
|
{
|
||||||
|
switch (sizeClass)
|
||||||
|
{
|
||||||
|
case SizeClass.Normal:
|
||||||
|
return this.CacheForNormal(area, normCapacity);
|
||||||
|
case SizeClass.Small:
|
||||||
|
return this.CacheForSmall(area, normCapacity);
|
||||||
|
case SizeClass.Tiny:
|
||||||
|
return this.CacheForTiny(area, normCapacity);
|
||||||
|
default:
|
||||||
|
throw new ArgumentOutOfRangeException();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Should be called if the thread that uses this cache is about to exit, in order to release resources out of the cache.
|
||||||
|
*/
|
||||||
|
|
||||||
|
internal void Free()
|
||||||
|
{
|
||||||
|
ThreadDeathWatcher.Unwatch(this.thread, this.freeTask);
|
||||||
|
this.Free0();
|
||||||
|
}
|
||||||
|
|
||||||
|
void Free0()
|
||||||
|
{
|
||||||
|
int numFreed = Free(this.tinySubPageHeapCaches) +
|
||||||
|
Free(this.smallSubPageHeapCaches) +
|
||||||
|
Free(this.normalHeapCaches);
|
||||||
|
|
||||||
|
if (numFreed > 0 && Logger.DebugEnabled)
|
||||||
|
{
|
||||||
|
Logger.Debug("Freed {} thread-local buffer(s) from thread: {}", numFreed, this.thread.Name);
|
||||||
|
}
|
||||||
|
|
||||||
|
this.HeapArena?.DeregisterThreadCache();
|
||||||
|
}
|
||||||
|
|
||||||
|
static int Free(MemoryRegionCache[] caches)
|
||||||
|
{
|
||||||
|
if (caches == null)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int numFreed = 0;
|
||||||
|
foreach (MemoryRegionCache c in caches)
|
||||||
|
{
|
||||||
|
numFreed += Free(c);
|
||||||
|
}
|
||||||
|
return numFreed;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int Free(MemoryRegionCache cache)
|
||||||
|
{
|
||||||
|
if (cache == null)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
return cache.Free();
|
||||||
|
}
|
||||||
|
|
||||||
|
internal void Trim()
|
||||||
|
{
|
||||||
|
Trim(this.tinySubPageHeapCaches);
|
||||||
|
Trim(this.smallSubPageHeapCaches);
|
||||||
|
Trim(this.normalHeapCaches);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void Trim(MemoryRegionCache[] caches)
|
||||||
|
{
|
||||||
|
if (caches == null)
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
foreach (MemoryRegionCache c in caches)
|
||||||
|
{
|
||||||
|
Trim(c);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void Trim(MemoryRegionCache cache) => cache?.Trim();
|
||||||
|
|
||||||
|
MemoryRegionCache CacheForTiny(PoolArena<T> area, int normCapacity)
|
||||||
|
{
|
||||||
|
int idx = PoolArena<T>.TinyIdx(normCapacity);
|
||||||
|
return Cache(this.tinySubPageHeapCaches, idx);
|
||||||
|
}
|
||||||
|
|
||||||
|
MemoryRegionCache CacheForSmall(PoolArena<T> area, int normCapacity)
|
||||||
|
{
|
||||||
|
int idx = PoolArena<T>.SmallIdx(normCapacity);
|
||||||
|
return Cache(this.smallSubPageHeapCaches, idx);
|
||||||
|
}
|
||||||
|
|
||||||
|
MemoryRegionCache CacheForNormal(PoolArena<T> area, int normCapacity)
|
||||||
|
{
|
||||||
|
int idx1 = Log2(normCapacity >> this.numShiftsNormalHeap);
|
||||||
|
return Cache(this.normalHeapCaches, idx1);
|
||||||
|
}
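// Worked example (assumption: 8 KiB pages, so numShiftsNormalHeap == Log2(8192) == 13):
// a normalized capacity of 8 KiB maps to index 0, 16 KiB to index 1, 32 KiB to index 2, i.e.
// idx = Log2(normCapacity >> 13). Plain C# sketch, no DotNetty types assumed.
static class NormalCacheIndexDemo
{
    public static int IndexFor(int normCapacity, int pageShifts)
    {
        int val = normCapacity >> pageShifts;
        int idx = 0;
        while (val > 1) // floor(log2), same loop as the Log2 helper above
        {
            val >>= 1;
            idx++;
        }
        return idx; // e.g. IndexFor(32 * 1024, 13) == 2
    }
}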
|
||||||
|
|
||||||
|
static MemoryRegionCache Cache(MemoryRegionCache[] cache, int idx)
|
||||||
|
{
|
||||||
|
if (cache == null || idx > cache.Length - 1)
|
||||||
|
{
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
return cache[idx];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Cache used for buffers which are backed by TINY or SMALL size.
|
||||||
|
*/
|
||||||
|
|
||||||
|
sealed class SubPageMemoryRegionCache : MemoryRegionCache
|
||||||
|
{
|
||||||
|
internal SubPageMemoryRegionCache(int size, SizeClass sizeClass)
|
||||||
|
: base(size, sizeClass)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
protected override void InitBuf(
|
||||||
|
PoolChunk<T> chunk, long handle, PooledByteBuffer<T> buf, int reqCapacity) => chunk.InitBufWithSubpage(buf, handle, reqCapacity);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Cache used for buffers which are backed by NORMAL size.
|
||||||
|
*/
|
||||||
|
|
||||||
|
sealed class NormalMemoryRegionCache : MemoryRegionCache
|
||||||
|
{
|
||||||
|
internal NormalMemoryRegionCache(int size)
|
||||||
|
: base(size, SizeClass.Normal)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
protected override void InitBuf(
|
||||||
|
PoolChunk<T> chunk, long handle, PooledByteBuffer<T> buf, int reqCapacity) => chunk.InitBuf(buf, handle, reqCapacity);
|
||||||
|
}
|
||||||
|
|
||||||
|
abstract class MemoryRegionCache
|
||||||
|
{
|
||||||
|
readonly int size;
|
||||||
|
readonly IQueue<Entry> queue;
|
||||||
|
readonly SizeClass sizeClass;
|
||||||
|
int allocations;
|
||||||
|
|
||||||
|
protected MemoryRegionCache(int size, SizeClass sizeClass)
|
||||||
|
{
|
||||||
|
this.size = IntegerExtensions.RoundUpToPowerOfTwo(size);
|
||||||
|
this.queue = PlatformDependent.NewFixedMpscQueue<Entry>(this.size);
|
||||||
|
this.sizeClass = sizeClass;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Init the {@link PooledByteBuffer} using the provided chunk and handle with the capacity restrictions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
protected abstract void InitBuf(PoolChunk<T> chunk, long handle,
|
||||||
|
PooledByteBuffer<T> buf, int reqCapacity);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Add to cache if not already full.
|
||||||
|
*/
|
||||||
|
|
||||||
|
public bool Add(PoolChunk<T> chunk, long handle)
|
||||||
|
{
|
||||||
|
Entry entry = NewEntry(chunk, handle);
|
||||||
|
bool queued = this.queue.TryEnqueue(entry);
|
||||||
|
if (!queued)
|
||||||
|
{
|
||||||
|
// If it was not possible to cache the chunk, immediately recycle the entry
|
||||||
|
entry.Recycle();
|
||||||
|
}
|
||||||
|
|
||||||
|
return queued;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Allocate something out of the cache if possible and remove the entry from the cache.
|
||||||
|
*/
|
||||||
|
|
||||||
|
public bool Allocate(PooledByteBuffer<T> buf, int reqCapacity)
|
||||||
|
{
|
||||||
|
Entry entry = this.queue.Dequeue();
|
||||||
|
if (entry == null)
|
||||||
|
{
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
this.InitBuf(entry.Chunk, entry.Handle, buf, reqCapacity);
|
||||||
|
entry.Recycle();
|
||||||
|
|
||||||
|
// allocations is not thread-safe, which is fine as this is only called from the same thread all the time.
|
||||||
|
++this.allocations;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Clear out this cache and free up all previous cached {@link PoolChunk}s and {@code handle}s.
|
||||||
|
*/
|
||||||
|
|
||||||
|
public int Free() => this.Free(int.MaxValue);
|
||||||
|
|
||||||
|
int Free(int max)
|
||||||
|
{
|
||||||
|
int numFreed = 0;
|
||||||
|
for (; numFreed < max; numFreed++)
|
||||||
|
{
|
||||||
|
Entry entry = this.queue.Dequeue();
|
||||||
|
if (entry != null)
|
||||||
|
{
|
||||||
|
this.FreeEntry(entry);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
// all cleared
|
||||||
|
return numFreed;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return numFreed;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Free up cached {@link PoolChunk}s if not allocated frequently enough.
|
||||||
|
*/
|
||||||
|
|
||||||
|
public void Trim()
|
||||||
|
{
|
||||||
|
int toFree = this.size - this.allocations;
|
||||||
|
this.allocations = 0;
|
||||||
|
|
||||||
|
// We did not even allocate 'size' times since the last trim, so free the cached entries we are unlikely to need.
|
||||||
|
if (toFree > 0)
|
||||||
|
{
|
||||||
|
this.Free(toFree);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void FreeEntry(Entry entry)
|
||||||
|
{
|
||||||
|
PoolChunk<T> chunk = entry.Chunk;
|
||||||
|
long handle = entry.Handle;
|
||||||
|
|
||||||
|
// recycle now so PoolChunk can be GC'ed.
|
||||||
|
entry.Recycle();
|
||||||
|
|
||||||
|
chunk.Arena.FreeChunk(chunk, handle, this.sizeClass);
|
||||||
|
}
|
||||||
|
|
||||||
|
sealed class Entry
|
||||||
|
{
|
||||||
|
readonly ThreadLocalPool.Handle recyclerHandle;
|
||||||
|
public PoolChunk<T> Chunk;
|
||||||
|
public long Handle = -1;
|
||||||
|
|
||||||
|
public Entry(ThreadLocalPool.Handle recyclerHandle)
|
||||||
|
{
|
||||||
|
this.recyclerHandle = recyclerHandle;
|
||||||
|
}
|
||||||
|
|
||||||
|
internal void Recycle()
|
||||||
|
{
|
||||||
|
this.Chunk = null;
|
||||||
|
this.Handle = -1;
|
||||||
|
this.recyclerHandle.Release(this);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static Entry NewEntry(PoolChunk<T> chunk, long handle)
|
||||||
|
{
|
||||||
|
Entry entry = Recycler.Take();
|
||||||
|
entry.Chunk = chunk;
|
||||||
|
entry.Handle = handle;
|
||||||
|
return entry;
|
||||||
|
}
|
||||||
|
|
||||||
|
static readonly ThreadLocalPool<Entry> Recycler = new ThreadLocalPool<Entry>(handle => new Entry(handle));
|
||||||
|
}
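// Minimal sketch of the MemoryRegionCache idea above, using a plain bounded Queue<TItem> as a
// stand-in for the MPSC queue (assumptions: single-threaded use and System.Collections.Generic
// available; the real cache is a lock-free multi-producer queue).
sealed class BoundedCacheDemo<TItem>
{
    readonly int capacity;
    readonly Queue<TItem> queue = new Queue<TItem>();

    public BoundedCacheDemo(int capacity)
    {
        this.capacity = capacity;
    }

    // Add returns false when the cache is full, mirroring MemoryRegionCache.Add.
    public bool Add(TItem item)
    {
        if (this.queue.Count >= this.capacity)
        {
            return false;
        }
        this.queue.Enqueue(item);
        return true;
    }

    // TryTake mirrors MemoryRegionCache.Allocate: hand back a cached item if one is available.
    public bool TryTake(out TItem item)
    {
        if (this.queue.Count == 0)
        {
            item = default(TItem);
            return false;
        }
        item = this.queue.Dequeue();
        return true;
    }
}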
|
||||||
|
}
|
||||||
|
}
|
|
@ -6,54 +6,103 @@ namespace DotNetty.Buffers
|
||||||
using System;
|
using System;
|
||||||
using System.Diagnostics.Contracts;
|
using System.Diagnostics.Contracts;
|
||||||
using DotNetty.Common;
|
using DotNetty.Common;
|
||||||
|
using DotNetty.Common.Utilities;
|
||||||
|
|
||||||
class PooledByteBuffer : UnpooledHeapByteBuffer
|
abstract class PooledByteBuffer<T> : AbstractReferenceCountedByteBuffer
|
||||||
{
|
{
|
||||||
readonly ThreadLocalPool.Handle returnHandle;
|
readonly ThreadLocalPool.Handle recyclerHandle;
|
||||||
int length;
|
|
||||||
readonly byte[] pooledArray;
|
|
||||||
|
|
||||||
public PooledByteBuffer(ThreadLocalPool.Handle returnHandle, IByteBufferAllocator allocator, int maxFixedCapacity, int maxCapacity)
|
protected internal PoolChunk<T> Chunk;
|
||||||
: this(returnHandle, allocator, new byte[maxFixedCapacity], maxCapacity)
|
protected internal long Handle;
|
||||||
|
protected internal T Memory;
|
||||||
|
protected internal int Offset;
|
||||||
|
protected internal int Length;
|
||||||
|
internal int MaxLength;
|
||||||
|
internal PoolThreadCache<T> Cache;
|
||||||
|
//private ByteBuffer tmpNioBuf;
|
||||||
|
|
||||||
|
protected PooledByteBuffer(ThreadLocalPool.Handle recyclerHandle, int maxCapacity)
|
||||||
|
: base(maxCapacity)
|
||||||
{
|
{
|
||||||
|
this.recyclerHandle = recyclerHandle;
|
||||||
}
|
}
|
||||||
|
|
||||||
PooledByteBuffer(ThreadLocalPool.Handle returnHandle, IByteBufferAllocator allocator, byte[] pooledArray, int maxCapacity)
|
internal void Init(PoolChunk<T> chunk, long handle, int offset, int length, int maxLength, PoolThreadCache<T> cache)
|
||||||
: base(allocator, pooledArray, 0, 0, maxCapacity)
|
|
||||||
{
|
{
|
||||||
this.length = pooledArray.Length;
|
Contract.Assert(handle >= 0);
|
||||||
this.returnHandle = returnHandle;
|
Contract.Assert(chunk != null);
|
||||||
this.pooledArray = pooledArray;
|
|
||||||
}
|
|
||||||
|
|
||||||
internal void Init()
|
this.Chunk = chunk;
|
||||||
{
|
this.Handle = handle;
|
||||||
|
this.Memory = chunk.Memory;
|
||||||
|
this.Offset = offset;
|
||||||
|
this.Length = length;
|
||||||
|
this.MaxLength = maxLength;
|
||||||
this.SetIndex(0, 0);
|
this.SetIndex(0, 0);
|
||||||
this.DiscardMarkers();
|
this.DiscardMarkers();
|
||||||
|
//tmpNioBuf = null;
|
||||||
|
this.Cache = cache;
|
||||||
}
|
}
|
||||||
|
|
||||||
public override int Capacity => this.length;
|
internal void InitUnpooled(PoolChunk<T> chunk, int length)
|
||||||
|
{
|
||||||
|
Contract.Assert(chunk != null);
|
||||||
|
|
||||||
public override IByteBuffer AdjustCapacity(int newCapacity)
|
this.Chunk = chunk;
|
||||||
|
this.Handle = 0;
|
||||||
|
this.Memory = chunk.Memory;
|
||||||
|
this.Offset = 0;
|
||||||
|
this.Length = this.MaxLength = length;
|
||||||
|
this.SetIndex(0, 0);
|
||||||
|
//tmpNioBuf = null;
|
||||||
|
this.Cache = null;
|
||||||
|
}
|
||||||
|
|
||||||
|
public override int Capacity => this.Length;
|
||||||
|
|
||||||
|
public sealed override IByteBuffer AdjustCapacity(int newCapacity)
|
||||||
{
|
{
|
||||||
this.EnsureAccessible();
|
this.EnsureAccessible();
|
||||||
Contract.Requires(newCapacity >= 0 && newCapacity <= this.MaxCapacity);
|
|
||||||
|
|
||||||
if (this.Array == this.pooledArray)
|
// If the requested capacity does not require reallocation, just update the length of the memory.
|
||||||
|
if (this.Chunk.Unpooled)
|
||||||
{
|
{
|
||||||
if (newCapacity > this.length)
|
if (newCapacity == this.Length)
|
||||||
{
|
{
|
||||||
if (newCapacity < this.pooledArray.Length)
|
return this;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
if (newCapacity > this.Length)
|
||||||
|
{
|
||||||
|
if (newCapacity <= this.MaxLength)
|
||||||
{
|
{
|
||||||
this.length = newCapacity;
|
this.Length = newCapacity;
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else if (newCapacity < this.length)
|
else if (newCapacity < this.Length)
|
||||||
{
|
{
|
||||||
this.length = newCapacity;
|
if (newCapacity > this.MaxLength.RightUShift(1))
|
||||||
this.SetIndex(Math.Min(this.ReaderIndex, newCapacity), Math.Min(this.WriterIndex, newCapacity));
|
{
|
||||||
return this;
|
if (this.MaxLength <= 512)
|
||||||
|
{
|
||||||
|
if (newCapacity > this.MaxLength - 16)
|
||||||
|
{
|
||||||
|
this.Length = newCapacity;
|
||||||
|
this.SetIndex(Math.Min(this.ReaderIndex, newCapacity), Math.Min(this.WriterIndex, newCapacity));
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
// > 512 (i.e. >= 1024)
|
||||||
|
this.Length = newCapacity;
|
||||||
|
this.SetIndex(Math.Min(this.ReaderIndex, newCapacity), Math.Min(this.WriterIndex, newCapacity));
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
@ -61,25 +110,42 @@ namespace DotNetty.Buffers
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// todo: fall through to here means buffer pool is being used inefficiently. consider providing insight on such events
|
// Reallocation required.
|
||||||
base.AdjustCapacity(newCapacity);
|
this.Chunk.Arena.Reallocate(this, newCapacity, true);
|
||||||
this.length = newCapacity;
|
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
|
||||||
public override IByteBuffer Copy(int index, int length)
|
public sealed override IByteBufferAllocator Allocator => this.Chunk.Arena.Parent;
|
||||||
|
|
||||||
|
public sealed override ByteOrder Order => ByteOrder.BigEndian;
|
||||||
|
|
||||||
|
public sealed override IByteBuffer Unwrap() => null;
|
||||||
|
|
||||||
|
//protected IByteBuffer internalNioBuffer() {
|
||||||
|
// ByteBuffer tmpNioBuf = this.tmpNioBuf;
|
||||||
|
// if (tmpNioBuf == null)
|
||||||
|
// {
|
||||||
|
// this.tmpNioBuf = tmpNioBuf = newInternalNioBuffer(memory);
|
||||||
|
// }
|
||||||
|
// return tmpNioBuf;
|
||||||
|
//}
|
||||||
|
|
||||||
|
//protected abstract ByteBuffer newInternalNioBuffer(T memory);
|
||||||
|
|
||||||
|
protected sealed override void Deallocate()
|
||||||
{
|
{
|
||||||
this.CheckIndex(index, length);
|
if (this.Handle >= 0)
|
||||||
IByteBuffer copy = this.Allocator.Buffer(length, this.MaxCapacity);
|
{
|
||||||
copy.WriteBytes(this.Array, this.ArrayOffset + index, length);
|
long handle = this.Handle;
|
||||||
return copy;
|
this.Handle = -1;
|
||||||
|
this.Memory = default(T);
|
||||||
|
this.Chunk.Arena.Free(this.Chunk, handle, this.MaxLength, this.Cache);
|
||||||
|
this.Recycle();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
protected override void Deallocate()
|
void Recycle() => this.recyclerHandle.Release(this);
|
||||||
{
|
|
||||||
this.SetArray(this.pooledArray); // release byte array that has been allocated in response to capacity adjustment to a value higher than max pooled size
|
protected int Idx(int index) => this.Offset + index;
|
||||||
this.SetReferenceCount(1); // ensures that next time buffer is pulled from the pool it has "fresh" ref count
|
|
||||||
this.returnHandle.Release(this);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -1,39 +1,323 @@
|
||||||
// Copyright (c) Microsoft. All rights reserved.
|
// Copyright (c) Microsoft. All rights reserved.
|
||||||
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||||
|
|
||||||
namespace DotNetty.Buffers
|
namespace DotNetty.Buffers
|
||||||
{
|
{
|
||||||
|
using System;
|
||||||
|
using System.Collections.Generic;
|
||||||
using System.Diagnostics.Contracts;
|
using System.Diagnostics.Contracts;
|
||||||
|
using System.Text;
|
||||||
using DotNetty.Common;
|
using DotNetty.Common;
|
||||||
|
using DotNetty.Common.Internal;
|
||||||
|
using DotNetty.Common.Internal.Logging;
|
||||||
|
using DotNetty.Common.Utilities;
|
||||||
|
|
||||||
public class PooledByteBufferAllocator : AbstractByteBufferAllocator
|
public class PooledByteBufferAllocator : AbstractByteBufferAllocator
|
||||||
{
|
{
|
||||||
readonly ThreadLocalPool<PooledByteBuffer> pool;
|
static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance<PooledByteBufferAllocator>();
|
||||||
|
static readonly int DEFAULT_NUM_HEAP_ARENA;
|
||||||
|
|
||||||
public PooledByteBufferAllocator(int maxPooledBufSize, int maxLocalPoolSize)
|
static readonly int DEFAULT_PAGE_SIZE;
|
||||||
|
static readonly int DEFAULT_MAX_ORDER; // 8192 << 11 = 16 MiB per chunk
|
||||||
|
static readonly int DEFAULT_TINY_CACHE_SIZE;
|
||||||
|
static readonly int DEFAULT_SMALL_CACHE_SIZE;
|
||||||
|
static readonly int DEFAULT_NORMAL_CACHE_SIZE;
|
||||||
|
static readonly int DEFAULT_MAX_CACHED_BUFFER_CAPACITY;
|
||||||
|
static readonly int DEFAULT_CACHE_TRIM_INTERVAL;
|
||||||
|
|
||||||
|
static readonly int MIN_PAGE_SIZE = 4096;
|
||||||
|
static readonly int MAX_CHUNK_SIZE = (int)((int.MaxValue + 1L) / 2);
|
||||||
|
|
||||||
|
static PooledByteBufferAllocator()
|
||||||
{
|
{
|
||||||
Contract.Requires(maxLocalPoolSize > maxPooledBufSize);
|
int defaultPageSize = SystemPropertyUtil.GetInt("io.netty.allocator.pageSize", 8192);
|
||||||
|
Exception pageSizeFallbackCause = null;
|
||||||
|
try
|
||||||
|
{
|
||||||
|
ValidateAndCalculatePageShifts(defaultPageSize);
|
||||||
|
}
|
||||||
|
catch (Exception t)
|
||||||
|
{
|
||||||
|
pageSizeFallbackCause = t;
|
||||||
|
defaultPageSize = 8192;
|
||||||
|
}
|
||||||
|
DEFAULT_PAGE_SIZE = defaultPageSize;
|
||||||
|
|
||||||
this.MaxPooledBufSize = maxPooledBufSize;
|
int defaultMaxOrder = SystemPropertyUtil.GetInt("io.netty.allocator.maxOrder", 11);
|
||||||
this.pool = new ThreadLocalPool<PooledByteBuffer>(
|
Exception maxOrderFallbackCause = null;
|
||||||
handle => new PooledByteBuffer(handle, this, maxPooledBufSize, int.MaxValue),
|
try
|
||||||
maxLocalPoolSize / maxPooledBufSize,
|
{
|
||||||
false);
|
ValidateAndCalculateChunkSize(DEFAULT_PAGE_SIZE, defaultMaxOrder);
|
||||||
|
}
|
||||||
|
catch (Exception t)
|
||||||
|
{
|
||||||
|
maxOrderFallbackCause = t;
|
||||||
|
defaultMaxOrder = 11;
|
||||||
|
}
|
||||||
|
DEFAULT_MAX_ORDER = defaultMaxOrder;
|
||||||
|
|
||||||
|
// Determine reasonable default for nHeapArena and nDirectArena.
|
||||||
|
// Assuming each arena has 3 chunks, the pool should not consume more than 50% of max memory.
|
||||||
|
|
||||||
|
// Use 2 * cores by default to reduce contention as we use 2 * cores for the number of EventLoops
|
||||||
|
// in NIO and EPOLL as well. If we choose a smaller number we will run into hotspots as allocation and
|
||||||
|
// deallocation needs to be synchronized on the PoolArena.
|
||||||
|
// See https://github.com/netty/netty/issues/3888
|
||||||
|
int defaultMinNumArena = Environment.ProcessorCount * 2;
|
||||||
|
int defaultChunkSize = DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER;
|
||||||
|
DEFAULT_NUM_HEAP_ARENA = Math.Max(0, SystemPropertyUtil.GetInt("dotNetty.allocator.numHeapArenas", defaultMinNumArena));
|
||||||
|
|
||||||
|
// cache sizes
|
||||||
|
DEFAULT_TINY_CACHE_SIZE = SystemPropertyUtil.GetInt("io.netty.allocator.tinyCacheSize", 512);
|
||||||
|
DEFAULT_SMALL_CACHE_SIZE = SystemPropertyUtil.GetInt("io.netty.allocator.smallCacheSize", 256);
|
||||||
|
DEFAULT_NORMAL_CACHE_SIZE = SystemPropertyUtil.GetInt("io.netty.allocator.normalCacheSize", 64);
|
||||||
|
|
||||||
|
// 32 kb is the default maximum capacity of the cached buffer. Similar to what is explained in
|
||||||
|
// 'Scalable memory allocation using jemalloc'
|
||||||
|
DEFAULT_MAX_CACHED_BUFFER_CAPACITY = SystemPropertyUtil.GetInt("io.netty.allocator.maxCachedBufferCapacity", 32 * 1024);
|
||||||
|
|
||||||
|
// the threshold number of allocations after which cached entries will be freed up if not frequently used
|
||||||
|
DEFAULT_CACHE_TRIM_INTERVAL = SystemPropertyUtil.GetInt(
|
||||||
|
"io.netty.allocator.cacheTrimInterval", 8192);
|
||||||
|
|
||||||
|
if (Logger.DebugEnabled)
|
||||||
|
{
|
||||||
|
Logger.Debug("-Dio.netty.allocator.numHeapArenas: {}", DEFAULT_NUM_HEAP_ARENA);
|
||||||
|
if (pageSizeFallbackCause == null)
|
||||||
|
{
|
||||||
|
Logger.Debug("-Dio.netty.allocator.pageSize: {}", DEFAULT_PAGE_SIZE);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
Logger.Debug("-Dio.netty.allocator.pageSize: {}", DEFAULT_PAGE_SIZE, pageSizeFallbackCause);
|
||||||
|
}
|
||||||
|
if (maxOrderFallbackCause == null)
|
||||||
|
{
|
||||||
|
Logger.Debug("-Dio.netty.allocator.maxOrder: {}", DEFAULT_MAX_ORDER);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
Logger.Debug("-Dio.netty.allocator.maxOrder: {}", DEFAULT_MAX_ORDER, maxOrderFallbackCause);
|
||||||
|
}
|
||||||
|
Logger.Debug("-Dio.netty.allocator.chunkSize: {}", DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER);
|
||||||
|
Logger.Debug("-Dio.netty.allocator.tinyCacheSize: {}", DEFAULT_TINY_CACHE_SIZE);
|
||||||
|
Logger.Debug("-Dio.netty.allocator.smallCacheSize: {}", DEFAULT_SMALL_CACHE_SIZE);
|
||||||
|
Logger.Debug("-Dio.netty.allocator.normalCacheSize: {}", DEFAULT_NORMAL_CACHE_SIZE);
|
||||||
|
Logger.Debug("-Dio.netty.allocator.maxCachedBufferCapacity: {}", DEFAULT_MAX_CACHED_BUFFER_CAPACITY);
|
||||||
|
Logger.Debug("-Dio.netty.allocator.cacheTrimInterval: {}", DEFAULT_CACHE_TRIM_INTERVAL);
|
||||||
|
}
|
||||||
|
|
||||||
|
Default = new PooledByteBufferAllocator();
|
||||||
}
|
}
|
||||||
|
|
||||||
public int MaxPooledBufSize { get; }
|
public static readonly PooledByteBufferAllocator Default;
|
||||||
|
|
||||||
|
readonly PoolArena<byte[]>[] heapArenas;
|
||||||
|
readonly int tinyCacheSize;
|
||||||
|
readonly int smallCacheSize;
|
||||||
|
readonly int normalCacheSize;
|
||||||
|
readonly IReadOnlyList<IPoolArenaMetric> heapArenaMetrics;
|
||||||
|
readonly PoolThreadLocalCache threadCache;
|
||||||
|
|
||||||
|
public PooledByteBufferAllocator()
|
||||||
|
: this(DEFAULT_NUM_HEAP_ARENA, DEFAULT_PAGE_SIZE, DEFAULT_MAX_ORDER)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
public PooledByteBufferAllocator(int nHeapArena, int pageSize, int maxOrder)
|
||||||
|
: this(nHeapArena, pageSize, maxOrder,
|
||||||
|
DEFAULT_TINY_CACHE_SIZE, DEFAULT_SMALL_CACHE_SIZE, DEFAULT_NORMAL_CACHE_SIZE)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
public PooledByteBufferAllocator(int nHeapArena, int pageSize, int maxOrder,
|
||||||
|
int tinyCacheSize, int smallCacheSize, int normalCacheSize)
|
||||||
|
{
|
||||||
|
Contract.Requires(nHeapArena >= 0);
|
||||||
|
|
||||||
|
//super(preferDirect);
|
||||||
|
this.threadCache = new PoolThreadLocalCache(this);
|
||||||
|
this.tinyCacheSize = tinyCacheSize;
|
||||||
|
this.smallCacheSize = smallCacheSize;
|
||||||
|
this.normalCacheSize = normalCacheSize;
|
||||||
|
int chunkSize = ValidateAndCalculateChunkSize(pageSize, maxOrder);
|
||||||
|
|
||||||
|
int pageShifts = ValidateAndCalculatePageShifts(pageSize);
|
||||||
|
|
||||||
|
if (nHeapArena > 0)
|
||||||
|
{
|
||||||
|
this.heapArenas = NewArenaArray<byte[]>(nHeapArena);
|
||||||
|
var metrics = new List<IPoolArenaMetric>(this.heapArenas.Length);
|
||||||
|
for (int i = 0; i < this.heapArenas.Length; i++)
|
||||||
|
{
|
||||||
|
var arena = new HeapArena(this, pageSize, maxOrder, pageShifts, chunkSize);
|
||||||
|
this.heapArenas[i] = arena;
|
||||||
|
metrics.Add(arena);
|
||||||
|
}
|
||||||
|
this.heapArenaMetrics = metrics.AsReadOnly();
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
this.heapArenas = null;
|
||||||
|
this.heapArenaMetrics = new List<IPoolArenaMetric>();
|
||||||
|
}
|
||||||
|
}
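// Hedged usage sketch (illustrative only): allocate from the default pooled allocator, use the
// buffer, and release it so the memory goes back to the owning arena / thread cache.
// Buffer(int, int) and the Default field are introduced in this change; WriteInt, ReadInt and
// Release() are assumed from the existing IByteBuffer surface.
static class PooledAllocatorUsageDemo
{
    public static int RoundTrip()
    {
        IByteBuffer buffer = PooledByteBufferAllocator.Default.Buffer(256, 1024);
        try
        {
            buffer.WriteInt(42);
            return buffer.ReadInt();
        }
        finally
        {
            buffer.Release(); // returns the chunk/handle to the pool
        }
    }
}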
|
||||||
|
|
||||||
|
static PoolArena<T>[] NewArenaArray<T>(int size) => new PoolArena<T>[size];
|
||||||
|
|
||||||
|
static int ValidateAndCalculatePageShifts(int pageSize)
|
||||||
|
{
|
||||||
|
Contract.Requires(pageSize >= MIN_PAGE_SIZE);
|
||||||
|
Contract.Requires((pageSize & pageSize - 1) == 0, "Expected power of 2");
|
||||||
|
|
||||||
|
// Logarithm base 2. At this point we know that pageSize is a power of two.
|
||||||
|
return IntegerExtensions.Log2(pageSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int ValidateAndCalculateChunkSize(int pageSize, int maxOrder)
|
||||||
|
{
|
||||||
|
if (maxOrder > 14)
|
||||||
|
{
|
||||||
|
throw new ArgumentException("maxOrder: " + maxOrder + " (expected: 0-14)");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure the resulting chunkSize does not overflow.
|
||||||
|
int chunkSize = pageSize;
|
||||||
|
for (int i = maxOrder; i > 0; i--)
|
||||||
|
{
|
||||||
|
if (chunkSize > MAX_CHUNK_SIZE >> 1)
|
||||||
|
{
|
||||||
|
throw new ArgumentException($"pageSize ({pageSize}) << maxOrder ({maxOrder}) must not exceed {MAX_CHUNK_SIZE}");
|
||||||
|
}
|
||||||
|
chunkSize <<= 1;
|
||||||
|
}
|
||||||
|
return chunkSize;
|
||||||
|
}
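// Worked example (defaults assumed from above: pageSize = 8192, maxOrder = 11):
// chunkSize = pageSize << maxOrder = 8192 << 11 = 16 MiB, well under MAX_CHUNK_SIZE (1 GiB).
// Plain C# sketch mirroring the loop-with-overflow-check above.
static class ChunkSizeDemo
{
    const int MaxChunkSize = 1 << 30; // same value as (int.MaxValue + 1L) / 2

    public static int ChunkSize(int pageSize, int maxOrder)
    {
        int chunkSize = pageSize;
        for (int i = maxOrder; i > 0; i--)
        {
            if (chunkSize > MaxChunkSize >> 1)
            {
                throw new ArgumentException("pageSize << maxOrder overflows");
            }
            chunkSize <<= 1;
        }
        return chunkSize; // ChunkSize(8192, 11) == 16 * 1024 * 1024
    }
}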
|
||||||
|
|
||||||
protected override IByteBuffer NewBuffer(int initialCapacity, int maxCapacity)
|
protected override IByteBuffer NewBuffer(int initialCapacity, int maxCapacity)
|
||||||
{
|
{
|
||||||
if (initialCapacity > this.MaxPooledBufSize)
|
PoolThreadCache<byte[]> cache = this.threadCache.Value;
|
||||||
|
PoolArena<byte[]> heapArena = cache.HeapArena;
|
||||||
|
|
||||||
|
IByteBuffer buf;
|
||||||
|
if (heapArena != null)
|
||||||
{
|
{
|
||||||
return new UnpooledHeapByteBuffer(this, initialCapacity, maxCapacity);
|
buf = heapArena.Allocate(cache, initialCapacity, maxCapacity);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
buf = new UnpooledHeapByteBuffer(this, initialCapacity, maxCapacity);
|
||||||
}
|
}
|
||||||
|
|
||||||
PooledByteBuffer buffer = this.pool.Take();
|
return ToLeakAwareBuffer(buf);
|
||||||
buffer.Init();
|
}
|
||||||
|
|
||||||
return ToLeakAwareBuffer(buffer);
|
sealed class PoolThreadLocalCache : FastThreadLocal<PoolThreadCache<byte[]>>
|
||||||
|
{
|
||||||
|
readonly PooledByteBufferAllocator owner;
|
||||||
|
|
||||||
|
public PoolThreadLocalCache(PooledByteBufferAllocator owner)
|
||||||
|
{
|
||||||
|
this.owner = owner;
|
||||||
|
}
|
||||||
|
|
||||||
|
protected override PoolThreadCache<byte[]> GetInitialValue()
|
||||||
|
{
|
||||||
|
lock (this)
|
||||||
|
{
|
||||||
|
PoolArena<byte[]> heapArena = this.GetLeastUsedArena(this.owner.heapArenas);
|
||||||
|
|
||||||
|
return new PoolThreadCache<byte[]>(
|
||||||
|
heapArena, this.owner.tinyCacheSize, this.owner.smallCacheSize, this.owner.normalCacheSize,
|
||||||
|
DEFAULT_MAX_CACHED_BUFFER_CAPACITY, DEFAULT_CACHE_TRIM_INTERVAL);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
protected override void OnRemoval(PoolThreadCache<byte[]> threadCache) => threadCache.Free();
|
||||||
|
|
||||||
|
PoolArena<T> GetLeastUsedArena<T>(PoolArena<T>[] arenas)
|
||||||
|
{
|
||||||
|
if (arenas == null || arenas.Length == 0)
|
||||||
|
{
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
PoolArena<T> minArena = arenas[0];
|
||||||
|
for (int i = 1; i < arenas.Length; i++)
|
||||||
|
{
|
||||||
|
PoolArena<T> arena = arenas[i];
|
||||||
|
if (arena.NumThreadCaches < minArena.NumThreadCaches)
|
||||||
|
{
|
||||||
|
minArena = arena;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return minArena;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return the number of heap arenas.
|
||||||
|
*/
|
||||||
|
|
||||||
|
public int NumHeapArenas() => this.heapArenaMetrics.Count;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return a {@link List} of all heap {@link PoolArenaMetric}s that are provided by this pool.
|
||||||
|
*/
|
||||||
|
|
||||||
|
public IReadOnlyList<IPoolArenaMetric> HeapArenas() => this.heapArenaMetrics;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return the number of thread local caches used by this {@link PooledByteBufferAllocator}.
|
||||||
|
*/
|
||||||
|
|
||||||
|
public int NumThreadLocalCaches()
|
||||||
|
{
|
||||||
|
PoolArena<byte[]>[] arenas = this.heapArenas;
|
||||||
|
if (arenas == null)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int total = 0;
|
||||||
|
for (int i = 0; i < arenas.Length; i++)
|
||||||
|
{
|
||||||
|
total += arenas[i].NumThreadCaches;
|
||||||
|
}
|
||||||
|
|
||||||
|
return total;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return the size of the tiny cache.
|
||||||
|
public int TinyCacheSize => this.tinyCacheSize;
|
||||||
|
|
||||||
|
/// Return the size of the small cache.
|
||||||
|
public int SmallCacheSize => this.smallCacheSize;
|
||||||
|
|
||||||
|
/// Return the size of the normal cache.
|
||||||
|
public int NormalCacheSize => this.normalCacheSize;
|
||||||
|
|
||||||
|
internal PoolThreadCache<T> ThreadCache<T>() => (PoolThreadCache<T>)(object)this.threadCache.Value;
|
||||||
|
|
||||||
|
/// Returns the status of the allocator (which contains all metrics) as a string. Be aware this may be expensive
/// and so should not be called too frequently.
|
||||||
|
public string DumpStats()
|
||||||
|
{
|
||||||
|
int heapArenasLen = this.heapArenas?.Length ?? 0;
|
||||||
|
StringBuilder buf = new StringBuilder(512)
|
||||||
|
.Append(heapArenasLen)
|
||||||
|
.Append(" heap arena(s):")
|
||||||
|
.Append(StringUtil.Newline);
|
||||||
|
if (heapArenasLen > 0)
|
||||||
|
{
|
||||||
|
foreach (PoolArena<byte[]> a in this.heapArenas)
|
||||||
|
{
|
||||||
|
buf.Append(a);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.ToString();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -0,0 +1,207 @@
|
||||||
|
// Copyright (c) Microsoft. All rights reserved.
|
||||||
|
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||||
|
|
||||||
|
namespace DotNetty.Buffers
|
||||||
|
{
|
||||||
|
using System.IO;
|
||||||
|
using System.Threading;
|
||||||
|
using System.Threading.Tasks;
|
||||||
|
using DotNetty.Common;
|
||||||
|
using DotNetty.Common.Utilities;
|
||||||
|
|
||||||
|
sealed class PooledHeapByteBuffer : PooledByteBuffer<byte[]>
|
||||||
|
{
|
||||||
|
static readonly ThreadLocalPool<PooledHeapByteBuffer> Recycler = new ThreadLocalPool<PooledHeapByteBuffer>(handle => new PooledHeapByteBuffer(handle, 0));
|
||||||
|
|
||||||
|
internal static PooledHeapByteBuffer NewInstance(int maxCapacity)
|
||||||
|
{
|
||||||
|
PooledHeapByteBuffer buf = Recycler.Take();
|
||||||
|
buf.SetReferenceCount(1); // todo: reuse method?
|
||||||
|
buf.MaxCapacity = maxCapacity;
|
||||||
|
buf.SetIndex(0, 0);
|
||||||
|
buf.DiscardMarkers();
|
||||||
|
return buf;
|
||||||
|
}
|
||||||
|
|
||||||
|
PooledHeapByteBuffer(ThreadLocalPool.Handle recyclerHandle, int maxCapacity)
|
||||||
|
: base(recyclerHandle, maxCapacity)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
protected override byte _GetByte(int index) => this.Memory[this.Idx(index)];
|
||||||
|
|
||||||
|
protected override short _GetShort(int index)
|
||||||
|
{
|
||||||
|
index = this.Idx(index);
|
||||||
|
return (short)(this.Memory[index] << 8 | this.Memory[index + 1] & 0xFF);
|
||||||
|
}
|
||||||
|
|
||||||
|
protected override int _GetInt(int index)
|
||||||
|
{
|
||||||
|
index = this.Idx(index);
|
||||||
|
return (this.Memory[index] & 0xff) << 24 |
|
||||||
|
(this.Memory[index + 1] & 0xff) << 16 |
|
||||||
|
(this.Memory[index + 2] & 0xff) << 8 |
|
||||||
|
this.Memory[index + 3] & 0xff;
|
||||||
|
}
|
||||||
|
|
||||||
|
protected override long _GetLong(int index)
|
||||||
|
{
|
||||||
|
index = this.Idx(index);
|
||||||
|
return ((long)this.Memory[index] & 0xff) << 56 |
|
||||||
|
((long)this.Memory[index + 1] & 0xff) << 48 |
|
||||||
|
((long)this.Memory[index + 2] & 0xff) << 40 |
|
||||||
|
((long)this.Memory[index + 3] & 0xff) << 32 |
|
||||||
|
((long)this.Memory[index + 4] & 0xff) << 24 |
|
||||||
|
((long)this.Memory[index + 5] & 0xff) << 16 |
|
||||||
|
((long)this.Memory[index + 6] & 0xff) << 8 |
|
||||||
|
(long)this.Memory[index + 7] & 0xff;
|
||||||
|
}
|
||||||
|
|
||||||
|
public override IByteBuffer GetBytes(int index, IByteBuffer dst, int dstIndex, int length)
|
||||||
|
{
|
||||||
|
this.CheckDstIndex(index, length, dstIndex, dst.Capacity);
|
||||||
|
if (dst.HasArray)
|
||||||
|
{
|
||||||
|
this.GetBytes(index, dst.Array, dst.ArrayOffset + dstIndex, length);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
dst.SetBytes(dstIndex, this.Memory, this.Idx(index), length);
|
||||||
|
}
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
public override IByteBuffer GetBytes(int index, byte[] dst, int dstIndex, int length)
|
||||||
|
{
|
||||||
|
this.CheckDstIndex(index, length, dstIndex, dst.Length);
|
||||||
|
System.Array.Copy(this.Memory, this.Idx(index), dst, dstIndex, length);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
public override IByteBuffer GetBytes(int index, Stream destination, int length)
|
||||||
|
{
|
||||||
|
this.CheckIndex(index, length);
|
||||||
|
destination.Write(this.Memory, this.Idx(index), length);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
protected override void _SetByte(int index, int value) => this.Memory[this.Idx(index)] = (byte)value;
|
||||||
|
|
||||||
|
protected override void _SetShort(int index, int value)
|
||||||
|
{
|
||||||
|
index = this.Idx(index);
|
||||||
|
this.Memory[index] = (byte)value.RightUShift(8);
|
||||||
|
this.Memory[index + 1] = (byte)value;
|
||||||
|
}
|
||||||
|
|
||||||
|
protected override void _SetInt(int index, int value)
|
||||||
|
{
|
||||||
|
index = this.Idx(index);
|
||||||
|
this.Memory[index] = (byte)value.RightUShift(24);
|
||||||
|
this.Memory[index + 1] = (byte)value.RightUShift(16);
|
||||||
|
this.Memory[index + 2] = (byte)value.RightUShift(8);
|
||||||
|
this.Memory[index + 3] = (byte)value;
|
||||||
|
}
|
||||||
|
|
||||||
|
protected override void _SetLong(int index, long value)
|
||||||
|
{
|
||||||
|
index = this.Idx(index);
|
||||||
|
this.Memory[index] = (byte)value.RightUShift(56);
|
||||||
|
this.Memory[index + 1] = (byte)value.RightUShift(48);
|
||||||
|
this.Memory[index + 2] = (byte)value.RightUShift(40);
|
||||||
|
this.Memory[index + 3] = (byte)value.RightUShift(32);
|
||||||
|
this.Memory[index + 4] = (byte)value.RightUShift(24);
|
||||||
|
this.Memory[index + 5] = (byte)value.RightUShift(16);
|
||||||
|
this.Memory[index + 6] = (byte)value.RightUShift(8);
|
||||||
|
this.Memory[index + 7] = (byte)value;
|
||||||
|
}
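// Illustrative sketch (not part of the port): the _Get*/_Set* pairs above read and write
// big-endian values byte by byte. This plain-C# round trip shows the same packing for a long.
static class BigEndianDemo
{
    public static void WriteLong(byte[] memory, int index, long value)
    {
        for (int i = 0; i < 8; i++)
        {
            memory[index + i] = (byte)(value >> (56 - 8 * i)); // most significant byte first
        }
    }

    public static long ReadLong(byte[] memory, int index)
    {
        long result = 0;
        for (int i = 0; i < 8; i++)
        {
            result = result << 8 | (memory[index + i] & 0xffL);
        }
        return result; // ReadLong after WriteLong returns the original value
    }
}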
|
||||||
|
|
||||||
|
public override IByteBuffer SetBytes(int index, IByteBuffer src, int srcIndex, int length)
|
||||||
|
{
|
||||||
|
this.CheckSrcIndex(index, length, srcIndex, src.Capacity);
|
||||||
|
if (src.HasArray)
|
||||||
|
{
|
||||||
|
this.SetBytes(index, src.Array, src.ArrayOffset + srcIndex, length);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
src.GetBytes(srcIndex, this.Memory, this.Idx(index), length);
|
||||||
|
}
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
public override async Task<int> SetBytesAsync(int index, Stream src, int length, CancellationToken cancellationToken)
|
||||||
|
{
|
||||||
|
int readTotal = 0;
|
||||||
|
int read;
|
||||||
|
int offset = this.ArrayOffset + index;
|
||||||
|
do
|
||||||
|
{
|
||||||
|
read = await src.ReadAsync(this.Array, offset + readTotal, length - readTotal, cancellationToken);
|
||||||
|
readTotal += read;
|
||||||
|
}
|
||||||
|
while (read > 0 && readTotal < length);
|
||||||
|
|
||||||
|
return readTotal;
|
||||||
|
}
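// Generic sketch of the read loop in SetBytesAsync above (plain C#, relies only on the
// System.IO and System.Threading.Tasks usings already present in this file): keep reading
// until 'length' bytes have arrived or the stream signals end-of-stream by returning 0.
static class ReadFullyDemo
{
    public static async Task<int> ReadFullyAsync(Stream source, byte[] target, int offset, int length)
    {
        int readTotal = 0;
        int read;
        do
        {
            read = await source.ReadAsync(target, offset + readTotal, length - readTotal);
            readTotal += read;
        }
        while (read > 0 && readTotal < length);
        return readTotal; // may be less than 'length' if the stream ended early
    }
}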
|
||||||
|
|
||||||
|
public override IByteBuffer SetBytes(int index, byte[] src, int srcIndex, int length)
|
||||||
|
{
|
||||||
|
this.CheckSrcIndex(index, length, srcIndex, src.Length);
|
||||||
|
System.Array.Copy(src, srcIndex, this.Memory, this.Idx(index), length);
|
||||||
|
return this;
|
||||||
|
}
|
||||||
|
|
||||||
|
public override IByteBuffer Copy(int index, int length)
|
||||||
|
{
|
||||||
|
this.CheckIndex(index, length);
|
||||||
|
IByteBuffer copy = this.Allocator.Buffer(length, this.MaxCapacity);
|
||||||
|
copy.WriteBytes(this.Memory, this.Idx(index), length);
|
||||||
|
return copy;
|
||||||
|
}
|
||||||
|
|
||||||
|
//public int nioBufferCount()
|
||||||
|
//{
|
||||||
|
// return 1;
|
||||||
|
//}
|
||||||
|
|
||||||
|
//public ByteBuffer[] nioBuffers(int index, int length)
|
||||||
|
//{
|
||||||
|
// return new ByteBuffer[] { this.nioBuffer(index, length) };
|
||||||
|
//}
|
||||||
|
|
||||||
|
//public ByteBuffer nioBuffer(int index, int length)
|
||||||
|
//{
|
||||||
|
// checkIndex(index, length);
|
||||||
|
// index = idx(index);
|
||||||
|
// ByteBuffer buf = ByteBuffer.wrap(this.memory, index, length);
|
||||||
|
// return buf.slice();
|
||||||
|
//}
|
||||||
|
|
||||||
|
//public ByteBuffer internalNioBuffer(int index, int length)
|
||||||
|
//{
|
||||||
|
// checkIndex(index, length);
|
||||||
|
// index = idx(index);
|
||||||
|
// return (ByteBuffer)internalNioBuffer().clear().position(index).limit(index + length);
|
||||||
|
//}
|
||||||
|
|
||||||
|
public override bool HasArray => true;
|
||||||
|
|
||||||
|
public override byte[] Array
|
||||||
|
{
|
||||||
|
get
|
||||||
|
{
|
||||||
|
this.EnsureAccessible();
|
||||||
|
return this.Memory;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public override int ArrayOffset => this.Offset;
|
||||||
|
|
||||||
|
//protected ByteBuffer newInternalNioBuffer(byte[] memory)
|
||||||
|
//{
|
||||||
|
// return ByteBuffer.wrap(memory);
|
||||||
|
//}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,10 @@
|
||||||
|
// Copyright (c) Microsoft. All rights reserved.
|
||||||
|
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||||
|
|
||||||
|
using System.Runtime.CompilerServices;
|
||||||
|
|
||||||
|
#if !NOTEST
|
||||||
|
|
||||||
|
[assembly: InternalsVisibleTo("DotNetty.Buffers.Tests")]
|
||||||
|
|
||||||
|
#endif
|
|
@ -5,11 +5,11 @@ namespace DotNetty.Buffers
|
||||||
{
|
{
|
||||||
using DotNetty.Common;
|
using DotNetty.Common;
|
||||||
|
|
||||||
sealed class SimpleLeakAwareByteBuf : WrappedByteBuffer
|
sealed class SimpleLeakAwareByteBuffer : WrappedByteBuffer
|
||||||
{
|
{
|
||||||
readonly IResourceLeak leak;
|
readonly IResourceLeak leak;
|
||||||
|
|
||||||
internal SimpleLeakAwareByteBuf(IByteBuffer buf, IResourceLeak leak)
|
internal SimpleLeakAwareByteBuffer(IByteBuffer buf, IResourceLeak leak)
|
||||||
: base(buf)
|
: base(buf)
|
||||||
{
|
{
|
||||||
this.leak = leak;
|
this.leak = leak;
|
||||||
|
@ -54,28 +54,28 @@ namespace DotNetty.Buffers
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
return new SimpleLeakAwareByteBuf(base.WithOrder(endianness), this.leak);
|
return new SimpleLeakAwareByteBuffer(base.WithOrder(endianness), this.leak);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public override IByteBuffer Slice()
|
public override IByteBuffer Slice()
|
||||||
{
|
{
|
||||||
return new SimpleLeakAwareByteBuf(base.Slice(), this.leak);
|
return new SimpleLeakAwareByteBuffer(base.Slice(), this.leak);
|
||||||
}
|
}
|
||||||
|
|
||||||
public override IByteBuffer Slice(int index, int length)
|
public override IByteBuffer Slice(int index, int length)
|
||||||
{
|
{
|
||||||
return new SimpleLeakAwareByteBuf(base.Slice(index, length), this.leak);
|
return new SimpleLeakAwareByteBuffer(base.Slice(index, length), this.leak);
|
||||||
}
|
}
|
||||||
|
|
||||||
public override IByteBuffer Duplicate()
|
public override IByteBuffer Duplicate()
|
||||||
{
|
{
|
||||||
return new SimpleLeakAwareByteBuf(base.Duplicate(), this.leak);
|
return new SimpleLeakAwareByteBuffer(base.Duplicate(), this.leak);
|
||||||
}
|
}
|
||||||
|
|
||||||
public override IByteBuffer ReadSlice(int length)
|
public override IByteBuffer ReadSlice(int length)
|
||||||
{
|
{
|
||||||
return new SimpleLeakAwareByteBuf(base.ReadSlice(length), this.leak);
|
return new SimpleLeakAwareByteBuffer(base.ReadSlice(length), this.leak);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
|
@ -648,6 +648,34 @@ namespace DotNetty.Buffers
|
||||||
return this.buf.ForEachByteDesc(index, length, processor);
|
return this.buf.ForEachByteDesc(index, length, processor);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public override int GetHashCode()
|
||||||
|
{
|
||||||
|
return this.buf.GetHashCode();
|
||||||
|
}
|
||||||
|
|
||||||
|
public override bool Equals(object obj)
|
||||||
|
{
|
||||||
|
return this.Equals(obj as IByteBuffer);
|
||||||
|
}
|
||||||
|
|
||||||
|
public bool Equals(IByteBuffer buffer)
|
||||||
|
{
|
||||||
|
if (ReferenceEquals(this, buffer))
|
||||||
|
{
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (buffer != null)
|
||||||
|
{
|
||||||
|
return ByteBufferUtil.Equals(this, buffer);
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
public int CompareTo(IByteBuffer buffer)
|
||||||
|
{
|
||||||
|
return ByteBufferUtil.Compare(this, buffer);
|
||||||
|
}
|
||||||
|
|
||||||
public override string ToString()
|
public override string ToString()
|
||||||
{
|
{
|
||||||
return "Swapped(" + this.buf + ")";
|
return "Swapped(" + this.buf + ")";
|
||||||
|
|
|
@ -554,26 +554,6 @@ namespace DotNetty.Buffers
|
||||||
// return this.buf.BytesBefore(index, length, value);
|
// return this.buf.BytesBefore(index, length, value);
|
||||||
//}
|
//}
|
||||||
|
|
||||||
//public virtual int ForEachByte(ByteProcessor processor)
|
|
||||||
//{
|
|
||||||
// return this.buf.ForEachByte(processor);
|
|
||||||
//}
|
|
||||||
|
|
||||||
//public virtual int ForEachByte(int index, int length, ByteProcessor processor)
|
|
||||||
//{
|
|
||||||
// return this.buf.ForEachByte(index, length, processor);
|
|
||||||
//}
|
|
||||||
|
|
||||||
//public virtual int ForEachByteDesc(ByteProcessor processor)
|
|
||||||
//{
|
|
||||||
// return this.buf.ForEachByteDesc(processor);
|
|
||||||
//}
|
|
||||||
|
|
||||||
//public virtual int ForEachByteDesc(int index, int length, ByteProcessor processor)
|
|
||||||
//{
|
|
||||||
// return this.buf.ForEachByteDesc(index, length, processor);
|
|
||||||
//}
|
|
||||||
|
|
||||||
public virtual IByteBuffer Copy()
|
public virtual IByteBuffer Copy()
|
||||||
{
|
{
|
||||||
return this.Buf.Copy();
|
return this.Buf.Copy();
|
||||||
|
@ -610,17 +590,6 @@ namespace DotNetty.Buffers
|
||||||
|
|
||||||
public virtual int ArrayOffset => this.Buf.ArrayOffset;
|
public virtual int ArrayOffset => this.Buf.ArrayOffset;
|
||||||
|
|
||||||
// todo: port: complete
|
|
||||||
// public virtual String toString(Charset charset)
|
|
||||||
// {
|
|
||||||
// return buf.toString(charset);
|
|
||||||
// }
|
|
||||||
|
|
||||||
//public virtual String toString(int index, int length, Charset charset)
|
|
||||||
// {
|
|
||||||
// return buf.ToString(index, length, charset);
|
|
||||||
// }
|
|
||||||
|
|
||||||
public override int GetHashCode()
|
public override int GetHashCode()
|
||||||
{
|
{
|
||||||
return this.Buf.GetHashCode();
|
return this.Buf.GetHashCode();
|
||||||
|
@ -631,11 +600,15 @@ namespace DotNetty.Buffers
|
||||||
return this.Buf.Equals(obj);
|
return this.Buf.Equals(obj);
|
||||||
}
|
}
|
||||||
|
|
||||||
// todo: port: complete
|
public bool Equals(IByteBuffer buffer)
|
||||||
//public virtual int CompareTo(IByteBuffer buffer)
|
{
|
||||||
//{
|
return this.Buf.Equals(buffer);
|
||||||
// return this.buf.CompareTo(buffer);
|
}
|
||||||
//}
|
|
||||||
|
public virtual int CompareTo(IByteBuffer buffer)
|
||||||
|
{
|
||||||
|
return this.Buf.CompareTo(buffer);
|
||||||
|
}
|
||||||
|
|
||||||
public override string ToString()
|
public override string ToString()
|
||||||
{
|
{
|
||||||
|
@ -690,22 +663,22 @@ namespace DotNetty.Buffers
|
||||||
|
|
||||||
public int ForEachByte(ByteProcessor processor)
|
public int ForEachByte(ByteProcessor processor)
|
||||||
{
|
{
|
||||||
return this.ForEachByte(processor);
|
return this.Buf.ForEachByte(processor);
|
||||||
}
|
}
|
||||||
|
|
||||||
public int ForEachByte(int index, int length, ByteProcessor processor)
|
public int ForEachByte(int index, int length, ByteProcessor processor)
|
||||||
{
|
{
|
||||||
return this.ForEachByte(index, length, processor);
|
return this.Buf.ForEachByte(index, length, processor);
|
||||||
}
|
}
|
||||||
|
|
||||||
public int ForEachByteDesc(ByteProcessor processor)
|
public int ForEachByteDesc(ByteProcessor processor)
|
||||||
{
|
{
|
||||||
return this.ForEachByteDesc(processor);
|
return this.Buf.ForEachByteDesc(processor);
|
||||||
}
|
}
|
||||||
|
|
||||||
public int ForEachByteDesc(int index, int length, ByteProcessor processor)
|
public int ForEachByteDesc(int index, int length, ByteProcessor processor)
|
||||||
{
|
{
|
||||||
return this.ForEachByteDesc(processor);
|
return this.Buf.ForEachByteDesc(processor);
|
||||||
}
|
}
|
||||||
|
|
||||||
public virtual string ToString(Encoding encoding)
|
public virtual string ToString(Encoding encoding)
|
||||||
|
|
|
@ -96,7 +96,7 @@ namespace DotNetty.Common.Concurrency
|
||||||
|
|
||||||
public override void Execute(IRunnable task)
|
public override void Execute(IRunnable task)
|
||||||
{
|
{
|
||||||
this.taskQueue.Enqueue(task);
|
this.taskQueue.TryEnqueue(task);
|
||||||
|
|
||||||
if (!this.InEventLoop)
|
if (!this.InEventLoop)
|
||||||
{
|
{
|
||||||
|
@ -371,7 +371,7 @@ namespace DotNetty.Common.Concurrency
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
this.taskQueue.Enqueue(scheduledTask);
|
this.taskQueue.TryEnqueue(scheduledTask);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -161,6 +161,13 @@
    <Compile Include="Concurrency\StateActionWithContextScheduledAsyncTask.cs" />
    <Compile Include="Concurrency\StateActionWithContextScheduledTask.cs" />
    <Compile Include="Deque.cs" />
+   <Compile Include="FastThreadLocal.cs" />
+   <Compile Include="InternalThreadLocalMap.cs" />
+   <Compile Include="Internal\AbstractQueue.cs" />
+   <Compile Include="Internal\IQueue.cs" />
+   <Compile Include="Internal\ConcurrentCircularArrayQueue.cs" />
+   <Compile Include="Internal\MpscArrayQueue.cs" />
+   <Compile Include="Internal\PlatformDependent.cs" />
    <Compile Include="Properties\Friends.cs" />
    <Compile Include="Concurrency\AbstractEventExecutor.cs" />
    <Compile Include="Concurrency\AbstractScheduledEventExecutor.cs" />

@@ -185,15 +192,18 @@
    <Compile Include="IResourceLeak.cs" />
    <Compile Include="ResourceLeakDetector.cs" />
    <Compile Include="IResourceLeakHint.cs" />
+   <Compile Include="ThreadDeathWatcher.cs" />
    <Compile Include="ThreadLocalObjectList.cs" />
    <Compile Include="ThreadLocalPool.cs" />
    <Compile Include="PreciseTimeSpan.cs" />
    <Compile Include="Utilities\AtomicReference.cs" />
    <Compile Include="Utilities\BitOps.cs" />
-   <Compile Include="Utilities\ByteArrayExtensions.cs" />
+   <Compile Include="Utilities\ArrayExtensions.cs" />
    <Compile Include="Utilities\DebugExtensions.cs" />
+   <Compile Include="Utilities\IntegerExtensions.cs" />
    <Compile Include="Utilities\MpscLinkedQueue.cs" />
    <Compile Include="Utilities\PriorityQueue.cs" />
+   <Compile Include="Utilities\RandomExtensions.cs" />
    <Compile Include="Utilities\RecyclableMpscLinkedQueueNode.cs" />
    <Compile Include="Utilities\ReferenceCountUtil.cs" />
    <Compile Include="Internal\SystemPropertyUtil.cs" />
@@ -0,0 +1,210 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

namespace DotNetty.Common
{
    using System.Collections.Generic;
    using System.Runtime.CompilerServices;

    public abstract class FastThreadLocal
    {
        static readonly int VariablesToRemoveIndex = InternalThreadLocalMap.NextVariableIndex();

        /// <summary>
        /// Removes all {@link FastThreadLocal} variables bound to the current thread. This operation is useful when you
        /// are in a container environment, and you don't want to leave the thread local variables in the threads you do not
        /// manage.
        /// </summary>
        public static void RemoveAll()
        {
            InternalThreadLocalMap threadLocalMap = InternalThreadLocalMap.GetIfSet();
            if (threadLocalMap == null)
            {
                return;
            }

            try
            {
                object v = threadLocalMap.GetIndexedVariable(VariablesToRemoveIndex);
                if (v != null && v != InternalThreadLocalMap.Unset)
                {
                    var variablesToRemove = (HashSet<FastThreadLocal>)v;
                    foreach (FastThreadLocal tlv in variablesToRemove) // todo: do we need to make a snapshot?
                    {
                        tlv.Remove(threadLocalMap);
                    }
                }
            }
            finally
            {
                InternalThreadLocalMap.Remove();
            }
        }

        /// Destroys the data structure that keeps all {@link FastThreadLocal} variables accessed from
        /// non-{@link FastThreadLocalThread}s. This operation is useful when you are in a container environment, and you
        /// do not want to leave the thread local variables in the threads you do not manage. Call this method when your
        /// application is being unloaded from the container.
        public static void Destroy()
        {
            InternalThreadLocalMap.Destroy();
        }

        protected static void AddToVariablesToRemove(InternalThreadLocalMap threadLocalMap, FastThreadLocal variable)
        {
            object v = threadLocalMap.GetIndexedVariable(VariablesToRemoveIndex);
            HashSet<FastThreadLocal> variablesToRemove;
            if (v == InternalThreadLocalMap.Unset || v == null)
            {
                variablesToRemove = new HashSet<FastThreadLocal>(); // Collections.newSetFromMap(new IdentityHashMap<FastThreadLocal<?>, Boolean>());
                threadLocalMap.SetIndexedVariable(VariablesToRemoveIndex, variablesToRemove);
            }
            else
            {
                variablesToRemove = (HashSet<FastThreadLocal>)v;
            }

            variablesToRemove.Add(variable);
        }

        protected static void RemoveFromVariablesToRemove(InternalThreadLocalMap threadLocalMap, FastThreadLocal variable)
        {
            object v = threadLocalMap.GetIndexedVariable(VariablesToRemoveIndex);

            if (v == InternalThreadLocalMap.Unset || v == null)
            {
                return;
            }

            var variablesToRemove = (HashSet<FastThreadLocal>)v;
            variablesToRemove.Remove(variable);
        }

        /// <summary>
        /// Sets the value to uninitialized; a subsequent call to Get() will trigger a call to GetInitialValue().
        /// </summary>
        /// <param name="threadLocalMap"></param>
        public abstract void Remove(InternalThreadLocalMap threadLocalMap);
    }

    public class FastThreadLocal<T> : FastThreadLocal
        where T : class
    {
        readonly int index;

        /// <summary>
        /// Returns the number of thread local variables bound to the current thread.
        /// </summary>
        public static int Count => InternalThreadLocalMap.GetIfSet()?.Count ?? 0;

        public FastThreadLocal()
        {
            this.index = InternalThreadLocalMap.NextVariableIndex();
        }

        /// <summary>
        /// Gets or sets current value for the current thread.
        /// </summary>
        public T Value
        {
            get { return this.Get(InternalThreadLocalMap.Get()); }
            set { this.Set(InternalThreadLocalMap.Get(), value); }
        }

        /// <summary>
        /// Returns the current value for the specified thread local map.
        /// The specified thread local map must be for the current thread.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public T Get(InternalThreadLocalMap threadLocalMap)
        {
            object v = threadLocalMap.GetIndexedVariable(this.index);
            if (v != InternalThreadLocalMap.Unset)
            {
                return (T)v;
            }

            return this.Initialize(threadLocalMap);
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        T Initialize(InternalThreadLocalMap threadLocalMap)
        {
            T v = this.GetInitialValue();

            threadLocalMap.SetIndexedVariable(this.index, v);
            AddToVariablesToRemove(threadLocalMap, this);
            return v;
        }

        /// <summary>
        /// Set the value for the specified thread local map. The specified thread local map must be for the current thread.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public void Set(InternalThreadLocalMap threadLocalMap, T value)
        {
            if (threadLocalMap.SetIndexedVariable(this.index, value))
            {
                AddToVariablesToRemove(threadLocalMap, this);
            }
        }

        /// <summary>
        /// Returns {@code true} if and only if this thread-local variable is set.
        /// </summary>
        public bool IsSet()
        {
            return this.IsSet(InternalThreadLocalMap.GetIfSet());
        }

        /// <summary>
        /// Returns {@code true} if and only if this thread-local variable is set.
        /// The specified thread local map must be for the current thread.
        /// </summary>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public bool IsSet(InternalThreadLocalMap threadLocalMap)
        {
            return threadLocalMap != null && threadLocalMap.IsIndexedVariableSet(this.index);
        }

        /// <summary>
        /// Returns the initial value for this thread-local variable.
        /// </summary>
        protected virtual T GetInitialValue()
        {
            return null;
        }

        public void Remove()
        {
            this.Remove(InternalThreadLocalMap.GetIfSet());
        }

        /// Sets the value to uninitialized for the specified thread local map;
        /// a subsequent call to Get() will trigger a call to GetInitialValue().
        /// The specified thread local map must be for the current thread.
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public sealed override void Remove(InternalThreadLocalMap threadLocalMap)
        {
            if (threadLocalMap == null)
            {
                return;
            }

            object v = threadLocalMap.RemoveIndexedVariable(this.index);
            RemoveFromVariablesToRemove(threadLocalMap, this);

            if (v != InternalThreadLocalMap.Unset)
            {
                this.OnRemoval((T)v);
            }
        }

        /// <summary>
        /// Invoked when this thread local variable is removed by {@link #remove()}.
        /// </summary>
        protected virtual void OnRemoval(T value)
        {
        }
    }
}
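A short usage sketch for the new FastThreadLocal<T>: subclass it, override GetInitialValue, and read/write through Value. The per-thread StringBuilder cache below is a hypothetical example, not something this commit adds:

    // Hypothetical example: a per-thread StringBuilder cache built on FastThreadLocal<T>.
    sealed class CachedBuilder : FastThreadLocal<StringBuilder>
    {
        protected override StringBuilder GetInitialValue()
        {
            // Called once per thread, on the first access to Value.
            return new StringBuilder(256);
        }
    }

    static readonly CachedBuilder Builder = new CachedBuilder();

    static string Render(int x)
    {
        StringBuilder sb = Builder.Value; // backed by InternalThreadLocalMap, no dictionary lookup
        sb.Length = 0;
        sb.Append("value=").Append(x);
        return sb.ToString();
    }

When a pooled or borrowed thread is handed back, FastThreadLocal.RemoveAll() (or Remove() per variable) clears the slots so the values do not outlive the work.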
@@ -0,0 +1,20 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

namespace DotNetty.Common.Internal
{
    public abstract class AbstractQueue<T> : IQueue<T>
    {
        public abstract bool TryEnqueue(T element);

        public abstract T Dequeue();

        public abstract T Peek();

        public abstract int Count { get; }

        public abstract bool IsEmpty { get; }

        public abstract void Clear();
    }
}
@@ -0,0 +1,146 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

namespace DotNetty.Common.Internal
{
    using System;
    using System.Threading;
    using DotNetty.Common.Utilities;

    /// Forked from <a href="https://github.com/JCTools/JCTools">JCTools</a>.
    /// A concurrent access enabling class used by circular array based queues; this class exposes an offset computation
    /// method along with differently memory fenced load/store methods into the underlying array. The class is pre-padded
    /// and the array is padded on either side to help with false sharing prevention. It is expected that subclasses
    /// handle post padding.
    /// <p />
    /// Offset calculation is separate from access to enable the reuse of a given computed offset.
    /// <p />
    /// Load/Store methods using a <i>buffer</i> parameter are provided to allow the prevention of field reload after a
    /// LoadLoad barrier.
    abstract class ConcurrentCircularArrayQueue<T> : ConcurrentCircularArrayQueueL0Pad<T>
        where T : class
    {
        protected static readonly int RefBufferPad = (64 * 2) / IntPtr.Size;
        protected long Mask;
        protected readonly T[] Buffer;

        protected ConcurrentCircularArrayQueue(int capacity)
        {
            int actualCapacity = IntegerExtensions.RoundUpToPowerOfTwo(capacity);
            this.Mask = actualCapacity - 1;
            // pad data on either end with some empty slots.
            this.Buffer = new T[actualCapacity + RefBufferPad * 2];
        }

        /// @param index desirable element index
        /// @return the offset in bytes within the array for a given index.
        protected long CalcElementOffset(long index)
        {
            return CalcElementOffset(index, this.Mask);
        }

        /// @param index desirable element index
        /// @param mask
        /// @return the offset in bytes within the array for a given index.
        protected static long CalcElementOffset(long index, long mask)
        {
            return RefBufferPad + (index & mask);
        }

        /// A plain store (no ordering/fences) of an element to a given offset
        /// @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
        /// @param e the element to store
        protected void SpElement(long offset, T e)
        {
            SpElement(this.Buffer, offset, e);
        }

        /// A plain store (no ordering/fences) of an element to a given offset
        /// @param buffer this.buffer
        /// @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
        /// @param e the element to store
        protected static void SpElement(T[] buffer, long offset, T e)
        {
            buffer[offset] = e;
        }

        /// An ordered store (store + StoreStore barrier) of an element to a given offset
        /// @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
        /// @param e the element to store
        protected void SoElement(long offset, T e)
        {
            SoElement(this.Buffer, offset, e);
        }

        /// An ordered store (store + StoreStore barrier) of an element to a given offset
        /// @param buffer this.buffer
        /// @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
        /// @param e the element to store
        protected static void SoElement(T[] buffer, long offset, T e)
        {
            Volatile.Write(ref buffer[offset], e);
        }

        /// A plain load (no ordering/fences) of an element from a given offset.
        /// @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
        /// @return the element at the offset
        protected T LpElement(long offset)
        {
            return LpElement(this.Buffer, offset);
        }

        /// A plain load (no ordering/fences) of an element from a given offset.
        /// @param buffer this.buffer
        /// @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
        /// @return the element at the offset
        protected static T LpElement(T[] buffer, long offset)
        {
            return buffer[offset];
        }

        /// A volatile load (load + LoadLoad barrier) of an element from a given offset.
        /// @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
        /// @return the element at the offset
        protected T LvElement(long offset)
        {
            return LvElement(this.Buffer, offset);
        }

        /// A volatile load (load + LoadLoad barrier) of an element from a given offset.
        /// @param buffer this.buffer
        /// @param offset computed via {@link ConcurrentCircularArrayQueue#calcElementOffset(long)}
        /// @return the element at the offset
        protected static T LvElement(T[] buffer, long offset)
        {
            return Volatile.Read(ref buffer[offset]);
        }

        public override void Clear()
        {
            while (this.Dequeue() != null || !this.IsEmpty)
            {
                // looping
            }
        }

        public int Capacity()
        {
            return (int)(this.Mask + 1);
        }
    }

    abstract class ConcurrentCircularArrayQueueL0Pad<T> : AbstractQueue<T>
    {
#pragma warning disable 169 // padded reference
        long p00, p01, p02, p03, p04, p05, p06, p07;
        long p30, p31, p32, p33, p34, p35, p36, p37;
#pragma warning restore 169
    }
}
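The offset arithmetic above relies on the capacity being rounded up to a power of two, so index & mask replaces a modulo, and the RefBufferPad slots on each side keep hot elements away from neighbouring cache lines. A stand-alone sketch of the same indexing scheme (illustrative, not DotNetty code):

    // Sketch: power-of-two ring indexing with padding, mirroring CalcElementOffset.
    int capacity = IntegerExtensions.RoundUpToPowerOfTwo(100); // 128
    long mask = capacity - 1;
    int refBufferPad = (64 * 2) / IntPtr.Size;                 // 16 object slots on a 64-bit runtime
    var buffer = new object[capacity + refBufferPad * 2];

    long OffsetOf(long index) => refBufferPad + (index & mask); // wraps without %

    long slot = OffsetOf(130); // logical index 130 lands in slot refBufferPad + 2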
@@ -0,0 +1,20 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

namespace DotNetty.Common.Internal
{
    public interface IQueue<T>
    {
        bool TryEnqueue(T element);

        T Dequeue();

        T Peek();

        int Count { get; }

        bool IsEmpty { get; }

        void Clear();
    }
}
@@ -0,0 +1,331 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

namespace DotNetty.Common.Internal
{
    using System.Diagnostics.Contracts;
    using System.Threading;

    /// Forked from <a href="https://github.com/JCTools/JCTools">JCTools</a>.
    /// A Multi-Producer-Single-Consumer queue based on a {@link ConcurrentCircularArrayQueue}. This implies that
    /// any thread may call the offer method, but only a single thread may call poll/peek for correctness to be
    /// maintained.
    /// <br />
    /// This implementation follows patterns documented on the package level for false sharing protection.
    /// <br />
    /// This implementation is using the <a href="http://sourceforge.net/projects/mc-fastflow/">Fast Flow</a>
    /// method for polling from the queue (with minor change to correctly publish the index) and an extension of
    /// the Leslie Lamport concurrent queue algorithm (originated by Martin Thompson) on the producer side.
    sealed class MpscArrayQueue<T> : MpscArrayQueueConsumerField<T>
        where T : class
    {
#pragma warning disable 169 // padded reference
        long p40, p41, p42, p43, p44, p45, p46;
        long p30, p31, p32, p33, p34, p35, p36, p37;
#pragma warning restore 169

        public MpscArrayQueue(int capacity)
            : base(capacity)
        {
        }

        /// {@inheritDoc}
        /// <br />
        /// IMPLEMENTATION NOTES:
        /// <br />
        /// Lock free offer using a single CAS. As class name suggests access is permitted to many threads
        /// concurrently.
        /// @see java.util.Queue#offer(java.lang.Object)
        public override bool TryEnqueue(T e)
        {
            Contract.Requires(e != null);

            // use a cached view on consumer index (potentially updated in loop)
            long mask = this.Mask;
            long capacity = mask + 1;
            long consumerIndexCache = this.ConsumerIndexCache; // LoadLoad
            long currentProducerIndex;
            do
            {
                currentProducerIndex = this.ProducerIndex; // LoadLoad
                long wrapPoint = currentProducerIndex - capacity;
                if (consumerIndexCache <= wrapPoint)
                {
                    long currHead = this.ConsumerIndex; // LoadLoad
                    if (currHead <= wrapPoint)
                    {
                        return false; // FULL :(
                    }
                    else
                    {
                        // update shared cached value of the consumerIndex
                        this.ConsumerIndexCache = currHead; // StoreLoad
                        // update on stack copy, we might need this value again if we lose the CAS.
                        consumerIndexCache = currHead;
                    }
                }
            }
            while (!this.TrySetProducerIndex(currentProducerIndex, currentProducerIndex + 1));

            // NOTE: the new producer index value is made visible BEFORE the element in the array. If we relied on
            // the index visibility to poll() we would need to handle the case where the element is not visible.

            // Won CAS, move on to storing
            long offset = CalcElementOffset(currentProducerIndex, mask);
            this.SoElement(offset, e); // StoreStore
            return true; // AWESOME :)
        }

        /// A wait free alternative to offer which fails on CAS failure.
        /// @param e new element, not null
        /// @return 1 if next element cannot be filled, -1 if CAS failed, 0 if successful
        public int WeakEnqueue(T e)
        {
            Contract.Requires(e != null);

            long mask = this.Mask;
            long capacity = mask + 1;
            long currentTail = this.ProducerIndex; // LoadLoad
            long consumerIndexCache = this.ConsumerIndexCache; // LoadLoad
            long wrapPoint = currentTail - capacity;
            if (consumerIndexCache <= wrapPoint)
            {
                long currHead = this.ConsumerIndex; // LoadLoad
                if (currHead <= wrapPoint)
                {
                    return 1; // FULL :(
                }
                else
                {
                    this.ConsumerIndexCache = currHead; // StoreLoad
                }
            }

            // look Ma, no loop!
            if (!this.TrySetProducerIndex(currentTail, currentTail + 1))
            {
                return -1; // CAS FAIL :(
            }

            // Won CAS, move on to storing
            long offset = CalcElementOffset(currentTail, mask);
            this.SoElement(offset, e);
            return 0; // AWESOME :)
        }

        /// {@inheritDoc}
        /// <p />
        /// IMPLEMENTATION NOTES:
        /// <br />
        /// Lock free poll using ordered loads/stores. As class name suggests access is limited to a single thread.
        /// @see java.util.Queue#poll()
        public override T Dequeue()
        {
            long consumerIndex = this.ConsumerIndex; // LoadLoad
            long offset = this.CalcElementOffset(consumerIndex);
            // Copy field to avoid re-reading after volatile load
            T[] buffer = this.Buffer;

            // If we can't see the next available element we can't poll
            T e = LvElement(buffer, offset); // LoadLoad
            if (null == e)
            {
                // NOTE: Queue may not actually be empty in the case of a producer (P1) being interrupted after
                // winning the CAS on offer but before storing the element in the queue. Other producers may go on
                // to fill up the queue after this element.

                if (consumerIndex != this.ProducerIndex)
                {
                    do
                    {
                        e = LvElement(buffer, offset);
                    }
                    while (e == null);
                }
                else
                {
                    return default(T);
                }
            }

            SpElement(buffer, offset, default(T));
            this.ConsumerIndex = consumerIndex + 1; // StoreStore
            return e;
        }

        /// {@inheritDoc}
        /// <p />
        /// IMPLEMENTATION NOTES:
        /// <br />
        /// Lock free peek using ordered loads. As class name suggests access is limited to a single thread.
        /// @see java.util.Queue#poll()
        public override T Peek()
        {
            // Copy field to avoid re-reading after volatile load
            T[] buffer = this.Buffer;

            long consumerIndex = this.ConsumerIndex; // LoadLoad
            long offset = this.CalcElementOffset(consumerIndex);
            T e = LvElement(buffer, offset);
            if (null == e)
            {
                // NOTE: Queue may not actually be empty in the case of a producer (P1) being interrupted after
                // winning the CAS on offer but before storing the element in the queue. Other producers may go on
                // to fill up the queue after this element.

                if (consumerIndex != this.ProducerIndex)
                {
                    do
                    {
                        e = LvElement(buffer, offset);
                    }
                    while (e == null);
                }
                else
                {
                    return default(T);
                }
            }
            return e;
        }

        /// {@inheritDoc}
        public override int Count
        {
            get
            {
                // It is possible for a thread to be interrupted or reschedule between the read of the producer and
                // consumer indices, therefore protection is required to ensure size is within valid range. In the
                // event of concurrent polls/offers to this method the size is OVER estimated as we read consumer
                // index BEFORE the producer index.

                long after = this.ConsumerIndex;
                while (true)
                {
                    long before = after;
                    long currentProducerIndex = this.ProducerIndex;
                    after = this.ConsumerIndex;
                    if (before == after)
                    {
                        return (int)(currentProducerIndex - after);
                    }
                }
            }
        }

        public override bool IsEmpty
        {
            get
            {
                // Order matters!
                // Loading consumer before producer allows for producer increments after consumer index is read.
                // This ensures the correctness of this method at least for the consumer thread. Other threads' POV
                // is not really something we can fix here.
                return this.ConsumerIndex == this.ProducerIndex;
            }
        }
    }

    abstract class MpscArrayQueueL1Pad<T> : ConcurrentCircularArrayQueue<T>
        where T : class
    {
#pragma warning disable 169 // padded reference
        long p10, p11, p12, p13, p14, p15, p16;
        long p30, p31, p32, p33, p34, p35, p36, p37;
#pragma warning restore 169

        protected MpscArrayQueueL1Pad(int capacity)
            : base(capacity)
        {
        }
    }

    abstract class MpscArrayQueueTailField<T> : MpscArrayQueueL1Pad<T>
        where T : class
    {
        long producerIndex;

        protected MpscArrayQueueTailField(int capacity)
            : base(capacity)
        {
        }

        protected long ProducerIndex
        {
            get { return Volatile.Read(ref this.producerIndex); }
        }

        protected bool TrySetProducerIndex(long expect, long newValue)
        {
            return Interlocked.CompareExchange(ref this.producerIndex, newValue, expect) == expect;
        }
    }

    abstract class MpscArrayQueueMidPad<T> : MpscArrayQueueTailField<T>
        where T : class
    {
#pragma warning disable 169 // padded reference
        long p20, p21, p22, p23, p24, p25, p26;
        long p30, p31, p32, p33, p34, p35, p36, p37;
#pragma warning restore 169

        protected MpscArrayQueueMidPad(int capacity)
            : base(capacity)
        {
        }
    }

    abstract class MpscArrayQueueHeadCacheField<T> : MpscArrayQueueMidPad<T>
        where T : class
    {
        long headCache;

        protected MpscArrayQueueHeadCacheField(int capacity)
            : base(capacity)
        {
        }

        protected long ConsumerIndexCache
        {
            get { return Volatile.Read(ref this.headCache); }
            set { Volatile.Write(ref this.headCache, value); }
        }
    }

    abstract class MpscArrayQueueL2Pad<T> : MpscArrayQueueHeadCacheField<T>
        where T : class
    {
#pragma warning disable 169 // padded reference
        long p20, p21, p22, p23, p24, p25, p26;
        long p30, p31, p32, p33, p34, p35, p36, p37;
#pragma warning restore 169

        protected MpscArrayQueueL2Pad(int capacity)
            : base(capacity)
        {
        }
    }

    abstract class MpscArrayQueueConsumerField<T> : MpscArrayQueueL2Pad<T>
        where T : class
    {
        long consumerIndex;

        protected MpscArrayQueueConsumerField(int capacity)
            : base(capacity)
        {
        }

        protected long ConsumerIndex
        {
            get { return Volatile.Read(ref this.consumerIndex); }
            set { Volatile.Write(ref this.consumerIndex, value); } // todo: revisit: UNSAFE.putOrderedLong -- StoreStore fence
        }
    }
}
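A minimal usage sketch for the MPSC queue: any number of producer threads may call TryEnqueue, while exactly one consumer thread drains with Dequeue/Peek. The worker-loop shape below is illustrative and not taken from the commit; the queue is obtained through the public PlatformDependent factory since the array queue type itself is internal:

    // Sketch: several producers, a single consumer thread draining the queue.
    IQueue<string> queue = PlatformDependent.NewFixedMpscQueue<string>(1024);

    // producers (any thread)
    bool accepted = queue.TryEnqueue("job-1"); // false when the ring is full

    // consumer (one dedicated thread only)
    var consumer = new Thread(() =>
    {
        for (;;)
        {
            string job = queue.Dequeue(); // null when empty
            if (job == null)
            {
                Thread.Yield();
                continue;
            }
            Console.WriteLine(job);
        }
    });
    consumer.IsBackground = true;
    consumer.Start();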
@@ -0,0 +1,22 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

namespace DotNetty.Common.Internal
{
    using DotNetty.Common.Utilities;

    public static class PlatformDependent
    {
        public static IQueue<T> NewFixedMpscQueue<T>(int capacity)
            where T : class
        {
            return new MpscArrayQueue<T>(capacity);
        }

        public static IQueue<T> NewMpscQueue<T>()
            where T : class
        {
            return new MpscLinkedQueue<T>();
        }
    }
}
@@ -0,0 +1,220 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

namespace DotNetty.Common
{
    using System;
    using System.Runtime.CompilerServices;
    using System.Text;
    using System.Threading;
    using DotNetty.Common.Utilities;

    /// <summary>
    /// The internal data structure that stores the thread-local variables for Netty and all {@link FastThreadLocal}s.
    /// Note that this class is for internal use only and is subject to change at any time. Use {@link FastThreadLocal}
    /// unless you know what you are doing.
    /// </summary>
    public sealed class InternalThreadLocalMap
    {
        public static readonly object Unset = new object();

        [ThreadStatic]
        static InternalThreadLocalMap slowThreadLocalMap;

        static int nextIndex;

        /// Used by {@link FastThreadLocal}
        object[] indexedVariables;

        // Core thread-locals
        int futureListenerStackDepth;
        int localChannelReaderStackDepth;

        // String-related thread-locals
        StringBuilder stringBuilder;

        internal static int NextVariableIndex()
        {
            int index = Interlocked.Increment(ref nextIndex);
            if (index < 0)
            {
                Interlocked.Decrement(ref nextIndex);
                throw new InvalidOperationException("too many thread-local indexed variables");
            }
            return index;
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static InternalThreadLocalMap GetIfSet()
        {
            return slowThreadLocalMap;
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static InternalThreadLocalMap Get()
        {
            InternalThreadLocalMap ret = slowThreadLocalMap;
            if (ret == null)
            {
                ret = new InternalThreadLocalMap();
                slowThreadLocalMap = ret;
            }
            return ret;
        }

        public static void Remove()
        {
            slowThreadLocalMap = null;
        }

        public static void Destroy()
        {
            slowThreadLocalMap = null;
        }

        // Cache line padding (must be public)
        // With CompressedOops enabled, an instance of this class should occupy at least 128 bytes.
        public long rp1, rp2, rp3, rp4, rp5, rp6, rp7, rp8, rp9;

        InternalThreadLocalMap()
        {
            this.indexedVariables = CreateIndexedVariableTable();
        }

        static object[] CreateIndexedVariableTable()
        {
            var array = new object[32];

            array.Fill(Unset);
            return array;
        }

        public int Count
        {
            get
            {
                int count = 0;

                if (this.futureListenerStackDepth != 0)
                {
                    count++;
                }
                if (this.localChannelReaderStackDepth != 0)
                {
                    count++;
                }
                if (this.stringBuilder != null)
                {
                    count++;
                }
                foreach (object o in this.indexedVariables)
                {
                    if (o != Unset)
                    {
                        count++;
                    }
                }

                // We should subtract 1 from the count because the first element in 'indexedVariables' is reserved
                // by 'FastThreadLocal' to keep the list of 'FastThreadLocal's to remove on 'FastThreadLocal.RemoveAll()'.
                return count - 1;
            }
        }

        public StringBuilder StringBuilder
        {
            get
            {
                StringBuilder builder = this.stringBuilder;
                if (builder == null)
                {
                    this.stringBuilder = builder = new StringBuilder(512);
                }
                else
                {
                    builder.Length = 0;
                }
                return builder;
            }
        }

        public int FutureListenerStackDepth
        {
            get { return this.futureListenerStackDepth; }
            set { this.futureListenerStackDepth = value; }
        }

        public int LocalChannelReaderStackDepth
        {
            get { return this.localChannelReaderStackDepth; }
            set { this.localChannelReaderStackDepth = value; }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public object GetIndexedVariable(int index)
        {
            object[] lookup = this.indexedVariables;
            return index < lookup.Length ? lookup[index] : Unset;
        }

        /**
         * @return {@code true} if and only if a new thread-local variable has been created
         */
        public bool SetIndexedVariable(int index, object value)
        {
            object[] lookup = this.indexedVariables;
            if (index < lookup.Length)
            {
                object oldValue = lookup[index];
                lookup[index] = value;
                return oldValue == Unset;
            }
            else
            {
                this.ExpandIndexedVariableTableAndSet(index, value);
                return true;
            }
        }

        void ExpandIndexedVariableTableAndSet(int index, object value)
        {
            object[] oldArray = this.indexedVariables;
            int oldCapacity = oldArray.Length;
            int newCapacity = index;
            newCapacity |= newCapacity.RightUShift(1);
            newCapacity |= newCapacity.RightUShift(2);
            newCapacity |= newCapacity.RightUShift(4);
            newCapacity |= newCapacity.RightUShift(8);
            newCapacity |= newCapacity.RightUShift(16);
            newCapacity++;

            var newArray = new object[newCapacity];
            oldArray.CopyTo(newArray, 0);
            newArray.Fill(oldCapacity, newArray.Length - oldCapacity, Unset);
            newArray[index] = value;
            this.indexedVariables = newArray;
        }

        public object RemoveIndexedVariable(int index)
        {
            object[] lookup = this.indexedVariables;
            if (index < lookup.Length)
            {
                object v = lookup[index];
                lookup[index] = Unset;
                return v;
            }
            else
            {
                return Unset;
            }
        }

        public bool IsIndexedVariableSet(int index)
        {
            object[] lookup = this.indexedVariables;
            return index < lookup.Length && lookup[index] != Unset;
        }
    }
}
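The map distinguishes "never set" from "set to null" with the Unset sentinel, and SetIndexedVariable reports whether a slot was claimed for the first time so FastThreadLocal can register itself for cleanup. A small sketch of that contract; FastThreadLocal is the intended caller, the direct calls here are for illustration only:

    // Sketch: the first write to a slot returns true, later writes return false.
    InternalThreadLocalMap map = InternalThreadLocalMap.Get();
    int slot = InternalThreadLocalMap.NextVariableIndex();

    object before = map.GetIndexedVariable(slot);          // InternalThreadLocalMap.Unset
    bool firstWrite = map.SetIndexedVariable(slot, "a");   // true: slot newly claimed
    bool secondWrite = map.SetIndexedVariable(slot, "b");  // false: slot already in use
    object removed = map.RemoveIndexedVariable(slot);      // "b"; the slot reads as Unset again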
@@ -0,0 +1,237 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

namespace DotNetty.Common
{
    using System;
    using System.Collections.Generic;
    using System.Diagnostics.Contracts;
    using System.Threading;
    using DotNetty.Common.Concurrency;
    using DotNetty.Common.Internal;
    using DotNetty.Common.Internal.Logging;
    using DotNetty.Common.Utilities;

    public static class ThreadDeathWatcher
    {
        static readonly IInternalLogger Logger = InternalLoggerFactory.GetInstance(typeof(ThreadDeathWatcher));

        static readonly IQueue<Entry> PendingEntries = PlatformDependent.NewMpscQueue<Entry>();
        static readonly Watcher watcher = new Watcher();
        static int started;
        static volatile Thread watcherThread;

        static ThreadDeathWatcher()
        {
            string poolName = "threadDeathWatcher";
            string serviceThreadPrefix = SystemPropertyUtil.Get("io.netty.serviceThreadPrefix");
            if (!string.IsNullOrEmpty(serviceThreadPrefix))
            {
                poolName = serviceThreadPrefix + poolName;
            }
        }

        /**
         * Schedules the specified {@code task} to run when the specified {@code thread} dies.
         *
         * @param thread the {@link Thread} to watch
         * @param task the {@link Runnable} to run when the {@code thread} dies
         *
         * @throws IllegalArgumentException if the specified {@code thread} is not alive
         */
        public static void Watch(Thread thread, Action task)
        {
            Contract.Requires(thread != null);
            Contract.Requires(task != null);
            Contract.Requires(thread.IsAlive);

            Schedule(thread, task, true);
        }

        /**
         * Cancels the task scheduled via {@link #watch(Thread, Runnable)}.
         */
        public static void Unwatch(Thread thread, Action task)
        {
            Contract.Requires(thread != null);
            Contract.Requires(task != null);

            Schedule(thread, task, false);
        }

        static void Schedule(Thread thread, Action task, bool isWatch)
        {
            PendingEntries.TryEnqueue(new Entry(thread, task, isWatch));

            if (Interlocked.CompareExchange(ref started, 1, 0) == 0)
            {
                Thread watcherThread = new Thread(s => ((IRunnable)s).Run());
                watcherThread.Start(watcher);
                ThreadDeathWatcher.watcherThread = watcherThread;
            }
        }

        /**
         * Waits until the thread of this watcher has no threads to watch and terminates itself.
         * Because a new watcher thread will be started again on {@link #watch(Thread, Runnable)},
         * this operation is only useful when you want to ensure that the watcher thread is terminated
         * <strong>after</strong> your application is shut down and there's no chance of calling
         * {@link #watch(Thread, Runnable)} afterwards.
         *
         * @return {@code true} if and only if the watcher thread has been terminated
         */
        public static bool AwaitInactivity(TimeSpan timeout)
        {
            Thread watcherThread = ThreadDeathWatcher.watcherThread;
            if (watcherThread != null)
            {
                watcherThread.Join(timeout);
                return !watcherThread.IsAlive;
            }
            else
            {
                return true;
            }
        }

        sealed class Watcher : IRunnable
        {
            readonly List<Entry> watchees = new List<Entry>();

            public void Run()
            {
                for (;;)
                {
                    this.FetchWatchees();
                    this.NotifyWatchees();

                    // Try once again just in case notifyWatchees() triggered watch() or unwatch().
                    this.FetchWatchees();
                    this.NotifyWatchees();

                    Thread.Sleep(1000);

                    if (this.watchees.Count == 0 && PendingEntries.IsEmpty)
                    {
                        // Mark the current worker thread as stopped.
                        // The following CAS must always succeed and must be uncontended,
                        // because only one watcher thread should be running at the same time.
                        bool stopped = Interlocked.CompareExchange(ref started, 0, 1) == 1;
                        Contract.Assert(stopped);

                        // Check if there are pending entries added by watch() while we do CAS above.
                        if (PendingEntries.IsEmpty)
                        {
                            // A) watch() was not invoked and thus there's nothing to handle
                            //    -> safe to terminate because there's nothing left to do
                            // B) a new watcher thread started and handled them all
                            //    -> safe to terminate; the new watcher thread will take care of the rest
                            break;
                        }

                        // There are pending entries again, added by watch()
                        if (Interlocked.CompareExchange(ref started, 1, 0) != 0)
                        {
                            // watch() started a new watcher thread and set 'started' to true.
                            // -> terminate this thread so that the new watcher reads from pendingEntries exclusively.
                            break;
                        }

                        // watch() added an entry, but this worker was faster to set 'started' to true.
                        // i.e. a new watcher thread was not started
                        // -> keep this thread alive to handle the newly added entries.
                    }
                }
            }

            void FetchWatchees()
            {
                for (;;)
                {
                    Entry e = PendingEntries.Dequeue();
                    if (e == null)
                    {
                        break;
                    }

                    if (e.IsWatch)
                    {
                        this.watchees.Add(e);
                    }
                    else
                    {
                        this.watchees.Remove(e);
                    }
                }
            }

            void NotifyWatchees()
            {
                List<Entry> watchees = this.watchees;
                for (int i = 0; i < watchees.Count;)
                {
                    Entry e = watchees[i];
                    if (!e.Thread.IsAlive)
                    {
                        watchees.RemoveAt(i);
                        try
                        {
                            e.Task();
                        }
                        catch (Exception t)
                        {
                            Logger.Warn("Thread death watcher task raised an exception:", t);
                        }
                    }
                    else
                    {
                        i++;
                    }
                }
            }
        }

        sealed class Entry : MpscLinkedQueueNode<Entry>
        {
            internal readonly Thread Thread;
            internal readonly Action Task;
            internal readonly bool IsWatch;

            public Entry(Thread thread, Action task, bool isWatch)
            {
                this.Thread = thread;
                this.Task = task;
                this.IsWatch = isWatch;
            }

            public override Entry Value
            {
                get { return this; }
            }

            public override int GetHashCode()
            {
                return this.Thread.GetHashCode() ^ this.Task.GetHashCode();
            }

            public override bool Equals(object obj)
            {
                if (obj == this)
                {
                    return true;
                }

                if (!(obj is Entry))
                {
                    return false;
                }

                var that = (Entry)obj;
                return this.Thread == that.Thread && this.Task == that.Task;
            }
        }
    }
}
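Typical use of the watcher: register a callback against a thread you do not own, and optionally wait for the watcher thread itself to wind down at shutdown. Illustrative sketch, not code from the commit:

    // Sketch: run a cleanup action once a worker thread exits.
    Thread worker = new Thread(() => Thread.Sleep(500));
    worker.Start();

    ThreadDeathWatcher.Watch(worker, () => Console.WriteLine("worker terminated, releasing its resources"));

    // at application shutdown, optionally wait for the watcher to go idle
    bool idle = ThreadDeathWatcher.AwaitInactivity(TimeSpan.FromSeconds(5));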
@@ -4,8 +4,8 @@
namespace DotNetty.Common
{
    using System;
-    using System.Collections.Generic;
    using System.Diagnostics.Contracts;
+    using System.Runtime.CompilerServices;
    using System.Threading;

    public class ThreadLocalPool

@@ -36,7 +36,7 @@ namespace DotNetty.Common
                return;
            }

-            Dictionary<Stack, WeakOrderQueue> queueDictionary = DelayedPool.Value;
+            ConditionalWeakTable<Stack, WeakOrderQueue> queueDictionary = DelayedPool.Value;
            WeakOrderQueue queue;
            if (!queueDictionary.TryGetValue(stack, out queue))
            {

@@ -373,8 +373,15 @@ namespace DotNetty.Common
        static int idSource = int.MinValue;
        static readonly int ownThreadId = Interlocked.Increment(ref idSource);

-        internal static readonly ThreadLocal<Dictionary<Stack, WeakOrderQueue>> DelayedPool =
-            new ThreadLocal<Dictionary<Stack, WeakOrderQueue>>(() => new Dictionary<Stack, WeakOrderQueue>());
+        internal static readonly DelayedThreadLocal DelayedPool = new DelayedThreadLocal();
+
+        internal sealed class DelayedThreadLocal : FastThreadLocal<ConditionalWeakTable<Stack, WeakOrderQueue>>
+        {
+            protected override ConditionalWeakTable<Stack, WeakOrderQueue> GetInitialValue()
+            {
+                return new ConditionalWeakTable<Stack, WeakOrderQueue>();
+            }
+        }

        public ThreadLocalPool(int maxCapacity)
        {

@@ -388,7 +395,7 @@ namespace DotNetty.Common
    public sealed class ThreadLocalPool<T> : ThreadLocalPool
        where T : class
    {
-        readonly ThreadLocal<Stack> threadLocal;
+        readonly ThreadLocalStack threadLocal;
        readonly Func<Handle, T> valueFactory;
        readonly bool preCreate;

@@ -409,23 +416,10 @@ namespace DotNetty.Common
            this.preCreate = preCreate;

-            this.threadLocal = new ThreadLocal<Stack>(this.InitializeStorage, true);
+            this.threadLocal = new ThreadLocalStack(this);
            this.valueFactory = valueFactory;
        }

-        Stack InitializeStorage()
-        {
-            var stack = new Stack(this.MaxCapacity, this, Thread.CurrentThread);
-            if (this.preCreate)
-            {
-                for (int i = 0; i < this.MaxCapacity; i++)
-                {
-                    stack.Push(this.CreateValue(stack));
-                }
-            }
-            return stack;
-        }
-
        public T Take()
        {
            Stack stack = this.threadLocal.Value;

@@ -459,5 +453,28 @@ namespace DotNetty.Common
        internal int ThreadLocalCapacity => this.threadLocal.Value.elements.Length;

        internal int ThreadLocalSize => this.threadLocal.Value.Size;
+
+        sealed class ThreadLocalStack : FastThreadLocal<Stack>
+        {
+            readonly ThreadLocalPool<T> owner;
+
+            public ThreadLocalStack(ThreadLocalPool<T> owner)
+            {
+                this.owner = owner;
+            }
+
+            protected override Stack GetInitialValue()
+            {
+                var stack = new Stack(this.owner.MaxCapacity, this.owner, Thread.CurrentThread);
+                if (this.owner.preCreate)
+                {
+                    for (int i = 0; i < this.owner.MaxCapacity; i++)
+                    {
+                        stack.Push(this.owner.CreateValue(stack));
+                    }
+                }
+                return stack;
+            }
+        }
    }
}
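The pool's per-thread Stack now comes from a FastThreadLocal subclass instead of ThreadLocal<Stack>, and the delayed-recycle map is a ConditionalWeakTable so stacks of dead threads can be collected. Usage of ThreadLocalPool<T> itself is unchanged; a hedged sketch based only on the members visible above (the PooledMessage type, the single-argument constructor, and the handle.Release call are assumptions, not copied from the commit):

    // Hypothetical example built on the Func<Handle, T> factory seen above.
    sealed class PooledMessage
    {
        readonly ThreadLocalPool.Handle handle;
        public string Payload;

        public PooledMessage(ThreadLocalPool.Handle handle)
        {
            this.handle = handle;
        }

        public void Recycle()
        {
            this.Payload = null;
            this.handle.Release(this); // assumed release API: returns the object to the thread-local stack
        }
    }

    static readonly ThreadLocalPool<PooledMessage> Pool =
        new ThreadLocalPool<PooledMessage>(handle => new PooledMessage(handle));

    // PooledMessage msg = Pool.Take(); ... msg.Recycle();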
@@ -9,11 +9,11 @@ namespace DotNetty.Common.Utilities
    /// <summary>
    /// Extension methods used for slicing byte arrays
    /// </summary>
-    public static class ByteArrayExtensions
+    public static class ArrayExtensions
    {
-        public static readonly byte[] Empty = new byte[0];
+        public static readonly byte[] ZeroBytes = new byte[0];

-        public static byte[] Slice(this byte[] array, int length)
+        public static T[] Slice<T>(this T[] array, int length)
        {
            Contract.Requires(array != null);

@@ -24,7 +24,7 @@ namespace DotNetty.Common.Utilities
            return Slice(array, 0, length);
        }

-        public static byte[] Slice(this byte[] array, int index, int length)
+        public static T[] Slice<T>(this T[] array, int index, int length)
        {
            Contract.Requires(array != null);

@@ -32,17 +32,17 @@ namespace DotNetty.Common.Utilities
            {
                throw new ArgumentOutOfRangeException(nameof(length), $"index: ({index}), length({length}) index + length cannot be longer than Array.length({array.Length})");
            }
-            var result = new byte[length];
+            var result = new T[length];
            Array.Copy(array, index, result, 0, length);
            return result;
        }

-        public static void SetRange(this byte[] array, int index, byte[] src)
+        public static void SetRange<T>(this T[] array, int index, T[] src)
        {
            SetRange(array, index, src, 0, src.Length);
        }

-        public static void SetRange(this byte[] array, int index, byte[] src, int srcIndex, int srcLength)
+        public static void SetRange<T>(this T[] array, int index, T[] src, int srcIndex, int srcLength)
        {
            Contract.Requires(array != null);
            Contract.Requires(src != null);

@@ -57,5 +57,23 @@ namespace DotNetty.Common.Utilities

            Array.Copy(src, srcIndex, array, index, srcLength);
        }
+
+        public static void Fill<T>(this T[] array, T value)
+        {
+            for (int i = 0; i < array.Length; i++)
+            {
+                array[i] = value;
+            }
+        }
+
+        public static void Fill<T>(this T[] array, int offset, int count, T value)
+        {
+            Contract.Requires(count + offset <= array.Length);
+
+            for (int i = offset; i < count + offset; i++)
+            {
+                array[i] = value;
+            }
+        }
    }
}
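The helpers are now generic over the element type (the XML summary still says "byte arrays" only because the diff leaves that comment untouched), and the two new Fill overloads back InternalThreadLocalMap's table initialization. A quick sketch of the generalized surface:

    // Sketch: the same helpers now work for any element type.
    int[] numbers = { 1, 2, 3, 4, 5 };
    int[] head = numbers.Slice(3);          // { 1, 2, 3 }
    int[] middle = numbers.Slice(1, 3);     // { 2, 3, 4 }

    var slots = new object[8];
    slots.Fill(InternalThreadLocalMap.Unset);     // fill every slot
    slots.Fill(4, 4, null);                       // fill 4 slots starting at offset 4

    byte[] frame = new byte[16];
    frame.SetRange(0, new byte[] { 0xCA, 0xFE }); // copy src into frame at index 0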
@@ -11,7 +11,6 @@ namespace DotNetty.Common.Utilities
    public sealed class AtomicReference<T>
        where T : class
    {
-        // ReSharper disable once InconsistentNaming
        T atomicValue;

        /// <summary>
@@ -0,0 +1,43 @@
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

namespace DotNetty.Common.Utilities
{
    public static class IntegerExtensions
    {
        static readonly int[] MultiplyDeBruijnBitPosition =
        {
            0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30,
            8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31
        };

        public const int SizeInBits = sizeof(int) * 8;

        public static int RoundUpToPowerOfTwo(int res)
        {
            if (res <= 2)
            {
                return 2;
            }
            res--;
            res |= res >> 1;
            res |= res >> 2;
            res |= res >> 4;
            res |= res >> 8;
            res |= res >> 16;
            res++;
            return res;
        }

        public static int Log2(int v)
        {
            v |= v >> 1; // first round down to one less than a power of 2
            v |= v >> 2;
            v |= v >> 4;
            v |= v >> 8;
            v |= v >> 16;

            return MultiplyDeBruijnBitPosition[unchecked((uint)(v * 0x07C4ACDDU) >> 27)];
        }
    }
}
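Both helpers are the classic bit tricks: RoundUpToPowerOfTwo smears the highest set bit to the right and adds one, while Log2 uses a De Bruijn multiply to locate the highest set bit. A few spot checks:

    // Spot checks for the bit-twiddling helpers.
    Console.WriteLine(IntegerExtensions.RoundUpToPowerOfTwo(1));    // 2 (minimum capacity is 2)
    Console.WriteLine(IntegerExtensions.RoundUpToPowerOfTwo(100));  // 128
    Console.WriteLine(IntegerExtensions.RoundUpToPowerOfTwo(1024)); // 1024 (already a power of two)

    Console.WriteLine(IntegerExtensions.Log2(1));    // 0
    Console.WriteLine(IntegerExtensions.Log2(1024)); // 10
    Console.WriteLine(IntegerExtensions.Log2(1500)); // 10 (floor of log2)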
@@ -7,8 +7,9 @@ namespace DotNetty.Common.Utilities
    using System.Collections.Generic;
    using System.Diagnostics.Contracts;
    using System.Threading;
+    using DotNetty.Common.Internal;

-    sealed class MpscLinkedQueue<T> : MpscLinkedQueueTailRef<T>, IEnumerable<T>
+    sealed class MpscLinkedQueue<T> : MpscLinkedQueueTailRef<T>, IEnumerable<T>, IQueue<T>
        where T : class
    {
#pragma warning disable 169 // padded reference

@@ -60,7 +61,7 @@ namespace DotNetty.Common.Utilities
            return next;
        }

-        public bool Enqueue(T value)
+        public bool TryEnqueue(T value)
        {
            Contract.Requires(value != null);
@@ -0,0 +1,15 @@
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+namespace DotNetty.Common.Utilities
+{
+    using System;
+
+    public static class RandomExtensions
+    {
+        public static long NextLong(this Random random)
+        {
+            return random.Next() << 32 & unchecked((uint)random.Next());
+        }
+    }
+}
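One note for reviewers: in C# the shift count for an int operand is masked to five bits, so `random.Next() << 32` is equivalent to `random.Next() << 0`, and combining the two samples with `&` clears bits instead of concatenating them, so the result never exceeds int.MaxValue. What the method presumably intends looks more like the following sketch (still not a full 64 bits of entropy, since Random.Next never yields negative values, but it does fill the high word):

    // Hypothetical corrected sketch, not part of the commit.
    public static long NextLong(this Random random)
    {
        return ((long)random.Next() << 32) | (uint)random.Next();
    }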
@@ -4,6 +4,7 @@
 namespace DotNetty.Common.Utilities
 {
     using System;
+    using System.Threading;
     using DotNetty.Common.Internal.Logging;

     public sealed class ReferenceCountUtil
@@ -136,5 +137,52 @@ namespace DotNetty.Common.Utilities
                 }
             }
         }
+
+        /// <summary>
+        /// Schedules the specified object to be released when the caller thread terminates. Note that this operation is
+        /// intended to simplify reference counting of ephemeral objects during unit tests. Do not use it beyond the
+        /// intended use case.
+        /// </summary>
+        public static T ReleaseLater<T>(T msg)
+        {
+            return ReleaseLater(msg, 1);
+        }
+
+        /// <summary>
+        /// Schedules the specified object to be released when the caller thread terminates. Note that this operation is
+        /// intended to simplify reference counting of ephemeral objects during unit tests. Do not use it beyond the
+        /// intended use case.
+        /// </summary>
+        public static T ReleaseLater<T>(T msg, int decrement)
+        {
+            var referenceCounted = msg as IReferenceCounted;
+            if (referenceCounted != null)
+            {
+                ThreadDeathWatcher.Watch(Thread.CurrentThread, () =>
+                {
+                    try
+                    {
+                        if (!referenceCounted.Release(decrement))
+                        {
+                            Logger.Warn("Non-zero refCnt: {}", FormatReleaseString(referenceCounted, decrement));
+                        }
+                        else
+                        {
+                            Logger.Debug("Released: {}", FormatReleaseString(referenceCounted, decrement));
+                        }
+                    }
+                    catch (Exception ex)
+                    {
+                        Logger.Warn("Failed to release an object: {}", referenceCounted, ex);
+                    }
+                });
+            }
+            return msg;
+        }
+
+        static string FormatReleaseString(IReferenceCounted referenceCounted, int decrement)
+        {
+            return referenceCounted.GetType().Name + ".Release(" + decrement + ") refCnt: " + referenceCounted.ReferenceCount;
+        }
     }
 }
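A minimal sketch of the intended test-only usage, assuming an xUnit test and the unpooled allocator (the test name and buffer contents are invented for the example):

    [Fact]
    public void PayloadRoundTrips()
    {
        // ReleaseLater returns its argument, so the allocation can be wrapped inline;
        // the buffer is released automatically when this test thread terminates.
        IByteBuffer payload = ReferenceCountUtil.ReleaseLater(Unpooled.Buffer(16));
        payload.WriteInt(42);
        Assert.Equal(42, payload.ReadInt());
    }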
@@ -17,7 +17,7 @@ namespace DotNetty.Transport.Channels
     {
         static readonly TimeSpan DefaultConnectTimeout = TimeSpan.FromSeconds(30);

-        volatile IByteBufferAllocator allocator = UnpooledByteBufferAllocator.Default;
+        volatile IByteBufferAllocator allocator = ByteBufferUtil.DefaultAllocator;
         volatile IRecvByteBufAllocator recvByteBufAllocator = FixedRecvByteBufAllocator.Default;
         volatile IMessageSizeEstimator messageSizeEstimator = DefaultMessageSizeEstimator.Default;

@@ -5,6 +5,7 @@ namespace DotNetty.Transport.Channels.Sockets
 {
     using System;
     using System.Net.Sockets;
+    using System.Threading;
     using DotNetty.Buffers;

     /// <summary>
@@ -185,7 +186,18 @@ namespace DotNetty.Transport.Channels.Sockets
         protected override void ScheduleSocketRead()
         {
             SocketChannelAsyncOperation operation = this.ReadOperation;
-            bool pending = this.Socket.ReceiveAsync(operation);
+            bool pending;
+            if (ExecutionContext.IsFlowSuppressed())
+            {
+                pending = this.Socket.ReceiveAsync(operation);
+            }
+            else
+            {
+                using (ExecutionContext.SuppressFlow())
+                {
+                    pending = this.Socket.ReceiveAsync(operation);
+                }
+            }
             if (!pending)
             {
                 // todo: potential allocation / non-static field?
@@ -329,7 +341,20 @@ namespace DotNetty.Transport.Channels.Sockets
             SocketChannelAsyncOperation operation = this.PrepareWriteOperation(buffer);

             this.SetState(StateFlags.WriteScheduled);
-            bool pending = this.Socket.SendAsync(operation);
+            bool pending;
+
+            if (ExecutionContext.IsFlowSuppressed())
+            {
+                pending = this.Socket.SendAsync(operation);
+            }
+            else
+            {
+                using (ExecutionContext.SuppressFlow())
+                {
+                    pending = this.Socket.SendAsync(operation);
+                }
+            }
+
             if (!pending)
             {
                 ((ISocketChannelUnsafe)this.Unsafe).FinishWrite(operation);
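The IsFlowSuppressed/SuppressFlow branch keeps the runtime from capturing and flowing the current ExecutionContext into the socket completion callback, which the commit presumably does to cut per-operation capture costs on the hot read/write path. The same pattern in isolation, with a hypothetical startOperation delegate standing in for Socket.ReceiveAsync/SendAsync:

    // Minimal sketch of the suppress-flow pattern; startOperation is a stand-in.
    static bool StartWithoutContextFlow(Func<bool> startOperation)
    {
        if (ExecutionContext.IsFlowSuppressed())
        {
            return startOperation(); // flow is already suppressed further up the stack
        }
        using (ExecutionContext.SuppressFlow())
        {
            return startOperation(); // the AsyncFlowControl restores flow on Dispose
        }
    }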
@@ -22,7 +22,7 @@ namespace DotNetty.Transport.Channels.Sockets
             this.Completed += AbstractSocketChannel.IoCompletedCallback;
             if (setEmptyBuffer)
             {
-                this.SetBuffer(ByteArrayExtensions.Empty, 0, 0);
+                this.SetBuffer(ArrayExtensions.ZeroBytes, 0, 0);
             }
         }

(The diff for one file is not shown here because of its large size.)
@@ -0,0 +1,66 @@
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+namespace DotNetty.Buffers.Tests
+{
+    using Xunit;
+
+    public abstract class AbstractPooledByteBufTest : AbstractByteBufTest
+    {
+        protected abstract IByteBuffer Alloc(int length);
+
+        protected override IByteBuffer NewBuffer(int length)
+        {
+            IByteBuffer buffer = this.Alloc(length);
+            Assert.Equal(0, buffer.WriterIndex);
+            Assert.Equal(0, buffer.ReaderIndex);
+            return buffer;
+        }
+
+        [Fact]
+        public void TestDiscardMarks()
+        {
+            this.TestDiscardMarks(4);
+        }
+
+        [Fact]
+        public void TestDiscardMarksUnpooled()
+        {
+            this.TestDiscardMarks(32 * 1024 * 1024);
+        }
+
+        void TestDiscardMarks(int capacity)
+        {
+            IByteBuffer buf = this.NewBuffer(capacity);
+            buf.WriteShort(1);
+
+            buf.SkipBytes(1);
+
+            buf.MarkReaderIndex();
+            buf.MarkWriterIndex();
+            Assert.True(buf.Release());
+
+            IByteBuffer buf2 = this.NewBuffer(capacity);
+
+            Assert.Same(UnwrapIfNeeded(buf), UnwrapIfNeeded(buf2));
+
+            buf2.WriteShort(1);
+
+            buf2.ResetReaderIndex();
+            buf2.ResetWriterIndex();
+
+            Assert.Equal(0, buf2.ReaderIndex);
+            Assert.Equal(0, buf2.WriterIndex);
+            Assert.True(buf2.Release());
+        }
+
+        static IByteBuffer UnwrapIfNeeded(IByteBuffer buf)
+        {
+            if (buf is AdvancedLeakAwareByteBuffer || buf is SimpleLeakAwareByteBuffer)
+            {
+                return buf.Unwrap();
+            }
+            return buf;
+        }
+    }
+}
@@ -81,12 +81,15 @@
     </Otherwise>
   </Choose>
   <ItemGroup>
+    <Compile Include="AbstractPooledByteBufTest.cs" />
     <Compile Include="ByteBufferDerivationTests.cs" />
     <Compile Include="LeakDetectionTest.cs" />
+    <Compile Include="PooledBigEndianHeapByteBufTest.cs" />
     <Compile Include="PortionedMemoryStream.cs" />
     <Compile Include="AbstractByteBufferTests.cs" />
     <Compile Include="Properties\AssemblyInfo.cs" />
     <Compile Include="PooledBufferAllocatorTests.cs" />
+    <Compile Include="AbstractByteBufTest.cs" />
   </ItemGroup>
   <ItemGroup>
     <None Include="packages.config" />
@@ -5,6 +5,7 @@ namespace DotNetty.Buffers.Tests
 {
     using System;
     using System.Diagnostics.Tracing;
+    using System.Runtime.CompilerServices;
     using DotNetty.Common;
     using DotNetty.Common.Internal.Logging;
     using Microsoft.Practices.EnterpriseLibrary.SemanticLogging;
@@ -20,24 +21,36 @@ namespace DotNetty.Buffers.Tests
         [Fact]
         public void UnderReleaseBufferLeak()
         {
-            var eventListener = new ObservableEventListener();
-            Mock<IObserver<EventEntry>> logListener = this.mockRepo.Create<IObserver<EventEntry>>();
-            var eventTextFormatter = new EventTextFormatter();
-            Func<EventEntry, bool> leakPredicate = y => y.TryFormatAsString(eventTextFormatter).Contains("LEAK");
-            logListener.Setup(x => x.OnNext(It.Is<EventEntry>(y => leakPredicate(y)))).Verifiable();
-            logListener.Setup(x => x.OnNext(It.Is<EventEntry>(y => !leakPredicate(y))));
-            eventListener.Subscribe(logListener.Object);
-            eventListener.EnableEvents(DefaultEventSource.Log, EventLevel.Verbose);
-
-            var bufPool = new PooledByteBufferAllocator(100, 1000);
-            IByteBuffer buffer = bufPool.Buffer(10);
-
-            buffer = null;
-            GC.Collect();
-            GC.WaitForPendingFinalizers();
-
-            this.mockRepo.Verify();
+            ResourceLeakDetector.DetectionLevel preservedLevel = ResourceLeakDetector.Level;
+            try
+            {
+                ResourceLeakDetector.Level = ResourceLeakDetector.DetectionLevel.Paranoid;
+                var eventListener = new ObservableEventListener();
+                Mock<IObserver<EventEntry>> logListener = this.mockRepo.Create<IObserver<EventEntry>>();
+                var eventTextFormatter = new EventTextFormatter();
+                Func<EventEntry, bool> leakPredicate = y => y.TryFormatAsString(eventTextFormatter).Contains("LEAK");
+                logListener.Setup(x => x.OnNext(It.Is<EventEntry>(y => leakPredicate(y)))).Verifiable();
+                logListener.Setup(x => x.OnNext(It.Is<EventEntry>(y => !leakPredicate(y))));
+                eventListener.Subscribe(logListener.Object);
+                eventListener.EnableEvents(DefaultEventSource.Log, EventLevel.Verbose);
+
+                this.CreateAndForgetBuffer();
+
+                GC.Collect();
+                GC.WaitForPendingFinalizers();
+
+                this.mockRepo.Verify();
+            }
+            finally
+            {
+                ResourceLeakDetector.Level = preservedLevel;
+            }
+        }
+
+        [MethodImpl(MethodImplOptions.NoInlining)]
+        void CreateAndForgetBuffer()
+        {
+            IByteBuffer forgotten = PooledByteBufferAllocator.Default.Buffer(10);
         }

         [Fact]
@@ -47,10 +60,9 @@ namespace DotNetty.Buffers.Tests
             try
             {
                 ResourceLeakDetector.Level = ResourceLeakDetector.DetectionLevel.Paranoid;
-                var bufPool = new PooledByteBufferAllocator(100, 1000);
-                IByteBuffer buffer = bufPool.Buffer(10);
+                IByteBuffer buffer = PooledByteBufferAllocator.Default.Buffer(10);
                 buffer.Release();
-                buffer = bufPool.Buffer(10);
+                buffer = PooledByteBufferAllocator.Default.Buffer(10);
                 buffer.Release();
             }
             finally
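The reason the allocation moves into a separate, non-inlined helper: the leak check depends on the forgotten buffer becoming unreachable so that GC.Collect plus WaitForPendingFinalizers can surface the leak event, and isolating the allocation behind [MethodImpl(MethodImplOptions.NoInlining)] keeps the local from staying rooted in the test method's own frame. The shape of the pattern, with hypothetical names:

    // Hypothetical restatement of the allocate-and-forget pattern used by the test.
    [MethodImpl(MethodImplOptions.NoInlining)]
    static void AllocateAndForget(IByteBufferAllocator allocator)
    {
        // The reference dies with this frame, so the following GC.Collect() /
        // GC.WaitForPendingFinalizers() pair can observe the unreleased buffer.
        IByteBuffer forgotten = allocator.Buffer(10);
    }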
@@ -0,0 +1,13 @@
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+namespace DotNetty.Buffers.Tests
+{
+    public class PooledBigEndianHeapByteBufTest : AbstractPooledByteBufTest
+    {
+        protected override IByteBuffer Alloc(int length)
+        {
+            return PooledByteBufferAllocator.Default.Buffer(length);
+        }
+    }
+}
@@ -3,11 +3,13 @@

 namespace DotNetty.Buffers.Tests
 {
+    using DotNetty.Common.Utilities;
     using Xunit;

     public class PooledBufferAllocatorTests
     {
         [Theory]
+        [InlineData(8000, 32000, new[] { 1024, 0, 10 * 1024 })]
         [InlineData(16 * 1024, 10, new[] { 16 * 1024 - 100, 8 * 1024 })]
         [InlineData(16 * 1024, 0, new[] { 16 * 1024 - 100, 8 * 1024 })]
         [InlineData(1024, 2 * 1024, new[] { 16 * 1024 - 100, 8 * 1024 })]
@@ -15,7 +17,7 @@ namespace DotNetty.Buffers.Tests
         [InlineData(1024, 0, new[] { 1024, 0, 10 * 1024 })]
         public void PooledBufferGrowTest(int bufferSize, int startSize, int[] writeSizes)
         {
-            var alloc = new PooledByteBufferAllocator(bufferSize, int.MaxValue);
+            var alloc = new PooledByteBufferAllocator();
             IByteBuffer buffer = alloc.Buffer(startSize);
             int wrote = 0;
             foreach (int size in writeSizes)
@@ -82,8 +82,8 @@ namespace DotNetty.Transport.Tests.Performance.Sockets
             this.signal = new ManualResetEventSlimReadFinishedSignal(this.ResetEvent);

             // reserve up to 10mb of 16kb buffers on both client and server; we're only sending about 700k worth of messages
-            this.serverBufferAllocator = new PooledByteBufferAllocator(256, 10 * 1024 * 1024 / Environment.ProcessorCount);
-            this.clientBufferAllocator = new PooledByteBufferAllocator(256, 10 * 1024 * 1024 / Environment.ProcessorCount);
+            this.serverBufferAllocator = new PooledByteBufferAllocator();
+            this.clientBufferAllocator = new PooledByteBufferAllocator();

             ServerBootstrap sb = new ServerBootstrap()
                 .Group(this.ServerGroup, this.WorkerGroup)