Target NetAdapterCx 1.3 (Preview)

Re-target to NetAdapterCx 1.3 (WDK/SDK 10.0.17763.0).

NetAdapterCx 1.2 (Preview) was removed in the
Windows 10 October 2018 Update (version 1809, build 17763).
This commit is contained in:
Tyler Retzlaff 2018-12-06 00:05:29 +00:00
Родитель 6ab0bc21bf
Коммит eafebe1aa1
18 изменённых файлов: 575 добавлений и 2098 удалений

Просмотреть файл

@ -1,404 +0,0 @@
// Copyright (C) Microsoft Corporation. All rights reserved.
/*++
NetAdapterCx NETTXQUEUE DMA Scatter/Gather Framework
This framework aims to help developers simplify the code they write to
deal with the DMA engine in a NETTXQUEUE. It creates a queue on behalf
of the NIC driver and intercepts the EvtTxQueueAdvance callback, breaking
it down in four function calls the NIC driver should define using macros:
Macro Name | Required | Type
-------------------------------|------------|--------------------------------------
TX_DMA_FX_PROGRAM_DESCRIPTORS | Yes | EVT_TX_DMA_QUEUE_PROGRAM_DESCRIPTORS
TX_DMA_FX_GET_PACKET_STATUS | Yes | EVT_TX_DMA_QUEUE_GET_PACKET_STATUS
TX_DMA_FX_FLUSH_TRANSACTION | Yes | EVT_TX_DMA_QUEUE_FLUSH_TRANSACTION
TX_DMA_FX_BOUNCE_ANALYSIS | No | EVT_TX_DMA_QUEUE_BOUNCE_ANALYSIS
TX_DMA_FX_ALLOC_TAG | No | Pool tag to use in internal allocations
To use this framework the NIC driver should:
- Include txdmafxtypes.h
- Define at least the required macros
- Include txdmafx.h
Example:
#include "txdmafxtypes.h"
EVT_TX_DMA_QUEUE_PROGRAM_DESCRIPTORS EvtProgramDescriptors;
EVT_TX_DMA_QUEUE_FLUSH_TRANSACTION EvtFlushTransaction;
EVT_TX_DMA_QUEUE_GET_PACKET_STATUS EvtGetPacketStatus;
#define TX_DMA_FX_PROGRAM_DESCRIPTORS EvtProgramDescriptors
#define TX_DMA_FX_FLUSH_TRANSACTION EvtFlushTransaction
#define TX_DMA_FX_GET_PACKET_STATUS EvtGetPacketStatus
#define TX_DMA_FX_ALLOC_TAG 'tseT'
#include "txdmafx.h"
TX_DMA_FX_PROGRAM_DESCRIPTORS: Called one time for each NET_PACKET
that needs to be transmitted. The framework will take care of
mapping/unmapping the buffers and will pass in a SCATTER_GATHER_LIST
along with the packet so that the NIC driver can program the descriptors
to hardware.
TX_DMA_FX_GET_PACKET_STATUS: Called one time for each NET_PACKET pending
transmission. If this function returns STATUS_SUCCESS, the framework will
release any resources it acquired to map the buffers and return the packet
to the OS.
If this function returns STATUS_PENDING the packet is not completed, any
other status code will cause the packet to be returned to the OS and will
be counted as a failed completion.
TX_DMA_FX_FLUSH_TRANSACTION: Called once per EvtTxQueueAdvance callback
if any new descriptors were programmed to hardware. The NIC should do
whatever it is necessary to flush their DMA transaction.
TX_DMA_FX_BOUNCE_ANALYSIS: If the internal framework bounce analysis is not
enough for the NIC driver, it can use this function to implement any checks
relevant to their hardware. Called once for each NET_PACKET that needs to
be transmitted.
To see what kind of bounce analysis the framework does, see the NET_TX_DMA_QUEUE_CONFIG
definition or the _TxDmaFxBounceAnalysis implementation.
--*/
#ifndef _TXDMAFX_H_
#define _TXDMAFX_H_
#include "txdmafx_details.h"
#ifndef TX_DMA_FX_PROGRAM_DESCRIPTORS
#error To use this framework you need to define TX_DMA_FX_PROGRAM_DESCRIPTORS
#endif
#ifndef TX_DMA_FX_GET_PACKET_STATUS
#error To use this framework you need to define TX_DMA_FX_GET_PACKET_STATUS
#endif
#ifndef TX_DMA_FX_FLUSH_TRANSACTION
#error To use this framework you need to define TX_DMA_FX_FLUSH_TRANSACTION
#endif
_IRQL_requires_max_(DISPATCH_LEVEL)
__inline
VOID
_TxDmaFxCompleteTxPackets(
_In_ TxDmaFx *DmaFx
)
/*
Description:
Walks the packets the NIC driver owns, in ring order, from BeginIndex (the
oldest outstanding packet) up to NextIndex (the first packet not yet
programmed). For each packet that was successfully programmed to hardware it
calls TX_DMA_FX_GET_PACKET_STATUS to ask the NIC driver whether the DMA
transfer finished. Packets must complete in order, so the walk stops at the
first packet that reports STATUS_PENDING. For every completed (or dropped)
packet, scatter/gather resources and bounce buffers are released and the
packet is returned to the OS by advancing BeginIndex.
*/
{
PCNET_DATAPATH_DESCRIPTOR descriptor = DmaFx->Descriptor;
NET_RING_BUFFER *ringBuffer = NET_DATAPATH_DESCRIPTOR_GET_PACKET_RING_BUFFER(descriptor);
while (ringBuffer->BeginIndex != ringBuffer->NextIndex)
{
NET_PACKET *packet = NetRingBufferGetPacketAtIndex(descriptor, ringBuffer->BeginIndex);
// If the packet is already marked as completed it is because we
// failed to program its descriptors and we should just drop it
// (see _TxDmaFxTransmitPackets, which sets Completed on failure)
if (!NET_PACKET_GET_FRAGMENT(packet, descriptor, 0)->Completed)
{
NTSTATUS packetStatus =
TX_DMA_FX_GET_PACKET_STATUS(
DmaFx->QueueHandle,
packet);
// We need to complete packets in order, if the current returned
// pending there is no point in keep trying
if (packetStatus == STATUS_PENDING)
break;
// Any status other than success or pending is counted as a failed
// completion; the packet is still returned to the OS below
if (packetStatus != STATUS_SUCCESS)
DmaFx->Statistics.Packet.CompletedWithError += 1;
if (!DmaFx->DmaBypass)
{
// If we are using DMA APIs make sure we return the resources we
// acquired
TX_DMA_FX_PACKET_CONTEXT *fxPacketContext = _TxDmaFxGetPacketContextFromToken(descriptor, packet, DmaFx->ContextToken);
// Even when using DMA APIs, we might still have a NULL SGL if
// the packet was bounced
if (fxPacketContext->ScatterGatherList != NULL)
{
DmaFx->DmaAdapter->DmaOperations->PutScatterGatherList(
DmaFx->DmaAdapter,
fxPacketContext->ScatterGatherList,
TRUE);
fxPacketContext->ScatterGatherList = NULL;
}
}
// Bounce buffers are recycled FIFO; releasing one just advances the
// free index (see _TxDmaFxBounceAndTransmitPacket for allocation)
if (_TX_DMA_FX_IS_PACKET_BOUNCED(descriptor, packet))
{
DmaFx->BounceFreeIndex += 1;
_TX_DMA_FX_PACKET_CLEAR_BOUNCED_FLAG(descriptor, packet);
}
}
// Return the packet to the OS by advancing past it
ringBuffer->BeginIndex = NetRingBufferIncrementIndex(ringBuffer, ringBuffer->BeginIndex);
}
}
_IRQL_requires_max_(DISPATCH_LEVEL)
__inline
ULONG
_TxDmaFxTransmitPackets(
_In_ TxDmaFx *DmaFx
)
/*
Description:
Iterates over the packets in the ring buffer owned by the NIC but not yet
programmed to hardware. For each packet it performs the framework's internal
bounce analysis to decide whether the Tx buffers can be mapped in place or
need to be copied into a bounce buffer before transmitting, then calls the
matching transmit routine.
The NIC driver can optionally register a TX_DMA_FX_BOUNCE_ANALYSIS callback
in which it can analyze the Tx buffers and decide if they need to be bounced
or not.
Returns:
The number of packets successfully programmed to hardware in this call.
*/
{
ULONG numPacketsProgrammed = 0;
NET_PACKET *netPacket;
PCNET_DATAPATH_DESCRIPTOR descriptor = DmaFx->Descriptor;
while (NULL != (netPacket = NetRingBufferGetNextPacket(descriptor)))
{
NTSTATUS status = STATUS_SUCCESS;
// Packets the OS flagged to be ignored are dropped, not transmitted
if (netPacket->IgnoreThisPacket)
{
DmaFx->Statistics.Packet.Skipped += 1;
status = STATUS_UNSUCCESSFUL;
}
else
{
TX_DMA_BOUNCE_ANALYSIS bounceAnalysis = _TxDmaFxBounceAnalysis(DmaFx, netPacket);
// The NIC driver's optional analysis is consulted only when the
// framework itself would transmit in place, so it can only make
// the verdict stricter
if (bounceAnalysis == TxDmaTransmitInPlace)
{
#ifdef TX_DMA_FX_BOUNCE_ANALYSIS
bounceAnalysis = TX_DMA_FX_BOUNCE_ANALYSIS(DmaFx->QueueHandle, netPacket);
#endif
}
switch (bounceAnalysis)
{
case TxDmaTransmitInPlace:
{
status = _TxDmaFxMapAndTransmitPacket(DmaFx, netPacket);
if (status != STATUS_BUFFER_TOO_SMALL)
{
break;
}
// If we could not map and transmit the packet using DMA
// because the SG list size was not enough (the packet was
// too fragmented) we should try to bounce the buffer
// before dropping it
__fallthrough;
}
case TxDmaTransmitAfterBouncing:
{
status = _TxDmaFxBounceAndTransmitPacket(DmaFx, netPacket);
break;
}
case TxDmaCannotTransmit:
DmaFx->Statistics.Packet.CannotTransmit += 1;
__fallthrough;
default:
status = STATUS_UNSUCCESSFUL;
break;
}
}
// If DMA doesn't have enough buffers for us,
// or if we didn't have enough bounce buffers
// give up now and try again when resources
// are available.
if (status == STATUS_INSUFFICIENT_RESOURCES)
break;
if (status == STATUS_SUCCESS)
numPacketsProgrammed++;
else
// Mark failed packets as completed so _TxDmaFxCompleteTxPackets
// drops them without querying the NIC driver
NET_PACKET_GET_FRAGMENT(netPacket, descriptor, 0)->Completed = TRUE;
NetRingBufferAdvanceNextPacket(descriptor);
}
return numPacketsProgrammed;
}
_Use_decl_annotations_
__inline
VOID
_TxDmaFxAdvance(
    _In_ NETTXQUEUE TxQueue
    )
/*
Description:
    EvtTxQueueAdvance handler the framework installs on behalf of the NIC
    driver. First programs any newly available packets to hardware; if at
    least one packet was programmed, TX_DMA_FX_FLUSH_TRANSACTION is invoked
    so the NIC driver can flush its DMA transaction. Finally, completed
    transmissions are reaped and returned to the OS.
Arguments:
    TxQueue - NETTXQUEUE handle
*/
{
    TxDmaFx *dmaFx = _TxDmaFxGetContext(TxQueue);

    if (_TxDmaFxTransmitPackets(dmaFx) != 0)
    {
        TX_DMA_FX_FLUSH_TRANSACTION(dmaFx->QueueHandle);
    }

    _TxDmaFxCompleteTxPackets(dmaFx);
}
_IRQL_requires_max_(PASSIVE_LEVEL)
NTSTATUS
__inline
NetTxDmaQueueCreate(
_Inout_ PNETTXQUEUE_INIT NetTxQueueInit,
_In_opt_ PWDF_OBJECT_ATTRIBUTES TxQueueAttributes,
_In_ PNET_TX_DMA_QUEUE_CONFIG Configuration,
_Out_ NETTXQUEUE* TxQueue
)
/*
Description:
This function will create a NETTXQUEUE on behalf of the caller and set up
the necessary state to intercept incoming NET_PACKETs and map the buffers
to use in DMA transactions.
Arguments:
NetTxQueueInit - Opaque handle containing information about the queue the OS
is asking us to create
TxQueueAttributes - Object attributes the NIC driver wants in the NETTXQUEUE
Configuration - Contains configuration this framework will use to make decisions
about what to do with incoming NET_PACKETs as well as the
callback functions needed to operate this queue
TxQueue - Handle to the created NETTXQUEUE; set to NULL until creation succeeds
Returns:
STATUS_SUCCESS, or the first failing NTSTATUS from validation or creation.
*/
{
*TxQueue = NULL;
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(_TxDmaFxValidateConfig(Configuration));
// Decide up front whether HAL lets us map buffers without the DMA APIs
BOOLEAN dmaBypass;
if (Configuration->AllowDmaBypass)
{
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
_TxDmaFxCheckDmaBypass(
Configuration->DmaEnabler,
&dmaBypass));
}
else
{
dmaBypass = FALSE;
}
// We only need a packet context if using DMA APIs
if (!dmaBypass)
{
// Configure a packet context area to hold DMA mapping information
NET_PACKET_CONTEXT_ATTRIBUTES packetContextAttribs;
NET_PACKET_CONTEXT_ATTRIBUTES_INIT_TYPE(&packetContextAttribs, TX_DMA_FX_PACKET_CONTEXT);
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
NetTxQueueInitAddPacketContextAttributes(
NetTxQueueInit,
&packetContextAttribs));
}
// The framework only intercepts the Advance callback (to map and unmap the
// incoming packets), the others go directly to the ones the NIC driver provides
NET_TXQUEUE_CONFIG txConfig;
NET_TXQUEUE_CONFIG_INIT(
&txConfig,
_TxDmaFxAdvance,
Configuration->EvtTxQueueSetNotificationEnabled,
Configuration->EvtTxQueueCancel);
// Configure a private Tx Queue context to hold DMA information, the NIC
// is not allowed to modify the data stored in it
WDF_OBJECT_ATTRIBUTES privateAttribs;
WDF_OBJECT_ATTRIBUTES_INIT_CONTEXT_TYPE(&privateAttribs, TxDmaFx);
NETTXQUEUE txQueue;
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
NetTxQueueCreate(
NetTxQueueInit,
&privateAttribs,
&txConfig,
&txQueue));
// Initialize TxDmaFx object
TxDmaFx *dmaFx = _TxDmaFxGetContext(txQueue);
dmaFx->QueueHandle = txQueue;
dmaFx->Descriptor = NetTxQueueGetDatapathDescriptor(txQueue);
dmaFx->Config = *Configuration;
// The packet context token only exists when we registered the context above
if(!dmaBypass)
dmaFx->ContextToken = NET_TXQUEUE_GET_PACKET_CONTEXT_TOKEN(txQueue, TX_DMA_FX_PACKET_CONTEXT);
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
_TxDmaFxInitialize(
dmaFx,
dmaBypass));
// Now allocate space for the NIC context (if any)
if (TxQueueAttributes != NULL)
{
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
WdfObjectAllocateContext(
txQueue,
TxQueueAttributes,
NULL));
}
*TxQueue = txQueue;
return STATUS_SUCCESS;
}
#endif

Просмотреть файл

@ -1,689 +0,0 @@
// Copyright (C) Microsoft Corporation. All rights reserved.
#ifndef _TXDMAFX_DETAILS_H_
#define _TXDMAFX_DETAILS_H_
_IRQL_requires_max_(PASSIVE_LEVEL)
__inline
NTSTATUS
_TxDmaFxCheckDmaBypass(
    _In_ WDFDMAENABLER Enabler,
    _Out_ BOOLEAN *DmaBypass
    )
/*
Description:
    Queries the WDM DMA adapter behind the given WDF DMA enabler and reports
    whether HAL allows the driver to bypass the DMA mapping APIs
    (ADAPTER_INFO_API_BYPASS).
Arguments:
    Enabler   - WDF DMA enabler to inspect
    DmaBypass - On success, receives TRUE if buffers may be mapped directly
*/
{
    DMA_ADAPTER_INFO info = { DMA_ADAPTER_INFO_VERSION1 };
    DMA_ADAPTER *adapter =
        WdfDmaEnablerWdmGetDmaAdapter(Enabler, WdfDmaDirectionWriteToDevice);

    _TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
        adapter->DmaOperations->GetDmaAdapterInfo(adapter, &info));

    *DmaBypass = (info.V1.Flags & ADAPTER_INFO_API_BYPASS) != 0;
    return STATUS_SUCCESS;
}
_IRQL_requires_max_(PASSIVE_LEVEL)
__inline
NTSTATUS
_TxDmaFxValidateConfig(
    _In_ NET_TX_DMA_QUEUE_CONFIG *Config
    )
/*
Description:
    Validates the configuration object provided by the NIC driver.
Returns:
    STATUS_INVALID_PARAMETER when a mandatory field is missing or a value is
    out of range, STATUS_SUCCESS otherwise.
*/
{
    // Both event callbacks are mandatory
    if (Config->EvtTxQueueSetNotificationEnabled == NULL)
        return STATUS_INVALID_PARAMETER;

    if (Config->EvtTxQueueCancel == NULL)
        return STATUS_INVALID_PARAMETER;

    // DMA APIs require both a WDFDEVICE and a WDFDMAENABLER
    if (Config->Device == NULL)
        return STATUS_INVALID_PARAMETER;

    if (Config->DmaEnabler == NULL)
        return STATUS_INVALID_PARAMETER;

    // AlignmentRequirement must be -1 (determine automatically) or a mask
    // of the form 2^N - 1
    if (Config->AlignmentRequirement != -1 &&
        (Config->AlignmentRequirement & (Config->AlignmentRequirement + 1)) != 0)
    {
        return STATUS_INVALID_PARAMETER;
    }

    // A maximum packet size is required to size internal allocations, and
    // it cannot exceed the largest supported bounce buffer
    if (Config->MaximumPacketSize == 0 ||
        Config->MaximumPacketSize > _TX_DMA_FX_MAXIMUM_BOUNCE_BUFFER_SIZE)
    {
        return STATUS_INVALID_PARAMETER;
    }

    return STATUS_SUCCESS;
}
_IRQL_requires_max_(PASSIVE_LEVEL)
__inline
NTSTATUS
_TxDmaFxInitializeForDirectMapping(
_In_ TxDmaFx *DmaFx
)
/*
Description:
Initializes TxDmaFx with the necessary resources to do direct mapping of
buffers: a single spare SCATTER_GATHER_LIST, sized for
MaximumScatterGatherElements entries, that is reused for every packet
(see _TxDmaFxTransmitPacketViaDirectMapping).
*/
{
DmaFx->DmaBypass = TRUE;
// Overflow-checked allocation size: SGL header + element array
size_t sglAllocationSize;
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
RtlSizeTMult(
sizeof(SCATTER_GATHER_ELEMENT),
DmaFx->Config.MaximumScatterGatherElements,
&sglAllocationSize));
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
RtlSizeTAdd(
sglAllocationSize,
FIELD_OFFSET(SCATTER_GATHER_LIST, Elements),
&sglAllocationSize));
// Parent the memory allocation to the NETTXQUEUE so its lifetime is
// tied to the queue
WDF_OBJECT_ATTRIBUTES memoryAttributes;
WDF_OBJECT_ATTRIBUTES_INIT(&memoryAttributes);
memoryAttributes.ParentObject = DmaFx->QueueHandle;
WDFMEMORY sglAllocation;
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
WdfMemoryCreate(
&memoryAttributes,
NonPagedPoolNx,
TX_DMA_FX_ALLOC_TAG,
sglAllocationSize,
&sglAllocation,
(void**)&DmaFx->SpareSgl));
return STATUS_SUCCESS;
}
_IRQL_requires_max_(PASSIVE_LEVEL)
__inline
NTSTATUS
_TxDmaFxInitializeForDma(
_In_ TxDmaFx *DmaFx
)
/*
Description:
Initializes TxDmaFx with the necessary resources to use DMA APIs when
mapping buffers: a pre-sized scatter/gather list buffer and a DMA transfer
context for every packet slot in the ring buffer.
*/
{
// Ask HAL for the worst-case SGL size: a maximum-sized packet starting at
// the worst possible page offset (PAGE_SIZE - 1)
NTSTATUS status = DmaFx->DmaAdapter->DmaOperations->CalculateScatterGatherList(
DmaFx->DmaAdapter,
NULL,
ULongToPtr(PAGE_SIZE - 1),
DmaFx->Config.MaximumPacketSize,
&DmaFx->ScatterGatherListSize,
NULL);
switch (status)
{
// Couldn't calculate SGL size: instead of failing queue creation, fall
// back to bouncing every packet into a common buffer
case STATUS_INSUFFICIENT_RESOURCES:
case STATUS_BUFFER_TOO_SMALL:
DmaFx->BounceAlways = TRUE;
return STATUS_SUCCESS;
}
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(status);
//
// Allocate memory for scatter-gather list: one ScatterGatherListSize
// slice per packet slot in the ring buffer
//
NET_RING_BUFFER *ringBuffer = NET_DATAPATH_DESCRIPTOR_GET_PACKET_RING_BUFFER(DmaFx->Descriptor);
PCNET_DATAPATH_DESCRIPTOR descriptor = DmaFx->Descriptor;
size_t memSize;
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
RtlSizeTMult(
ringBuffer->NumberOfElements,
DmaFx->ScatterGatherListSize,
&memSize));
// Parent the memory allocation to the NETTXQUEUE
WDF_OBJECT_ATTRIBUTES memoryAttributes;
WDF_OBJECT_ATTRIBUTES_INIT(&memoryAttributes);
memoryAttributes.ParentObject = DmaFx->QueueHandle;
WDFMEMORY memory;
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
WdfMemoryCreate(
&memoryAttributes,
NonPagedPoolNx,
TX_DMA_FX_ALLOC_TAG,
memSize,
&memory,
&DmaFx->SgListMem));
RtlZeroMemory(DmaFx->SgListMem, memSize);
// One DMA transfer context (V1 layout) per packet slot, also parented
// to the queue
size_t dmaAllocationSize;
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
RtlSizeTMult(
DMA_TRANSFER_CONTEXT_SIZE_V1,
ringBuffer->NumberOfElements,
&dmaAllocationSize));
WDF_OBJECT_ATTRIBUTES_INIT(&memoryAttributes);
memoryAttributes.ParentObject = DmaFx->QueueHandle;
void *dmaArray;
WDFMEMORY dmaAllocation;
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
WdfMemoryCreate(
&memoryAttributes,
NonPagedPoolNx,
TX_DMA_FX_ALLOC_TAG,
dmaAllocationSize,
&dmaAllocation,
&dmaArray));
// Initialize the private packet context for each packet in the ring buffer
// with its slice of the SGL buffer and its DMA transfer context
for (UINT32 i = 0; i < ringBuffer->NumberOfElements; i++)
{
NET_PACKET *packet = NetRingBufferGetPacketAtIndex(descriptor, i);
TX_DMA_FX_PACKET_CONTEXT *fxPacketContext = _TxDmaFxGetPacketContextFromToken(descriptor, packet, DmaFx->ContextToken);
fxPacketContext->ScatterGatherBuffer = (PUCHAR)DmaFx->SgListMem + i * DmaFx->ScatterGatherListSize;
fxPacketContext->DmaTransferContext = (UCHAR*)dmaArray + i * DMA_TRANSFER_CONTEXT_SIZE_V1;
}
return STATUS_SUCCESS;
}
_IRQL_requires_max_(PASSIVE_LEVEL)
__inline
NTSTATUS
_TxDmaFxInitializeBounceBuffers(
_In_ TxDmaFx *DmaFx
)
/*
Description:
Allocates the WDF common buffer used to bounce packets: NumBounceBuffers
equally sized slices, each large enough for a maximum-sized packet rounded
up to the alignment requirement.
*/
{
// Make sure the bounce buffer size is aligned to something, if
// the NIC driver provided an alignment requirement, use that value,
// otherwise use MEMORY_ALLOCATION_ALIGNMENT
ULONG_PTR alignUpBy = DmaFx->Config.AlignmentRequirement != -1
? DmaFx->Config.AlignmentRequirement + 1
: MEMORY_ALLOCATION_ALIGNMENT;
ULONG_PTR bounceBufferSize = ALIGN_UP_BY(DmaFx->Config.MaximumPacketSize, alignUpBy);
// Overflow-checked narrowing to the ULONG field
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
RtlULongPtrToULong(
bounceBufferSize,
&DmaFx->BounceBufferSize));
// Allocate all bounce buffers as a single common buffer
size_t bounceSize;
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
RtlSizeTMult(
DmaFx->BounceBufferSize,
DmaFx->NumBounceBuffers,
&bounceSize));
WDFCOMMONBUFFER commonBuffer;
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
WdfCommonBufferCreate(
DmaFx->Config.DmaEnabler,
bounceSize,
WDF_NO_OBJECT_ATTRIBUTES,
&commonBuffer));
DmaFx->BounceAlways = FALSE;
// Base addresses of the pool; individual buffers are addressed by
// index * BounceBufferSize (see _TxDmaFxBounceAndTransmitPacket)
DmaFx->BounceBasePA = WdfCommonBufferGetAlignedLogicalAddress(commonBuffer);
DmaFx->BounceBaseVA = WdfCommonBufferGetAlignedVirtualAddress(commonBuffer);
return STATUS_SUCCESS;
}
_IRQL_requires_max_(PASSIVE_LEVEL)
__inline
NTSTATUS
_TxDmaFxInitialize(
_In_ TxDmaFx *DmaFx,
_In_ BOOLEAN BypassDma
)
/*
Description:
This function will initialize the framework to properly map/unmap buffers
using DMA. It applies configuration defaults, sizes the bounce buffer pool,
caches the WDM adapter/device objects, and then allocates the resources for
either direct mapping or the DMA API path.
Arguments:
DmaFx - Framework context to initialize
BypassDma - TRUE when HAL reported ADAPTER_INFO_API_BYPASS
*/
{
if (DmaFx->Config.MaximumScatterGatherElements == 0)
DmaFx->Config.MaximumScatterGatherElements = _TX_DMA_FX_DEFAULT_SCATTER_GATHER_ELEMENTS;
if (DmaFx->Config.AddressWidth > 0)
DmaFx->MaximumAddress = 1ull << DmaFx->Config.AddressWidth;
C_ASSERT(_TX_DMA_FX_IS_POWER_OF_TWO(_TX_DMA_FX_NUM_BOUNCE_BUFFERS));
// Compute the largest power of two not exceeding
// _TX_DMA_FX_MAXIMUM_BOUNCE_BUFFER_SIZE / MaximumPacketSize, then cap it
// at _TX_DMA_FX_NUM_BOUNCE_BUFFERS (the indices rely on a power of two)
ULONG numberOfBounceBuffers = 1;
ULONG temp = _TX_DMA_FX_MAXIMUM_BOUNCE_BUFFER_SIZE / DmaFx->Config.MaximumPacketSize;
while (temp != 1)
{
temp = temp >> 1;
numberOfBounceBuffers = numberOfBounceBuffers << 1;
}
DmaFx->NumBounceBuffers =
(numberOfBounceBuffers > _TX_DMA_FX_NUM_BOUNCE_BUFFERS) ?
_TX_DMA_FX_NUM_BOUNCE_BUFFERS : numberOfBounceBuffers;
// Cache the WDM objects needed by the mapping routines
DmaFx->DmaAdapter = WdfDmaEnablerWdmGetDmaAdapter(
DmaFx->Config.DmaEnabler,
WdfDmaDirectionWriteToDevice);
DmaFx->DeviceObject = WdfDeviceWdmGetDeviceObject(DmaFx->Config.Device);
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
_TxDmaFxInitializeBounceBuffers(DmaFx));
if (BypassDma)
{
// If ADAPTER_INFO_API_BYPASS flag is set, it means we can map
// the buffers without using the DMA HAL APIs, which requires less
// resource allocation and less complicated calls
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
_TxDmaFxInitializeForDirectMapping(DmaFx));
}
else
{
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
_TxDmaFxInitializeForDma(DmaFx));
}
return STATUS_SUCCESS;
}
_IRQL_requires_max_(DISPATCH_LEVEL)
__inline
ULONG
_TxDmaFxCopyPacketToBuffer(
_In_ PCNET_DATAPATH_DESCRIPTOR descriptor,
_In_ NET_PACKET *packet,
_Out_writes_bytes_(bufferSize) VOID *buffer,
_In_ ULONG bufferSize
)
/*
Description:
Copies the valid bytes of every fragment of a NET_PACKET, up to and
including the one marked LastFragmentOfFrame, into a contiguous buffer.
Returns:
The number of bytes copied. The copy stops early (NT_VERIFY fires on
checked builds) if a fragment would overrun the destination buffer.
*/
{
UCHAR *p = (UCHAR*)buffer;
ULONG bytesRemaining = bufferSize;
UINT32 fragmentCount = NetPacketGetFragmentCount(descriptor, packet);
for (UINT32 i = 0; i < fragmentCount; i++)
{
NET_PACKET_FRAGMENT *fragment = NET_PACKET_GET_FRAGMENT(packet, descriptor, i);
// Guard against overrunning the destination; callers size the buffer
// for MaximumPacketSize, so this should not fire in practice
if (!NT_VERIFY(bytesRemaining >= fragment->ValidLength))
break;
RtlCopyMemory(p, (UCHAR*)fragment->VirtualAddress + fragment->Offset, (size_t)fragment->ValidLength);
p += fragment->ValidLength;
bytesRemaining -= (ULONG)fragment->ValidLength;
if (fragment->LastFragmentOfFrame)
break;
}
return (ULONG)(p - (UCHAR*)buffer);
}
_IRQL_requires_max_(DISPATCH_LEVEL)
__inline
NTSTATUS
_TxDmaFxTransmitPacketViaDirectMapping(
_In_ TxDmaFx *DmaFx,
_In_ NET_PACKET *NetPacket
)
/*
Description:
Builds a scatter/gather list for a NET_PACKET by translating its fragment
virtual addresses directly with MmGetPhysicalAddress (no HAL DMA APIs),
emitting one SG element per page spanned, and then calls
TX_DMA_FX_PROGRAM_DESCRIPTORS so the NIC driver can program the buffers to
hardware. Reuses the single SpareSgl allocated at initialization.
*/
{
SCATTER_GATHER_LIST *sgl = DmaFx->SpareSgl;
PCNET_DATAPATH_DESCRIPTOR descriptor = DmaFx->Descriptor;
UINT32 fragmentCount = NetPacketGetFragmentCount(descriptor, NetPacket);
sgl->NumberOfElements = 0;
for (UINT32 i = 0; i < fragmentCount; i++)
{
NET_PACKET_FRAGMENT *fragment = NET_PACKET_GET_FRAGMENT(NetPacket, descriptor, i);
ULONG_PTR vaStart = (ULONG_PTR)fragment->VirtualAddress + (ULONG)fragment->Offset;
ULONG_PTR vaEnd = vaStart + (ULONG)fragment->ValidLength;
// Skip zero-length fragments
if (vaStart == vaEnd)
continue;
// Walk the fragment one page at a time; each iteration advances to
// the start of the next page
for (ULONG_PTR va = vaStart; va < vaEnd; va = (ULONG_PTR)(PAGE_ALIGN(va)) + PAGE_SIZE)
{
// The bounce analysis already guaranteed we fit in the SGL
NT_ASSERT(sgl->NumberOfElements < DmaFx->Config.MaximumScatterGatherElements);
SCATTER_GATHER_ELEMENT *sgElement = &sgl->Elements[sgl->NumberOfElements];
// Performance can be optimized by coalescing adjacent SGEs
sgElement->Address = MmGetPhysicalAddress((PVOID)va);
// Element length runs to the end of the page, or to vaEnd when
// this is the last page of the fragment
if (PAGE_ALIGN(va) != PAGE_ALIGN(vaEnd))
sgElement->Length = PAGE_SIZE - BYTE_OFFSET(va);
else
sgElement->Length = (ULONG)(vaEnd - va);
sgl->NumberOfElements += 1;
}
if (fragment->LastFragmentOfFrame)
break;
}
NT_ASSERT(sgl->NumberOfElements > 0);
TX_DMA_FX_PROGRAM_DESCRIPTORS(
DmaFx->QueueHandle,
NetPacket,
sgl);
return STATUS_SUCCESS;
}
_IRQL_requires_max_(DISPATCH_LEVEL)
__inline
NTSTATUS
_TxDmaFxTransmitPacketViaDma(
_In_ TxDmaFx *DmaFx,
_In_ NET_PACKET *NetPacket
)
/*
Description:
Maps the fragments of a NET_PACKET using the HAL DMA APIs
(BuildScatterGatherListEx in synchronous mode) and then calls
TX_DMA_FX_PROGRAM_DESCRIPTORS to let the NIC driver program the buffers to
hardware. The resulting SGL is stored in the packet context and released
in _TxDmaFxCompleteTxPackets via PutScatterGatherList.
Returns:
The status of BuildScatterGatherListEx; STATUS_SUCCESS when the packet
was programmed.
*/
{
// Compute the total frame length and, when the first fragment starts at
// an offset, the offset into the MDL chain where the frame begins
ULONG totalFrameLength = 0;
ULONG mdlChainOffset = 0;
PCNET_DATAPATH_DESCRIPTOR descriptor = DmaFx->Descriptor;
UINT32 fragmentCount = NetPacketGetFragmentCount(descriptor, NetPacket);
for (UINT32 i = 0; i < fragmentCount; i++)
{
NET_PACKET_FRAGMENT *fragment = NET_PACKET_GET_FRAGMENT(NetPacket, descriptor, i);
if (fragment->Offset > 0)
{
// Only the first fragment's offset contributes; the bounce
// analysis forces a bounce when any later fragment has an offset
if (totalFrameLength == 0)
{
mdlChainOffset += (ULONG)fragment->Offset;
}
}
totalFrameLength += (ULONG)fragment->ValidLength;
if (fragment->LastFragmentOfFrame)
break;
}
DMA_ADAPTER *dmaAdapter = DmaFx->DmaAdapter;
TX_DMA_FX_PACKET_CONTEXT *fxPacketContext = _TxDmaFxGetPacketContextFromToken(descriptor, NetPacket, DmaFx->ContextToken);
_TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(
dmaAdapter->DmaOperations->InitializeDmaTransferContext(
dmaAdapter,
fxPacketContext->DmaTransferContext));
MDL *firstMdl = NET_PACKET_GET_FRAGMENT(NetPacket, descriptor, 0)->Mapping.Mdl;
// DMA_SYNCHRONOUS_CALLBACK with a caller-supplied SGL buffer: the list is
// built inline into ScatterGatherBuffer, no execution routine needed
NTSTATUS buildSGLStatus = dmaAdapter->DmaOperations->BuildScatterGatherListEx(
dmaAdapter,
DmaFx->DeviceObject,
fxPacketContext->DmaTransferContext,
firstMdl,
mdlChainOffset,
totalFrameLength,
DMA_SYNCHRONOUS_CALLBACK,
NULL, // ExecutionRoutine
NULL, // Context
TRUE, // WriteToDevice
fxPacketContext->ScatterGatherBuffer,
DmaFx->ScatterGatherListSize,
NULL, // DmaCompletionRoutine,
NULL, // CompletionContext,
&fxPacketContext->ScatterGatherList);
if (buildSGLStatus == STATUS_SUCCESS)
{
TX_DMA_FX_PROGRAM_DESCRIPTORS(
DmaFx->QueueHandle,
NetPacket,
fxPacketContext->ScatterGatherList);
}
// NOTE(review): presumably releases the adapter-object allocation made by
// BuildScatterGatherListEx while keeping map registers for the in-flight
// transfer -- confirm against the DMA_OPERATIONS documentation
dmaAdapter->DmaOperations->FreeAdapterObject(
dmaAdapter,
DeallocateObjectKeepRegisters);
return buildSGLStatus;
}
_IRQL_requires_max_(DISPATCH_LEVEL)
__inline
NTSTATUS
_TxDmaFxMapAndTransmitPacket(
    _In_ TxDmaFx *DmaFx,
    _Inout_ NET_PACKET *NetPacket
    )
/*
Description:
    Transmits one NET_PACKET in place, dispatching to the direct-mapping
    path when HAL bypass is in effect, or to the DMA API path otherwise.
    DMA API failures are tallied into the framework statistics before the
    status is propagated to the caller.
*/
{
    NTSTATUS status;

    if (DmaFx->DmaBypass)
        return _TxDmaFxTransmitPacketViaDirectMapping(DmaFx, NetPacket);

    status = _TxDmaFxTransmitPacketViaDma(DmaFx, NetPacket);

    switch (status)
    {
    case STATUS_SUCCESS:
        break;
    case STATUS_INSUFFICIENT_RESOURCES:
        DmaFx->Statistics.DMA.InsufficientResourcesCount += 1;
        break;
    case STATUS_BUFFER_TOO_SMALL:
        DmaFx->Statistics.DMA.BufferTooSmall += 1;
        break;
    default:
        DmaFx->Statistics.DMA.OtherErrors += 1;
        break;
    }

    return status;
}
_IRQL_requires_max_(DISPATCH_LEVEL)
__inline
NTSTATUS
_TxDmaFxBounceAndTransmitPacket(
_In_ TxDmaFx *DmaFx,
_In_ NET_PACKET *NetPacket
)
/*
Description:
Copies the buffers of a NET_PACKET into the next free bounce buffer from
the common-buffer pool, builds a single-element SCATTER_GATHER_LIST on the
stack for it, and calls TX_DMA_FX_PROGRAM_DESCRIPTORS so the NIC driver
can program the buffer to hardware.
Returns:
STATUS_INSUFFICIENT_RESOURCES when every bounce buffer is in flight,
otherwise STATUS_SUCCESS.
*/
{
// Stack storage for a one-element SGL; the bounce buffer is physically
// contiguous so a single element always suffices
union
{
SCATTER_GATHER_LIST sgl;
UCHAR sglStorage[FIELD_OFFSET(SCATTER_GATHER_LIST, Elements) + sizeof(SCATTER_GATHER_ELEMENT)];
} u;
// Is there a free bounce buffer? Busy minus free equals the number of
// buffers currently in flight (FIFO, see _TxDmaFxCompleteTxPackets)
if (DmaFx->BounceBusyIndex - DmaFx->BounceFreeIndex == DmaFx->NumBounceBuffers)
{
DmaFx->Statistics.Packet.BounceFailure += 1;
return STATUS_INSUFFICIENT_RESOURCES;
}
// NumBounceBuffers is a power of two, so modulo maps the monotonically
// increasing busy index onto a pool slot
ULONG bounce = DmaFx->BounceBusyIndex % DmaFx->NumBounceBuffers;
PVOID buffer = (UCHAR*)DmaFx->BounceBaseVA + bounce * DmaFx->BounceBufferSize;
PCNET_DATAPATH_DESCRIPTOR descriptor = DmaFx->Descriptor;
ULONG packetLength = _TxDmaFxCopyPacketToBuffer(descriptor, NetPacket, buffer, DmaFx->BounceBufferSize);
u.sgl.NumberOfElements = 1;
u.sgl.Elements[0].Length = packetLength;
u.sgl.Elements[0].Address.QuadPart = DmaFx->BounceBasePA.QuadPart + bounce * DmaFx->BounceBufferSize;
TX_DMA_FX_PROGRAM_DESCRIPTORS(
DmaFx->QueueHandle,
NetPacket,
&u.sgl);
// Remember the packet owns a bounce buffer so completion can recycle it
_TX_DMA_FX_PACKET_SET_BOUNCED_FLAG(descriptor, NetPacket);
DmaFx->Statistics.Packet.BounceSuccess += 1;
DmaFx->BounceBusyIndex += 1;
return STATUS_SUCCESS;
}
_IRQL_requires_max_(DISPATCH_LEVEL)
__inline
TX_DMA_BOUNCE_ANALYSIS
_TxDmaFxBounceAnalysis(
_In_ TxDmaFx *DmaFx,
_In_ NET_PACKET *NetPacket
)
/*
Description:
Inspects the fragments of a NET_PACKET and decides whether it can be
DMA-mapped in place, must be copied into a bounce buffer first, or cannot
be transmitted at all (larger than MaximumPacketSize). Conditions that
force bouncing: BounceAlways is set, a non-first fragment has an offset,
a non-last fragment does not fill its MDL, the alignment requirement is
violated, a physical address exceeds the configured address width, or the
packet would need more SG elements than the hardware supports.
*/
{
ULONG alignmentRequirement = DmaFx->Config.AlignmentRequirement;
ULONG maximumScatterGatherElements = DmaFx->Config.MaximumScatterGatherElements;
ULONG maximumPacketSize = DmaFx->Config.MaximumPacketSize;
BOOLEAN checkAddrWidth = DmaFx->Config.AddressWidth != 0;
ULONG numDescriptorsRequired = 0;
ULONGLONG totalPacketSize = 0;
BOOLEAN bounce = DmaFx->BounceAlways;
PCNET_DATAPATH_DESCRIPTOR descriptor = DmaFx->Descriptor;
UINT32 fragmentCount = NetPacketGetFragmentCount(descriptor, NetPacket);
for (UINT32 i = 0; i < fragmentCount; i++)
{
NET_PACKET_FRAGMENT *fragment = NET_PACKET_GET_FRAGMENT(NetPacket, descriptor, i);
// If a fragment other than the first one has an offset, the DMA
// APIs won't be able to properly map the buffers.
if (fragment->Offset > 0 && i != 0)
bounce = TRUE;
MDL *mdl = (MDL*)fragment->Mapping.Mdl;
// If a fragment other than the last one does not completely fill
// the memory described by the MDL, the DMA APIs won't be able to
// properly map the buffers
if (fragment->Offset + fragment->ValidLength < MmGetMdlByteCount(mdl) && !fragment->LastFragmentOfFrame)
bounce = TRUE;
// Calculate how many Scatter/Gather elements we need to transmit
// this fragment.
// This is overly pessimistic if the physical pages are contiguous.
ULONG_PTR va = (ULONG_PTR)fragment->VirtualAddress + (ULONG)fragment->Offset;
numDescriptorsRequired += (ULONG)ADDRESS_AND_SIZE_TO_SPAN_PAGES(va, fragment->ValidLength);
totalPacketSize += fragment->ValidLength;
// If the configuration has an alignment requirement, check if the virtual
// address meets the requirement. We're assuming the alignment requirement
// is less than 4096.
if ((alignmentRequirement != -1) && (va & alignmentRequirement) != 0)
bounce = TRUE;
// Hardware with a limited address width cannot reach buffers above
// 2^AddressWidth; only the first page's physical address is checked here
if (checkAddrWidth)
{
PHYSICAL_ADDRESS pa = MmGetPhysicalAddress((VOID*)va);
if ((ULONGLONG)pa.QuadPart > DmaFx->MaximumAddress)
bounce = TRUE;
}
if (fragment->LastFragmentOfFrame)
break;
}
// First check if we can transmit the buffers at all
if (maximumPacketSize != 0 && totalPacketSize > maximumPacketSize)
return TxDmaCannotTransmit;
// Then check if we detected any condition that requires
// buffer bouncing
if (bounce)
return TxDmaTransmitAfterBouncing;
// Lastly check if we can DMA the number of required descriptors
if (numDescriptorsRequired > maximumScatterGatherElements)
return TxDmaTransmitAfterBouncing;
return TxDmaTransmitInPlace;
}
#endif

Просмотреть файл

@ -1,248 +0,0 @@
// Copyright (C) Microsoft Corporation. All rights reserved.
#ifndef _TXDMAFXTYPES_H_
#define _TXDMAFXTYPES_H_
// Propagates a failing NTSTATUS to the caller. NT error statuses are negative
// (NT_SUCCESS(s) is s >= 0), so "< 0" is equivalent to !NT_SUCCESS.
// Note: renamed the temporary from __statusRet -- identifiers containing a
// double underscore are reserved for the implementation (CERT DCL37-C).
#define _TX_DMA_FX_RETURN_IF_NTSTATUS_FAILED(status) { NTSTATUS _statusRet = (status); if (_statusRet < 0) { return _statusRet;} }
// TRUE when _n is a non-zero power of two.
#define _TX_DMA_FX_IS_POWER_OF_TWO(_n) (((_n) != 0) && !((_n) & ((_n) - 1)))
// The "bounced" state of a packet is tracked in the Scratch field of its first
// fragment. These macros now consume their descriptor parameter; previously
// they ignored it and silently relied on a local named "descriptor" existing
// at every expansion site (all call sites pass "descriptor", so expansions
// are unchanged).
#define _TX_DMA_FX_PACKET_SET_BOUNCED_FLAG(_descriptor, _packet) NET_PACKET_GET_FRAGMENT(_packet, _descriptor, 0)->Scratch = TRUE
#define _TX_DMA_FX_PACKET_CLEAR_BOUNCED_FLAG(_descriptor, _packet) NET_PACKET_GET_FRAGMENT(_packet, _descriptor, 0)->Scratch = FALSE
#define _TX_DMA_FX_IS_PACKET_BOUNCED(_descriptor, _packet) NET_PACKET_GET_FRAGMENT(_packet, _descriptor, 0)->Scratch
// Upper bound on the bounce buffer pool size; must be a power of two because
// the busy/free indices rely on modulo arithmetic (asserted in _TxDmaFxInitialize).
#define _TX_DMA_FX_NUM_BOUNCE_BUFFERS ( 1 << 4 )
// Default for MaximumScatterGatherElements when the NIC driver passes 0.
#define _TX_DMA_FX_DEFAULT_SCATTER_GATHER_ELEMENTS 16
// Largest supported MaximumPacketSize (and bounce buffer): 256 KiB.
#define _TX_DMA_FX_MAXIMUM_BOUNCE_BUFFER_SIZE 0x40000
#ifndef TX_DMA_FX_ALLOC_TAG
#pragma message(": warning: It is a good practice to define TX_DMA_FX_ALLOC_TAG. Defaulting to WdfDriverGlobals->DriverTag.")
#define TX_DMA_FX_ALLOC_TAG WdfDriverGlobals->DriverTag
#endif
//
// Verdict of the bounce analysis for a single NET_PACKET
// (see _TxDmaFxBounceAnalysis).
//
typedef enum _TX_DMA_BOUNCE_ANALYSIS
{
// The fragments can be mapped and transmitted as-is
TxDmaTransmitInPlace,
// The fragments must first be copied into a contiguous bounce buffer
TxDmaTransmitAfterBouncing,
// The packet cannot be transmitted (e.g. it exceeds MaximumPacketSize)
TxDmaCannotTransmit,
} TX_DMA_BOUNCE_ANALYSIS;
//
// Optional callback - lets the NIC driver run hardware-specific checks on a
// packet the framework's own analysis approved for in-place transmission.
// Called at most once per NET_PACKET to be transmitted.
//
typedef
_Function_class_(EVT_TX_DMA_QUEUE_BOUNCE_ANALYSIS)
_IRQL_requires_same_
_IRQL_requires_max_(DISPATCH_LEVEL)
TX_DMA_BOUNCE_ANALYSIS
EVT_TX_DMA_QUEUE_BOUNCE_ANALYSIS(
_In_ NETTXQUEUE TxQueue,
_In_ NET_PACKET *NetPacket
);
typedef EVT_TX_DMA_QUEUE_BOUNCE_ANALYSIS *PFN_SG_TXQUEUE_BOUNCE_ANALYSIS;
//
// Required callback - called once per NET_PACKET to be transmitted, with the
// SCATTER_GATHER_LIST already built by the framework, so the NIC driver can
// program its hardware descriptors.
//
typedef
_Function_class_(EVT_TX_DMA_QUEUE_PROGRAM_DESCRIPTORS)
_IRQL_requires_same_
_IRQL_requires_max_(DISPATCH_LEVEL)
VOID
EVT_TX_DMA_QUEUE_PROGRAM_DESCRIPTORS(
_In_ NETTXQUEUE TxQueue,
_In_ NET_PACKET *NetPacket,
_In_ SCATTER_GATHER_LIST *Sgl
);
typedef EVT_TX_DMA_QUEUE_PROGRAM_DESCRIPTORS *PFN_SG_TXQUEUE_PROGRAM_DESCRIPTORS;
//
// Required callback - called for each pending NET_PACKET. Return
// STATUS_SUCCESS when the DMA transfer finished, STATUS_PENDING to keep the
// packet pending; any other status completes the packet as failed.
//
typedef
_Function_class_(EVT_TX_DMA_QUEUE_GET_PACKET_STATUS)
_IRQL_requires_same_
_IRQL_requires_max_(DISPATCH_LEVEL)
NTSTATUS
EVT_TX_DMA_QUEUE_GET_PACKET_STATUS(
_In_ NETTXQUEUE TxQueue,
_In_ NET_PACKET *NetPacket
);
typedef EVT_TX_DMA_QUEUE_GET_PACKET_STATUS *PFN_SG_TXQUEUE_GET_PACKET_STATUS;
//
// Required callback - called once per advance pass when at least one packet
// was programmed, so the NIC driver can flush its DMA transaction.
//
typedef
_Function_class_(EVT_TX_DMA_QUEUE_FLUSH_TRANSACTION)
_IRQL_requires_same_
_IRQL_requires_max_(DISPATCH_LEVEL)
VOID
EVT_TX_DMA_QUEUE_FLUSH_TRANSACTION(
_In_ NETTXQUEUE TxQueue
);
typedef EVT_TX_DMA_QUEUE_FLUSH_TRANSACTION *PFN_SG_TXQUEUE_FLUSH_TRANSACTION;
//
// Configuration the NIC driver passes to NetTxDmaQueueCreate. Initialize with
// NET_TX_DMA_QUEUE_CONFIG_INIT; validated by _TxDmaFxValidateConfig.
//
typedef struct _NET_TX_DMA_QUEUE_CONFIG
{
//
// Has information needed by DMA APIs
//
WDFDEVICE Device;
//
// DMA enabler to use when mapping/unmapping buffers
//
WDFDMAENABLER DmaEnabler;
//
// In certain conditions the framework might be able
// to bypass DMA APIs and map buffers directly, set
// this to TRUE if you want to allow this behavior
//
BOOLEAN AllowDmaBypass;
//
// Maximum scatter/gather elements the hardware supports;
// 0 for no limit (a framework default is applied)
//
ULONG MaximumScatterGatherElements;
//
// Maximum packet size, in bytes, the Tx Queue can transmit.
// Mandatory; also bounds the bounce buffer size.
//
ULONG MaximumPacketSize;
//
// Buffer alignment mask of the form 2^N - 1 (FILE_XXX_ALIGNMENT),
// or -1 to determine automatically
//
ULONG AlignmentRequirement;
//
// 2 ^ AddressWidth gives the maximum physical address supported by the
// hardware; 0 disables the address-range check
//
ULONG AddressWidth;
//
// Mandatory Callback - Normal EvtSetNotificationEnabled callback
//
PFN_TXQUEUE_SET_NOTIFICATION_ENABLED EvtTxQueueSetNotificationEnabled;
//
// Mandatory Callback - Normal EvtCancel callback
//
PFN_TXQUEUE_CANCEL EvtTxQueueCancel;
} NET_TX_DMA_QUEUE_CONFIG, *PNET_TX_DMA_QUEUE_CONFIG;
VOID
FORCEINLINE
NET_TX_DMA_QUEUE_CONFIG_INIT(
    _Out_ PNET_TX_DMA_QUEUE_CONFIG NetTxQueueConfig,
    _In_ WDFDEVICE Device,
    _In_ WDFDMAENABLER DmaEnabler,
    _In_ ULONG MaximumPacketSize,
    _In_ PFN_TXQUEUE_SET_NOTIFICATION_ENABLED EvtTxQueueSetNotificationEnabled,
    _In_ PFN_TXQUEUE_CANCEL EvtTxQueueCancel
    )
/*
Description:
    Initializes a NET_TX_DMA_QUEUE_CONFIG with the mandatory fields. Every
    other field is zeroed, except AlignmentRequirement, which defaults to -1
    (determine automatically).
*/
{
    RtlZeroMemory(NetTxQueueConfig, sizeof(*NetTxQueueConfig));

    NetTxQueueConfig->Device = Device;
    NetTxQueueConfig->DmaEnabler = DmaEnabler;
    NetTxQueueConfig->MaximumPacketSize = MaximumPacketSize;
    NetTxQueueConfig->AlignmentRequirement = (ULONG)-1;
    NetTxQueueConfig->EvtTxQueueSetNotificationEnabled = EvtTxQueueSetNotificationEnabled;
    NetTxQueueConfig->EvtTxQueueCancel = EvtTxQueueCancel;
}
//
// Diagnostic counters maintained by the framework for one Tx queue.
//
typedef struct _TxDmaFxStats
{
struct _Packet
{
// How many NET_PACKETs with IgnoreThisPacket flag set
ULONG Skipped;
// Number of times we had to bounce a packet and succeeded
ULONG BounceSuccess;
// Number of times we had to bounce a packet but failed
ULONG BounceFailure;
// Number of times the bounce analysis returned TxDmaCannotTransmit
ULONG CannotTransmit;
// Number of times TX_DMA_FX_GET_PACKET_STATUS returned an error
ULONG CompletedWithError;
} Packet;
struct _DMA
{
// Number of times an attempt to transmit a packet using DMA APIs
// returned STATUS_INSUFFICIENT_RESOURCES
ULONG InsufficientResourcesCount;
// Number of times the pre-calculated SG List size was not enough to
// describe a packet
ULONG BufferTooSmall;
// Counts other errors from DMA APIs
ULONG OtherErrors;
} DMA;
} TxDmaFxStats;
//
// Per-queue context the framework keeps alongside the NETTXQUEUE it creates
// on behalf of the NIC driver (attached via WDF_DECLARE_CONTEXT_TYPE_WITH_NAME).
//
typedef struct _TxDmaFx
{
    // Queue handle, created by the framework
    NETTXQUEUE QueueHandle;

    // Copy of the configuration provided by
    // the NIC driver
    NET_TX_DMA_QUEUE_CONFIG Config;

    // Cache of the NET_DATAPATH_DESCRIPTOR pointer
    PCNET_DATAPATH_DESCRIPTOR Descriptor;

    // If the NIC driver provided the AddressWidth
    // parameter in the configuration, this stores
    // the maximum physical address supported by
    // the hardware
    ULONGLONG MaximumAddress;

    // Used to retrieve the framework's packet
    // context from a NET_PACKET
    PNET_PACKET_CONTEXT_TOKEN ContextToken;

    // WDM objects backing the WDFDEVICE/WDFDMAENABLER in Config
    DEVICE_OBJECT *DeviceObject;
    DMA_ADAPTER *DmaAdapter;

    // If TRUE, we can bypass HAL's DMA APIs
    BOOLEAN DmaBypass;

    // Used when we can do direct mapping
    SCATTER_GATHER_LIST *SpareSgl;

    // Used when we need to use HAL APIs: preallocated memory for the
    // scatter/gather list and its size in bytes
    VOID *SgListMem;
    ULONG ScatterGatherListSize;

    // Bounce buffer management: base addresses (physical/virtual) of the
    // bounce region, per-buffer size, and free/busy ring indices.
    // NOTE(review): BounceAlways presumably forces every packet through the
    // bounce path -- confirm against the bounce analysis implementation.
    PHYSICAL_ADDRESS BounceBasePA;
    VOID *BounceBaseVA;
    ULONG BounceBufferSize;
    BOOLEAN BounceAlways;
    ULONG NumBounceBuffers;
    ULONG BounceFreeIndex;
    ULONG BounceBusyIndex;

    // Diagnostic counters (see TxDmaFxStats)
    TxDmaFxStats Statistics;
} TxDmaFx;
// Attach TxDmaFx as the WDF context of the queue object; retrieved internally
// with _TxDmaFxGetContext().
WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(TxDmaFx, _TxDmaFxGetContext);

// Per-packet context. Used only with HAL APIs (i.e. when DMA bypass is not
// in effect): holds the HAL-built SG list, its backing buffer, and the
// DMA transfer context for the in-flight packet.
typedef struct _TX_DMA_FX_PACKET_CONTEXT
{
    SCATTER_GATHER_LIST *ScatterGatherList;
    VOID *ScatterGatherBuffer;
    VOID *DmaTransferContext;
} TX_DMA_FX_PACKET_CONTEXT;

// Retrieved from a NET_PACKET via _TxDmaFxGetPacketContext().
NET_PACKET_DECLARE_CONTEXT_TYPE_WITH_NAME(TX_DMA_FX_PACKET_CONTEXT, _TxDmaFxGetPacketContext);

// The framework's own advance callback, which wraps the NIC driver's
// TX_DMA_FX_* callbacks (see the framework overview at the top of this file).
EVT_TXQUEUE_ADVANCE _TxDmaFxAdvance;
#endif

Просмотреть файл

@ -29,13 +29,13 @@
<PropertyGroup Label="Globals">
<ProjectGuid>{EAC78963-C6D0-4C8C-918D-5B5996AE80AC}</ProjectGuid>
<RootNamespace>$(MSBuildProjectName)</RootNamespace>
<WindowsTargetPlatformVersion>10.0.17134.0</WindowsTargetPlatformVersion>
<WindowsTargetPlatformVersion>10.0.17763.0</WindowsTargetPlatformVersion>
</PropertyGroup>
<PropertyGroup Label="Configuration">
<TargetVersion>Windows10</TargetVersion>
<NetAdapterDriver>true</NetAdapterDriver>
<NETADAPTER_VERSION_MAJOR>1</NETADAPTER_VERSION_MAJOR>
<NETADAPTER_VERSION_MINOR>2</NETADAPTER_VERSION_MINOR>
<NETADAPTER_VERSION_MINOR>3</NETADAPTER_VERSION_MINOR>
<KMDF_VERSION_MAJOR>1</KMDF_VERSION_MAJOR>
<KMDF_VERSION_MINOR>23</KMDF_VERSION_MINOR>
<UseDebugLibraries Condition="'$(Configuration)'=='Debug'">True</UseDebugLibraries>

Просмотреть файл

@ -20,24 +20,6 @@
#include "eeprom.h"
#include "gigamac.h"
// the api for registration of extensions was incomplete at the time of
// the NetAdapterCx 1.2 release. for now we directly call the internal
// registration function. anyone using this sample should be aware that
// the API and ABI for this call is likely to change and this direct
// call absolutely will not work when NetAdapterCx 1.3 is released.
NTSTATUS
NetAdapterRegisterPacketExtension(
_In_ NETADAPTER Adapter,
const PNET_PACKET_EXTENSION ExtensionToRegister
)
{
return ((NTSTATUS(*)(PNET_DRIVER_GLOBALS, NETADAPTER, CONST PNET_PACKET_EXTENSION))
NetFunctions[NetAdapterRegisterPacketExtensionTableIndex])(
NetDriverGlobals,
Adapter,
ExtensionToRegister);
}
#define ETH_IS_ZERO(Address) ( \
(((PUCHAR)(Address))[0] == ((UCHAR)0x00)) && \
(((PUCHAR)(Address))[1] == ((UCHAR)0x00)) && \
@ -86,7 +68,6 @@ Return Value:
adapter->OffloadEncapsulation.Header.Type = NDIS_OBJECT_TYPE_OFFLOAD_ENCAPSULATION;
adapter->EEPROMInUse = false;
adapter->GigaMacInUse = false;
//spinlock
WDF_OBJECT_ATTRIBUTES attributes;
@ -103,159 +84,20 @@ Exit:
}
void
RtAdapterQueryOffloadConfiguration(
_In_ RT_ADAPTER const *adapter,
_Out_ NDIS_OFFLOAD *offloadCaps
)
{
RtlZeroMemory(offloadCaps, sizeof(*offloadCaps));
offloadCaps->Header.Type = NDIS_OBJECT_TYPE_OFFLOAD;
offloadCaps->Header.Size = NDIS_SIZEOF_NDIS_OFFLOAD_REVISION_5;
offloadCaps->Header.Revision = NDIS_OFFLOAD_REVISION_5;
// IPv4 : Tx
offloadCaps->Checksum.IPv4Transmit.Encapsulation = NDIS_ENCAPSULATION_IEEE_802_3;
if (adapter->IPChksumOffv4 == RtChecksumOffloadTxEnabled ||
adapter->IPChksumOffv4 == RtChecksumOffloadTxRxEnabled)
{
offloadCaps->Checksum.IPv4Transmit.IpChecksum = NDIS_OFFLOAD_SUPPORTED;
offloadCaps->Checksum.IPv4Transmit.IpOptionsSupported = NDIS_OFFLOAD_SUPPORTED;
}
if (adapter->TCPChksumOffv4 == RtChecksumOffloadTxEnabled ||
adapter->TCPChksumOffv4 == RtChecksumOffloadTxRxEnabled)
{
offloadCaps->Checksum.IPv4Transmit.TcpChecksum = NDIS_OFFLOAD_SUPPORTED;
offloadCaps->Checksum.IPv4Transmit.TcpOptionsSupported = NDIS_OFFLOAD_SUPPORTED;
}
if (adapter->UDPChksumOffv4 == RtChecksumOffloadTxEnabled ||
adapter->UDPChksumOffv4 == RtChecksumOffloadTxRxEnabled)
{
offloadCaps->Checksum.IPv4Transmit.UdpChecksum = NDIS_OFFLOAD_SUPPORTED;
}
// IPv4 : Rx
offloadCaps->Checksum.IPv4Receive.Encapsulation = NDIS_ENCAPSULATION_IEEE_802_3;
if (adapter->IPChksumOffv4 == RtChecksumOffloadRxEnabled ||
adapter->IPChksumOffv4 == RtChecksumOffloadTxRxEnabled)
{
offloadCaps->Checksum.IPv4Receive.IpChecksum = NDIS_OFFLOAD_SUPPORTED;
offloadCaps->Checksum.IPv4Receive.IpOptionsSupported = NDIS_OFFLOAD_SUPPORTED;
}
if (adapter->TCPChksumOffv4 == RtChecksumOffloadRxEnabled ||
adapter->TCPChksumOffv4 == RtChecksumOffloadTxRxEnabled)
{
offloadCaps->Checksum.IPv4Receive.TcpChecksum = NDIS_OFFLOAD_SUPPORTED;
offloadCaps->Checksum.IPv4Receive.TcpOptionsSupported = NDIS_OFFLOAD_SUPPORTED;
}
if (adapter->UDPChksumOffv4 == RtChecksumOffloadRxEnabled ||
adapter->UDPChksumOffv4 == RtChecksumOffloadTxRxEnabled)
{
offloadCaps->Checksum.IPv4Receive.UdpChecksum = NDIS_OFFLOAD_SUPPORTED;
}
// IPv6 : Tx
offloadCaps->Checksum.IPv6Transmit.Encapsulation = NDIS_ENCAPSULATION_IEEE_802_3;
offloadCaps->Checksum.IPv6Transmit.IpExtensionHeadersSupported = NDIS_OFFLOAD_SUPPORTED;
if (adapter->TCPChksumOffv6 == RtChecksumOffloadTxEnabled ||
adapter->TCPChksumOffv6 == RtChecksumOffloadTxRxEnabled)
{
offloadCaps->Checksum.IPv6Transmit.TcpChecksum = NDIS_OFFLOAD_SUPPORTED;
offloadCaps->Checksum.IPv6Transmit.TcpOptionsSupported = NDIS_OFFLOAD_SUPPORTED;
}
if (adapter->UDPChksumOffv6 == RtChecksumOffloadTxEnabled ||
adapter->UDPChksumOffv6 == RtChecksumOffloadTxRxEnabled)
{
offloadCaps->Checksum.IPv6Transmit.UdpChecksum = NDIS_OFFLOAD_SUPPORTED;
}
// IPv6 : Rx
offloadCaps->Checksum.IPv6Receive.Encapsulation = NDIS_ENCAPSULATION_IEEE_802_3;
offloadCaps->Checksum.IPv6Receive.IpExtensionHeadersSupported = NDIS_OFFLOAD_SUPPORTED;
if (adapter->TCPChksumOffv6 == RtChecksumOffloadRxEnabled ||
adapter->TCPChksumOffv6 == RtChecksumOffloadTxRxEnabled)
{
offloadCaps->Checksum.IPv6Receive.TcpChecksum = NDIS_OFFLOAD_SUPPORTED;
offloadCaps->Checksum.IPv6Receive.TcpOptionsSupported = NDIS_OFFLOAD_SUPPORTED;
}
if (adapter->UDPChksumOffv6 == RtChecksumOffloadRxEnabled ||
adapter->UDPChksumOffv6 == RtChecksumOffloadTxRxEnabled)
{
offloadCaps->Checksum.IPv6Receive.UdpChecksum = NDIS_OFFLOAD_SUPPORTED;
}
// LSOv1 not supported
offloadCaps->LsoV1.IPv4.Encapsulation = NDIS_ENCAPSULATION_NOT_SUPPORTED;
offloadCaps->LsoV1.IPv4.MaxOffLoadSize = 0;
offloadCaps->LsoV1.IPv4.MinSegmentCount = 0;
offloadCaps->LsoV1.IPv4.TcpOptions = NDIS_OFFLOAD_NOT_SUPPORTED;
offloadCaps->LsoV1.IPv4.IpOptions = NDIS_OFFLOAD_NOT_SUPPORTED;
// LSOv2 IPv4
if (adapter->LSOv4 == RtLsoOffloadEnabled)
{
offloadCaps->LsoV2.IPv4.Encapsulation = NDIS_ENCAPSULATION_IEEE_802_3;
offloadCaps->LsoV2.IPv4.MaxOffLoadSize = RT_LSO_OFFLOAD_MAX_SIZE;
offloadCaps->LsoV2.IPv4.MinSegmentCount = RT_LSO_OFFLOAD_MIN_SEGMENT_COUNT;
}
// LSOv2 IPv6
if (adapter->LSOv6 == RtLsoOffloadEnabled)
{
offloadCaps->LsoV2.IPv6.Encapsulation = NDIS_ENCAPSULATION_IEEE_802_3;
offloadCaps->LsoV2.IPv6.MaxOffLoadSize = RT_LSO_OFFLOAD_MAX_SIZE;
offloadCaps->LsoV2.IPv6.MinSegmentCount = RT_LSO_OFFLOAD_MIN_SEGMENT_COUNT;
offloadCaps->LsoV2.IPv6.IpExtensionHeadersSupported = NDIS_OFFLOAD_SUPPORTED;
offloadCaps->LsoV2.IPv6.TcpOptionsSupported = NDIS_OFFLOAD_SUPPORTED;
}
}
// Lock not required in D0Entry
_Requires_lock_held_(adapter->Lock)
void
RtAdapterUpdateEnabledChecksumOffloads(
RtAdapterUpdateHardwareChecksum(
_In_ RT_ADAPTER *adapter
)
{
adapter->IpRxHwChkSumv4 =
(adapter->IPChksumOffv4 == RtChecksumOffloadRxEnabled ||
adapter->IPChksumOffv4 == RtChecksumOffloadTxRxEnabled);
adapter->TcpRxHwChkSumv4 =
(adapter->TCPChksumOffv4 == RtChecksumOffloadRxEnabled ||
adapter->TCPChksumOffv4 == RtChecksumOffloadTxRxEnabled);
adapter->UdpRxHwChkSumv4 =
(adapter->UDPChksumOffv4 == RtChecksumOffloadRxEnabled ||
adapter->UDPChksumOffv4 == RtChecksumOffloadTxRxEnabled);
adapter->TcpRxHwChkSumv6 =
(adapter->TCPChksumOffv6 == RtChecksumOffloadRxEnabled ||
adapter->TCPChksumOffv6 == RtChecksumOffloadTxRxEnabled);
adapter->UdpRxHwChkSumv6 =
(adapter->UDPChksumOffv6 == RtChecksumOffloadRxEnabled ||
adapter->UDPChksumOffv6 == RtChecksumOffloadTxRxEnabled);
USHORT cpcr = adapter->CSRAddress->CPCR;
// if one of the checksum offloads is needed
// or one of the LSO offloads is enabled,
// enable HW checksum
if (adapter->IpRxHwChkSumv4 || adapter->TcpRxHwChkSumv4 ||
adapter->UdpRxHwChkSumv4 || adapter->TcpRxHwChkSumv6 ||
adapter->UdpRxHwChkSumv6 ||
if (adapter->IpHwChkSum ||
adapter->TcpHwChkSum ||
adapter->UdpHwChkSum ||
adapter->LSOv4 == RtLsoOffloadEnabled ||
adapter->LSOv6 == RtLsoOffloadEnabled)
{
@ -350,21 +192,15 @@ EvtAdapterCreateTxQueue(
WDF_OBJECT_ATTRIBUTES_INIT_CONTEXT_TYPE(&txAttributes, RT_TXQUEUE);
txAttributes.EvtDestroyCallback = EvtTxQueueDestroy;
NET_TX_DMA_QUEUE_CONFIG sgConfig;
NET_TX_DMA_QUEUE_CONFIG_INIT(
&sgConfig,
adapter->WdfDevice,
adapter->DmaEnabler,
RT_MAX_PACKET_SIZE,
NET_PACKET_QUEUE_CONFIG txConfig;
NET_PACKET_QUEUE_CONFIG_INIT(
&txConfig,
EvtTxQueueAdvance,
EvtTxQueueSetNotificationEnabled,
EvtTxQueueCancel);
// LSO goes to 64K payload header + extra
sgConfig.MaximumPacketSize = RT_MAX_FRAGMENT_SIZE * RT_MAX_PHYS_BUF_COUNT;
sgConfig.MaximumScatterGatherElements = RT_MAX_PHYS_BUF_COUNT;
sgConfig.AllowDmaBypass = TRUE;
txConfig.EvtStart = EvtTxQueueStart;
txConfig.EvtStop = EvtTxQueueStop;
NET_PACKET_CONTEXT_ATTRIBUTES contextAttributes;
NET_PACKET_CONTEXT_ATTRIBUTES_INIT_TYPE(&contextAttributes, RT_TCB);
@ -372,12 +208,12 @@ EvtAdapterCreateTxQueue(
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
NetTxQueueInitAddPacketContextAttributes(txQueueInit, &contextAttributes));
NETTXQUEUE txQueue;
NETPACKETQUEUE txQueue;
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
NetTxDmaQueueCreate(
NetTxQueueCreate(
txQueueInit,
&txAttributes,
&sgConfig,
&txConfig,
&txQueue));
#pragma endregion
@ -407,13 +243,6 @@ EvtAdapterCreateTxQueue(
#pragma endregion
WdfSpinLockAcquire(adapter->Lock); {
RtTxQueueStart(tx);
adapter->TxQueue = txQueue;
} WdfSpinLockRelease(adapter->Lock);
Exit:
TraceExitResult(status);
@ -440,15 +269,17 @@ EvtAdapterCreateRxQueue(
rxAttributes.EvtDestroyCallback = EvtRxQueueDestroy;
NET_RXQUEUE_CONFIG rxConfig;
NET_RXQUEUE_CONFIG_INIT(
NET_PACKET_QUEUE_CONFIG rxConfig;
NET_PACKET_QUEUE_CONFIG_INIT(
&rxConfig,
EvtRxQueueAdvance,
EvtRxQueueSetNotificationEnabled,
EvtRxQueueCancel);
rxConfig.EvtStart = EvtRxQueueStart;
rxConfig.EvtStop = EvtRxQueueStop;
const ULONG queueId = NetRxQueueInitGetQueueId(rxQueueInit);
NETRXQUEUE rxQueue;
NETPACKETQUEUE rxQueue;
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
NetRxQueueCreate(rxQueueInit, &rxAttributes, &rxConfig, &rxQueue));
@ -471,16 +302,6 @@ EvtAdapterCreateRxQueue(
#pragma endregion
WdfSpinLockAcquire(adapter->Lock); {
// Starting the receive queue must be synchronized with any OIDs that
// modify the receive queue's behavior.
RtRxQueueStart(RtGetRxQueueContext(rxQueue));
adapter->RxQueues[rx->QueueId] = rxQueue;
} WdfSpinLockRelease(adapter->Lock);
Exit:
TraceExitResult(status);
@ -491,26 +312,26 @@ static
NTSTATUS
RtReceiveScalingEnable(
_In_ RT_ADAPTER *adapter,
_In_ const NET_ADAPTER_RECEIVE_SCALING_PROTOCOL_TYPE *protocols
_In_ NET_ADAPTER_RECEIVE_SCALING_PROTOCOL_TYPE protocols
)
{
UINT32 controlBitsEnable = RSS_MULTI_CPU_ENABLE | RSS_HASH_BITS_ENABLE;
if (*protocols & NetAdapterReceiveScalingProtocolTypeIPv4)
if (protocols & NetAdapterReceiveScalingProtocolTypeIPv4)
{
controlBitsEnable |= RSS_IPV4_ENABLE;
if (*protocols & NetAdapterReceiveScalingProtocolTypeTcp)
if (protocols & NetAdapterReceiveScalingProtocolTypeTcp)
{
controlBitsEnable |= RSS_IPV4_TCP_ENABLE;
}
}
if (*protocols & NetAdapterReceiveScalingProtocolTypeIPv6)
if (protocols & NetAdapterReceiveScalingProtocolTypeIPv6)
{
controlBitsEnable |= RSS_IPV6_ENABLE;
if (*protocols & NetAdapterReceiveScalingProtocolTypeTcp)
if (protocols & NetAdapterReceiveScalingProtocolTypeTcp)
{
controlBitsEnable |= RSS_IPV6_TCP_ENABLE;
}
@ -535,7 +356,7 @@ RtReceiveScalingSetHashSecretKey(
)
{
const UINT32 * key = (const UINT32 *)hashSecretKey->Key;
const size_t keySize = hashSecretKey->Count / sizeof(*key);
const size_t keySize = hashSecretKey->Length / sizeof(*key);
if (! GigaMacRssSetHashSecretKey(adapter, key, keySize))
{
WdfDeviceSetFailed(adapter->WdfDevice, WdfDeviceFailedAttemptRestart);
@ -548,19 +369,20 @@ RtReceiveScalingSetHashSecretKey(
static
NTSTATUS
EvtAdapterReceiveScalingEnable(
_In_ NETADAPTER netAdapter
_In_ NETADAPTER netAdapter,
_In_ NET_ADAPTER_RECEIVE_SCALING_HASH_TYPE hashType,
_In_ NET_ADAPTER_RECEIVE_SCALING_PROTOCOL_TYPE protocolType
)
{
UNREFERENCED_PARAMETER(hashType);
TraceEntryNetAdapter(netAdapter);
NTSTATUS status = STATUS_SUCCESS;
RT_ADAPTER *adapter = RtGetAdapterContext(netAdapter);
const NET_ADAPTER_RECEIVE_SCALING_PROTOCOL_TYPE protocols =
NetAdapterGetReceiveScalingProtocolTypes(netAdapter);
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
RtReceiveScalingEnable(adapter, &protocols));
RtReceiveScalingEnable(adapter, protocolType));
Exit:
TraceExitResult(status);
@ -601,7 +423,7 @@ EvtAdapterReceiveScalingSetIndirectionEntries(
{
RT_ADAPTER * adapter = RtGetAdapterContext(netAdapter);
for (size_t i = 0; i < indirectionEntries->Count; i++)
for (size_t i = 0; i < indirectionEntries->Length; i++)
{
const ULONG queueId = RtGetRxQueueContext(indirectionEntries->Entries[i].Queue)->QueueId;
const UINT32 index = indirectionEntries->Entries[i].Index;
@ -1041,7 +863,7 @@ RtAdapterSetReceiveScalingCapabilities(
NET_ADAPTER_RECEIVE_SCALING_CAPABILITIES_INIT(
&receiveScalingCapabilities,
4, // NumberOfQueues
NetAdapterReceiveScalingUnhashedTargetTypeQueue,
NetAdapterReceiveScalingUnhashedTargetTypeHashIndex,
NetAdapterReceiveScalingHashTypeToeplitz,
NetAdapterReceiveScalingProtocolTypeIPv4 |
NetAdapterReceiveScalingProtocolTypeIPv6 |
@ -1075,14 +897,22 @@ RtAdapterSetDatapathCapabilities(
_In_ RT_ADAPTER const *adapter
)
{
NET_ADAPTER_DMA_CAPABILITIES txDmaCapabilities;
NET_ADAPTER_DMA_CAPABILITIES_INIT(&txDmaCapabilities, adapter->DmaEnabler);
NET_ADAPTER_TX_CAPABILITIES txCapabilities;
NET_ADAPTER_TX_CAPABILITIES_INIT(
&txCapabilities,
RT_MAX_PACKET_SIZE,
NET_ADAPTER_TX_CAPABILITIES_INIT_FOR_DMA(
&txCapabilities,
&txDmaCapabilities,
RT_MAX_FRAGMENT_SIZE,
1);
txCapabilities.FragmentRingNumberOfElementsHint = adapter->NumTcb * RT_MAX_PHYS_BUF_COUNT;
// LSO goes to 64K payload header + extra
//sgConfig.MaximumPacketSize = RT_MAX_FRAGMENT_SIZE * RT_MAX_PHYS_BUF_COUNT;
txCapabilities.FragmentRingNumberOfElementsHint = adapter->NumTcb * RT_MAX_PHYS_BUF_COUNT;
txCapabilities.MaximumNumberOfFragments = RT_MAX_PHYS_BUF_COUNT;
NET_ADAPTER_DMA_CAPABILITIES rxDmaCapabilities;
NET_ADAPTER_DMA_CAPABILITIES_INIT(&rxDmaCapabilities, adapter->DmaEnabler);
@ -1100,18 +930,75 @@ RtAdapterSetDatapathCapabilities(
}
_Use_decl_annotations_
NTSTATUS
EvtAdapterSetCapabilities(
_In_ NETADAPTER netAdapter
static
void
EvtAdapterOffloadSetChecksum(
_In_ NETADAPTER netAdapter,
_In_ NET_ADAPTER_OFFLOAD_CHECKSUM_CAPABILITIES * capabilities
)
{
TraceEntryNetAdapter(netAdapter);
RT_ADAPTER *adapter = RtGetAdapterContext(netAdapter);
adapter->IpHwChkSum = capabilities->IPv4;
adapter->TcpHwChkSum = capabilities->Tcp;
adapter->UdpHwChkSum = capabilities->Udp;
RtAdapterUpdateHardwareChecksum(adapter);
}
static
void
EvtAdapterOffloadSetLso(
_In_ NETADAPTER netAdapter,
_In_ NET_ADAPTER_OFFLOAD_LSO_CAPABILITIES * capabilities
)
{
RT_ADAPTER *adapter = RtGetAdapterContext(netAdapter);
adapter->LSOv4 = capabilities->IPv4 ? RtLsoOffloadEnabled : RtLsoOffloadDisabled;
adapter->LSOv6 = capabilities->IPv6 ? RtLsoOffloadEnabled : RtLsoOffloadDisabled;
RtAdapterUpdateHardwareChecksum(adapter);
}
static
void
RtAdapterSetOffloadCapabilities(
_In_ RT_ADAPTER const *adapter
)
{
NET_ADAPTER_OFFLOAD_CHECKSUM_CAPABILITIES checksumOffloadCapabilities;
NET_ADAPTER_OFFLOAD_CHECKSUM_CAPABILITIES_INIT(
&checksumOffloadCapabilities,
TRUE,
TRUE,
TRUE);
NetAdapterOffloadSetChecksumCapabilities(adapter->NetAdapter, &checksumOffloadCapabilities, EvtAdapterOffloadSetChecksum);
NET_ADAPTER_OFFLOAD_LSO_CAPABILITIES lsoOffloadCapabilities;
NET_ADAPTER_OFFLOAD_LSO_CAPABILITIES_INIT(
&lsoOffloadCapabilities,
TRUE,
TRUE,
RT_LSO_OFFLOAD_MAX_SIZE,
RT_LSO_OFFLOAD_MIN_SEGMENT_COUNT);
NetAdapterOffloadSetLsoCapabilities(adapter->NetAdapter, &lsoOffloadCapabilities, EvtAdapterOffloadSetLso);
}
_Use_decl_annotations_
NTSTATUS
RtAdapterStart(
RT_ADAPTER *adapter
)
{
TraceEntryNetAdapter(adapter->NetAdapter);
NTSTATUS status = STATUS_SUCCESS;
RT_ADAPTER *adapter = RtGetAdapterContext(netAdapter);
RtAdapterSetLinkLayerCapabilities(adapter);
RtAdapterSetReceiveScalingCapabilities(adapter);
@ -1120,61 +1007,11 @@ EvtAdapterSetCapabilities(
RtAdapterSetDatapathCapabilities(adapter);
NET_PACKET_EXTENSION extension;
NET_PACKET_EXTENSION_INIT(
&extension,
NET_PACKET_EXTENSION_CHECKSUM_NAME,
NET_PACKET_EXTENSION_CHECKSUM_VERSION_1,
NET_PACKET_EXTENSION_CHECKSUM_VERSION_1_SIZE,
sizeof(ULONG) - 1);
RtAdapterSetOffloadCapabilities(adapter);
// Register checksum extension.
GOTO_IF_NOT_NT_SUCCESS(
Exit, status,
NetAdapterRegisterPacketExtension(netAdapter, &extension));
NET_PACKET_EXTENSION_INIT(
&extension,
NET_PACKET_EXTENSION_LSO_NAME,
NET_PACKET_EXTENSION_LSO_VERSION_1,
NET_PACKET_EXTENSION_LSO_VERSION_1_SIZE,
sizeof(ULONG) - 1);
// Register LSO extension.
GOTO_IF_NOT_NT_SUCCESS(
Exit, status,
NetAdapterRegisterPacketExtension(netAdapter, &extension));
NDIS_MINIPORT_ADAPTER_OFFLOAD_ATTRIBUTES offloadAttributes;
RtlZeroMemory(&offloadAttributes, sizeof(NDIS_MINIPORT_ADAPTER_OFFLOAD_ATTRIBUTES));
offloadAttributes.Header.Type = NDIS_OBJECT_TYPE_MINIPORT_ADAPTER_OFFLOAD_ATTRIBUTES;
offloadAttributes.Header.Size = sizeof(NDIS_MINIPORT_ADAPTER_OFFLOAD_ATTRIBUTES);
offloadAttributes.Header.Revision = NDIS_MINIPORT_ADAPTER_OFFLOAD_ATTRIBUTES_REVISION_1;
NDIS_OFFLOAD hardwareCaps;
RtAdapterQueryHardwareCapabilities(&hardwareCaps);
offloadAttributes.HardwareOffloadCapabilities = &hardwareCaps;
NDIS_OFFLOAD offloadCaps;
RtAdapterQueryOffloadConfiguration(adapter, &offloadCaps);
offloadAttributes.DefaultOffloadConfiguration = &offloadCaps;
status = STATUS_SUCCESS;
NDIS_STATUS ndisStatus = NdisMSetMiniportAttributes(
adapter->NdisLegacyAdapterHandle,
reinterpret_cast<NDIS_MINIPORT_ADAPTER_ATTRIBUTES*>(&offloadAttributes));
if (ndisStatus != NDIS_STATUS_SUCCESS)
{
status = (NTSTATUS)ndisStatus;
if (NT_SUCCESS(status))
{
status = STATUS_UNSUCCESSFUL;
}
goto Exit;
}
NetAdapterStart(adapter->NetAdapter));
Exit:
TraceExitResult(status);

Просмотреть файл

@ -89,8 +89,8 @@ typedef struct _RT_ADAPTER
WDFDEVICE WdfDevice;
// Handle to default Tx and Rx Queues
NETTXQUEUE TxQueue;
NETRXQUEUE RxQueues[RT_NUMBER_OF_QUEUES];
NETPACKETQUEUE TxQueue;
NETPACKETQUEUE RxQueues[RT_NUMBER_OF_QUEUES];
// Pointer to interrupt object
RT_INTERRUPT *Interrupt;
@ -151,6 +151,7 @@ typedef struct _RT_ADAPTER
WDFDMAENABLER DmaEnabler;
WDFCOMMONBUFFER HwTallyMemAlloc;
PHYSICAL_ADDRESS TallyPhy;
RT_TALLY *GTally;
@ -160,21 +161,12 @@ typedef struct _RT_ADAPTER
NDIS_OFFLOAD_ENCAPSULATION OffloadEncapsulation;
RT_CHKSUM_OFFLOAD UDPChksumOffv4;
RT_CHKSUM_OFFLOAD UDPChksumOffv6;
RT_CHKSUM_OFFLOAD IPChksumOffv4;
RT_CHKSUM_OFFLOAD TCPChksumOffv4;
RT_CHKSUM_OFFLOAD TCPChksumOffv6;
USHORT ReceiveBuffers;
USHORT TransmitBuffers;
BOOLEAN IpRxHwChkSumv4;
BOOLEAN TcpRxHwChkSumv4;
BOOLEAN UdpRxHwChkSumv4;
BOOLEAN TcpRxHwChkSumv6;
BOOLEAN UdpRxHwChkSumv6;
BOOLEAN IpHwChkSum;
BOOLEAN TcpHwChkSum;
BOOLEAN UdpHwChkSum;
ULONG ChksumErrRxIpv4Cnt;
ULONG ChksumErrRxTcpIpv6Cnt;
@ -195,7 +187,6 @@ typedef struct _RT_ADAPTER
// basic detection of concurrent EEPROM use
bool EEPROMSupported;
bool EEPROMInUse;
bool GigaMacInUse;
// ReceiveScaling
UINT32 RssIndirectionTable[RT_INDIRECTION_TABLE_SIZE];
@ -209,7 +200,6 @@ typedef struct _RT_ADAPTER
WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(RT_ADAPTER, RtGetAdapterContext);
EVT_NET_ADAPTER_SET_CAPABILITIES EvtAdapterSetCapabilities;
EVT_NET_ADAPTER_CREATE_TXQUEUE EvtAdapterCreateTxQueue;
EVT_NET_ADAPTER_CREATE_RXQUEUE EvtAdapterCreateRxQueue;
@ -240,17 +230,14 @@ RtInitializeAdapterContext(
_In_ WDFDEVICE device,
_In_ NETADAPTER netAdapter);
NTSTATUS
RtAdapterStart(
_In_ RT_ADAPTER *adapter);
void RtAdapterUpdateInterruptModeration(_In_ RT_ADAPTER *adapter);
void
RtAdapterQueryOffloadConfiguration(
_In_ RT_ADAPTER const *adapter,
_Out_ NDIS_OFFLOAD *offloadCaps);
// Lock not required in D0Entry
_Requires_lock_held_(adapter->Lock)
void
RtAdapterUpdateEnabledChecksumOffloads(_In_ RT_ADAPTER *adapter);
RtAdapterUpdateHardwareChecksum(_In_ RT_ADAPTER *adapter);
NTSTATUS
RtAdapterReadAddress(_In_ RT_ADAPTER *adapter);

Просмотреть файл

@ -36,16 +36,9 @@ RT_ADVANCED_PROPERTY RtSupportedProperties[] =
{ NDIS_STRING_CONST("*SpeedDuplex"), RT_OFFSET(SpeedDuplex), RT_SIZE(SpeedDuplex), RtSpeedDuplexModeAutoNegotiation, RtSpeedDuplexModeAutoNegotiation, RtSpeedDuplexMode1GFullDuplex },
{ NDIS_STRING_CONST("*ReceiveBuffers"), RT_OFFSET(ReceiveBuffers), RT_SIZE(ReceiveBuffers), 128, RT_MIN_RX_DESC, RT_MAX_RX_DESC },
{ NDIS_STRING_CONST("*TransmitBuffers"), RT_OFFSET(TransmitBuffers), RT_SIZE(TransmitBuffers), 128, RT_MIN_TCB, RT_MAX_TCB },
{ NDIS_STRING_CONST("*IPChecksumOffloadIPv4"), RT_OFFSET(IPChksumOffv4), RT_SIZE(IPChksumOffv4), RtChecksumOffloadTxRxEnabled, RtChecksumOffloadDisabled, RtChecksumOffloadTxRxEnabled },
{ NDIS_STRING_CONST("*UDPChecksumOffloadIPv6"), RT_OFFSET(UDPChksumOffv6), RT_SIZE(UDPChksumOffv6), RtChecksumOffloadTxRxEnabled, RtChecksumOffloadDisabled, RtChecksumOffloadTxRxEnabled },
{ NDIS_STRING_CONST("*UDPChecksumOffloadIPv4"), RT_OFFSET(UDPChksumOffv4), RT_SIZE(UDPChksumOffv4), RtChecksumOffloadTxRxEnabled, RtChecksumOffloadDisabled, RtChecksumOffloadTxRxEnabled },
{ NDIS_STRING_CONST("*TCPChecksumOffloadIPv4"), RT_OFFSET(TCPChksumOffv4), RT_SIZE(TCPChksumOffv4), RtChecksumOffloadTxRxEnabled, RtChecksumOffloadDisabled, RtChecksumOffloadTxRxEnabled },
{ NDIS_STRING_CONST("*TCPChecksumOffloadIPv6"), RT_OFFSET(TCPChksumOffv6), RT_SIZE(TCPChksumOffv6), RtChecksumOffloadTxRxEnabled, RtChecksumOffloadDisabled, RtChecksumOffloadTxRxEnabled },
{ NDIS_STRING_CONST("*WakeOnMagicPacket"), RT_OFFSET(WakeOnMagicPacketEnabled), RT_SIZE(WakeOnMagicPacketEnabled), true, false, true },
{ NDIS_STRING_CONST("*InterruptModeration"), RT_OFFSET(InterruptModerationMode), RT_SIZE(InterruptModerationMode), RtInterruptModerationEnabled, RtInterruptModerationDisabled, RtInterruptModerationEnabled },
{ NDIS_STRING_CONST("*FlowControl"), RT_OFFSET(FlowControl), RT_SIZE(FlowControl), RtFlowControlTxRxEnabled, RtFlowControlDisabled, RtFlowControlTxRxEnabled },
{ NDIS_STRING_CONST("*LsoV2Ipv4"), RT_OFFSET(LSOv4), RT_SIZE(LSOv4), RtLsoOffloadDisabled, RtLsoOffloadDisabled, RtLsoOffloadEnabled },
{ NDIS_STRING_CONST("*LsoV2Ipv6"), RT_OFFSET(LSOv6), RT_SIZE(LSOv6), RtLsoOffloadDisabled, RtLsoOffloadDisabled, RtLsoOffloadEnabled },
{ NDIS_STRING_CONST("*RSS"), RT_OFFSET(RssEnabled), RT_SIZE(RssEnabled), false, false, true },
// Custom Keywords

Просмотреть файл

@ -131,12 +131,84 @@ Exit:
return status;
}
static
NTSTATUS
RtInitializeChipType(
_In_ RT_ADAPTER *adapter)
{
if (RtAdapterQueryChipType(adapter, &adapter->ChipType))
{
TraceLoggingWrite(
RealtekTraceProvider,
"ChipType",
TraceLoggingUInt32(adapter->ChipType));
return STATUS_SUCCESS;
}
//
// Unsupported card
//
NdisWriteErrorLogEntry(
adapter->NdisLegacyAdapterHandle,
NDIS_ERROR_CODE_ADAPTER_NOT_FOUND,
0);
return STATUS_NOT_FOUND;
}
static
void
RtInitializeEeprom(
_In_ RT_ADAPTER *adapter)
{
UINT16 eepromId, pciId;
if (!RtAdapterReadEepromId(adapter, &eepromId, &pciId))
{
adapter->EEPROMSupported = false;
}
else
{
TraceLoggingWrite(
RealtekTraceProvider,
"EepromId",
TraceLoggingLevel(TRACE_LEVEL_INFORMATION),
TraceLoggingUInt32(eepromId),
TraceLoggingUInt32(pciId));
adapter->EEPROMSupported = (eepromId == 0x8129 && pciId == 0x10ec);
}
if (!adapter->EEPROMSupported)
{
TraceLoggingWrite(
RealtekTraceProvider,
"UnsupportedEEPROM",
TraceLoggingLevel(TRACE_LEVEL_WARNING));
}
}
static
void
RtAdapterSetCurrentLinkState(
_In_ RT_ADAPTER *adapter)
{
// Gathers and indicates current link state to NDIS
//
// Normally need to take the adapter lock before updating the NIC's
// media state, but preparehardware already is serialized against all
// other callbacks to the NetAdapter.
NET_ADAPTER_LINK_STATE linkState;
RtAdapterQueryLinkState(adapter, &linkState);
NetAdapterSetCurrentLinkState(adapter->NetAdapter, &linkState);
}
NTSTATUS
RtInitializeHardware(
_In_ RT_ADAPTER *adapter,
_In_ WDFCMRESLIST resourcesRaw,
_In_ WDFCMRESLIST resourcesTranslated)
{
TraceEntryRtAdapter(adapter);
//
// Read the registry parameters
//
@ -162,47 +234,10 @@ RtInitializeHardware(
adapter->Interrupt->Isr[2].Address8 = &adapter->CSRAddress->ISR2;
adapter->Interrupt->Isr[3].Address8 = &adapter->CSRAddress->ISR3;
if (!RtAdapterQueryChipType(adapter, &adapter->ChipType))
{
//
// Unsupported card
//
NdisWriteErrorLogEntry(
adapter->NdisLegacyAdapterHandle,
NDIS_ERROR_CODE_ADAPTER_NOT_FOUND,
0);
GOTO_IF_NOT_NT_SUCCESS(Exit, status, STATUS_NOT_FOUND);
}
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
RtInitializeChipType(adapter));
TraceLoggingWrite(
RealtekTraceProvider,
"ChipType",
TraceLoggingUInt32(adapter->ChipType));
UINT16 eepromId, pciId;
if (!RtAdapterReadEepromId(adapter, &eepromId, &pciId))
{
adapter->EEPROMSupported = false;
}
else
{
TraceLoggingWrite(
RealtekTraceProvider,
"EepromId",
TraceLoggingLevel(TRACE_LEVEL_INFORMATION),
TraceLoggingUInt32(eepromId),
TraceLoggingUInt32(pciId));
adapter->EEPROMSupported = (eepromId == 0x8129 && pciId == 0x10ec);
}
if (!adapter->EEPROMSupported)
{
TraceLoggingWrite(
RealtekTraceProvider,
"UnsupportedEEPROM",
TraceLoggingLevel(TRACE_LEVEL_WARNING));
}
RtInitializeEeprom(adapter);
RtAdapterSetupHardware(adapter);
@ -237,16 +272,10 @@ RtInitializeHardware(
adapter->CSRAddress->CPCR = 0;
// Gathers and indicates current link state to NDIS
//
// Normally need to take the adapter lock before updating the NIC's
// media state, but preparehardware already is serialized against all
// other callbacks to the NetAdapter.
RtAdapterSetCurrentLinkState(adapter);
NET_ADAPTER_LINK_STATE linkState;
RtAdapterQueryLinkState(adapter, &linkState);
NetAdapterSetCurrentLinkState(adapter->NetAdapter, &linkState);
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
RtAdapterStart(adapter));
Exit:
TraceExitResult(status);
@ -257,6 +286,12 @@ void
RtReleaseHardware(
_In_ RT_ADAPTER *adapter)
{
if (adapter->HwTallyMemAlloc)
{
WdfObjectDelete(adapter->HwTallyMemAlloc);
adapter->HwTallyMemAlloc = WDF_NO_HANDLE;
}
if (adapter->CSRAddress)
{
MmUnmapIoSpace(

Просмотреть файл

@ -82,6 +82,8 @@ EvtDriverDeviceAdd(
TraceEntry();
NTSTATUS status = STATUS_SUCCESS;
PNETADAPTER_INIT adapterInit = nullptr;
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
NetAdapterDeviceInitConfig(deviceInit));
@ -115,26 +117,40 @@ EvtDriverDeviceAdd(
WdfDeviceSetAlignmentRequirement(wdfDevice, FILE_256_BYTE_ALIGNMENT);
// Default wake settings is good enough
WDF_DEVICE_POWER_POLICY_IDLE_SETTINGS idleSettings;
WDF_DEVICE_POWER_POLICY_IDLE_SETTINGS_INIT(&idleSettings, IdleCannotWakeFromS0);
idleSettings.UserControlOfIdleSettings = IdleAllowUserControl;
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
WdfDeviceAssignS0IdleSettings(wdfDevice, &idleSettings));
WDF_DEVICE_POWER_POLICY_WAKE_SETTINGS wakeSettings;
WDF_DEVICE_POWER_POLICY_WAKE_SETTINGS_INIT(&wakeSettings);
wakeSettings.UserControlOfWakeSettings = WakeAllowUserControl;
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
WdfDeviceAssignSxWakeSettings(wdfDevice, &wakeSettings));
NET_ADAPTER_CONFIG adapterConfig;
NET_ADAPTER_CONFIG_INIT(
&adapterConfig,
EvtAdapterSetCapabilities,
adapterInit = NetDefaultAdapterInitAllocate(wdfDevice);
GOTO_WITH_INSUFFICIENT_RESOURCES_IF_NULL(Exit, status, adapterInit);
NET_ADAPTER_DATAPATH_CALLBACKS datapathCallbacks;
NET_ADAPTER_DATAPATH_CALLBACKS_INIT(
&datapathCallbacks,
EvtAdapterCreateTxQueue,
EvtAdapterCreateRxQueue);
NetAdapterInitSetDatapathCallbacks(
adapterInit,
&datapathCallbacks);
WDF_OBJECT_ATTRIBUTES adapterAttributes;
WDF_OBJECT_ATTRIBUTES_INIT_CONTEXT_TYPE(&adapterAttributes, RT_ADAPTER);
NETADAPTER netAdapter;
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
NetAdapterCreate(wdfDevice, &adapterAttributes, &adapterConfig, &netAdapter));
NetAdapterCreate(adapterInit, &adapterAttributes, &netAdapter));
RT_ADAPTER *adapter = RtGetAdapterContext(netAdapter);
RT_DEVICE *device = RtGetDeviceContext(wdfDevice);
@ -151,6 +167,11 @@ EvtDriverDeviceAdd(
RtInterruptCreate(wdfDevice, adapter, &adapter->Interrupt));
Exit:
if (adapterInit != nullptr)
{
NetAdapterInitFree(adapterInit);
}
TraceExitResult(status);
return status;

Просмотреть файл

@ -41,12 +41,12 @@ GigaMacWrite(
_In_ UINT32 data
)
{
NT_ASSERT(! adapter->GigaMacInUse);
WdfSpinLockAcquire(adapter->Lock);
adapter->GigaMacInUse = true;
adapter->CSRAddress->ERIData = data;
adapter->CSRAddress->ERIAccess = access;
bool completed = false;
for (size_t count = 0; count < GIGAMAC_WAIT_COUNT; count++)
{
KeStallExecutionProcessor(GIGAMAC_WAIT_TIME);
@ -54,14 +54,15 @@ GigaMacWrite(
if (GIGAMAC_WRITE_DONE(adapter->CSRAddress->ERIAccess))
{
KeStallExecutionProcessor(GIGAMAC_WAIT_EXIT_TIME);
adapter->GigaMacInUse = false;
completed = true;
return true;
break;
}
}
adapter->GigaMacInUse = false;
return false;
WdfSpinLockRelease(adapter->Lock);
return completed;
}
static

Просмотреть файл

@ -19,117 +19,6 @@
#include "rxqueue.h"
#include "link.h"
_Requires_lock_held_(adapter->Lock)
void
RtAdapterSetOffloadParameters(
_In_ RT_ADAPTER *adapter,
_In_ NDIS_OFFLOAD_PARAMETERS *offloadParameters,
_Out_ NDIS_OFFLOAD *offloadConfiguration
)
{
switch (offloadParameters->IPv4Checksum)
{
case NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED:
adapter->IPChksumOffv4 = RtChecksumOffloadDisabled;
break;
case NDIS_OFFLOAD_PARAMETERS_TX_ENABLED_RX_DISABLED:
adapter->IPChksumOffv4 = RtChecksumOffloadTxEnabled;
break;
case NDIS_OFFLOAD_PARAMETERS_RX_ENABLED_TX_DISABLED:
adapter->IPChksumOffv4 = RtChecksumOffloadRxEnabled;
break;
case NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED:
adapter->IPChksumOffv4 = RtChecksumOffloadTxRxEnabled;
break;
}
switch (offloadParameters->TCPIPv4Checksum)
{
case NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED:
adapter->TCPChksumOffv4 = RtChecksumOffloadDisabled;
break;
case NDIS_OFFLOAD_PARAMETERS_TX_ENABLED_RX_DISABLED:
adapter->TCPChksumOffv4 = RtChecksumOffloadTxEnabled;
break;
case NDIS_OFFLOAD_PARAMETERS_RX_ENABLED_TX_DISABLED:
adapter->TCPChksumOffv4 = RtChecksumOffloadRxEnabled;
break;
case NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED:
adapter->TCPChksumOffv4 = RtChecksumOffloadTxRxEnabled;
break;
}
switch (offloadParameters->UDPIPv4Checksum)
{
case NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED:
adapter->UDPChksumOffv4 = RtChecksumOffloadDisabled;
break;
case NDIS_OFFLOAD_PARAMETERS_TX_ENABLED_RX_DISABLED:
adapter->UDPChksumOffv4 = RtChecksumOffloadTxEnabled;
break;
case NDIS_OFFLOAD_PARAMETERS_RX_ENABLED_TX_DISABLED:
adapter->UDPChksumOffv4 = RtChecksumOffloadRxEnabled;
break;
case NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED:
adapter->UDPChksumOffv4 = RtChecksumOffloadTxRxEnabled;
break;
}
switch (offloadParameters->TCPIPv6Checksum)
{
case NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED:
adapter->TCPChksumOffv6 = RtChecksumOffloadDisabled;
break;
case NDIS_OFFLOAD_PARAMETERS_TX_ENABLED_RX_DISABLED:
adapter->TCPChksumOffv6 = RtChecksumOffloadTxEnabled;
break;
case NDIS_OFFLOAD_PARAMETERS_RX_ENABLED_TX_DISABLED:
adapter->TCPChksumOffv6 = RtChecksumOffloadRxEnabled;
break;
case NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED:
adapter->TCPChksumOffv6 = RtChecksumOffloadTxRxEnabled;
break;
}
switch (offloadParameters->UDPIPv6Checksum)
{
case NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED:
adapter->UDPChksumOffv6 = RtChecksumOffloadDisabled;
break;
case NDIS_OFFLOAD_PARAMETERS_TX_ENABLED_RX_DISABLED:
adapter->UDPChksumOffv6 = RtChecksumOffloadTxEnabled;
break;
case NDIS_OFFLOAD_PARAMETERS_RX_ENABLED_TX_DISABLED:
adapter->UDPChksumOffv6 = RtChecksumOffloadRxEnabled;
break;
case NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED:
adapter->UDPChksumOffv6 = RtChecksumOffloadTxRxEnabled;
break;
}
switch (offloadParameters->LsoV2IPv4)
{
case NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED:
adapter->LSOv4 = RtLsoOffloadDisabled;
break;
case NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED:
adapter->LSOv4 = RtLsoOffloadEnabled;
break;
}
switch (offloadParameters->LsoV2IPv6)
{
case NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED:
adapter->LSOv6 = RtLsoOffloadDisabled;
break;
case NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED:
adapter->LSOv6 = RtLsoOffloadEnabled;
break;
}
RtAdapterUpdateEnabledChecksumOffloads(adapter);
RtAdapterQueryOffloadConfiguration(adapter, offloadConfiguration);
}
#define RTK_NIC_GBE_PCIE_ADAPTER_NAME "Realtek PCIe GBE Family Controller"
@ -428,51 +317,6 @@ Exit:
TraceExit();
}
void
EvtNetRequestSetTcpOffloadParameters(
_In_ NETREQUESTQUEUE RequestQueue,
_In_ NETREQUEST Request,
_In_reads_bytes_(InputBufferLength)
PVOID InputBuffer,
UINT InputBufferLength)
{
UNREFERENCED_PARAMETER(InputBufferLength);
NDIS_OID oid = NetRequestGetId(Request);
NETADAPTER netAdapter = NetRequestQueueGetAdapter(RequestQueue);
RT_ADAPTER *adapter = RtGetAdapterContext(netAdapter);
TraceEntryRtAdapter(adapter, TraceLoggingUInt32(oid));
NDIS_OFFLOAD offloadConfiguration;
WdfSpinLockAcquire(adapter->Lock); {
RtAdapterSetOffloadParameters(adapter, (NDIS_OFFLOAD_PARAMETERS*)InputBuffer, &offloadConfiguration);
} WdfSpinLockRelease(adapter->Lock);
{
NDIS_STATUS_INDICATION statusIndication;
RtlZeroMemory(&statusIndication, sizeof(NDIS_STATUS_INDICATION));
statusIndication.Header.Type = NDIS_OBJECT_TYPE_STATUS_INDICATION;
statusIndication.Header.Revision = NDIS_STATUS_INDICATION_REVISION_1;
statusIndication.Header.Size = sizeof(NDIS_STATUS_INDICATION);
statusIndication.SourceHandle = adapter->NdisLegacyAdapterHandle;
statusIndication.StatusCode = NDIS_STATUS_TASK_OFFLOAD_CURRENT_CONFIG;
statusIndication.StatusBuffer = &offloadConfiguration;
statusIndication.StatusBufferSize = sizeof(offloadConfiguration);
NdisMIndicateStatusEx(adapter->NdisLegacyAdapterHandle, &statusIndication);
}
NetRequestSetDataComplete(Request, STATUS_SUCCESS, sizeof(NDIS_OFFLOAD_PARAMETERS));
TraceExit();
}
void
EvtNetRequestSetOffloadEncapsulation(
_In_ NETREQUESTQUEUE RequestQueue,
@ -609,7 +453,6 @@ const RT_OID_SET ComplexSets[] = {
{ OID_802_3_MULTICAST_LIST, EvtNetRequestSetMulticastList, 0 },
{ OID_GEN_CURRENT_PACKET_FILTER, EvtNetRequestSetPacketFilter, sizeof(ULONG) },
{ OID_GEN_CURRENT_LOOKAHEAD, EvtNetRequestSetCurrentLookahead, sizeof(ULONG) },
{ OID_TCP_OFFLOAD_PARAMETERS, EvtNetRequestSetTcpOffloadParameters, sizeof(NDIS_OFFLOAD_PARAMETERS) },
{ OID_OFFLOAD_ENCAPSULATION, EvtNetRequestSetOffloadEncapsulation, sizeof(NDIS_OFFLOAD_ENCAPSULATION) },
{ OID_GEN_INTERRUPT_MODERATION, EvtNetRequestSetInterruptModeration, NDIS_SIZEOF_INTERRUPT_MODERATION_PARAMETERS_REVISION_1 },
};

Просмотреть файл

@ -80,7 +80,7 @@ EvtDeviceD0Entry(
// Interrupts will be fully enabled in EvtInterruptEnable
RtInterruptInitialize(adapter->Interrupt);
RtAdapterUpdateEnabledChecksumOffloads(adapter);
RtAdapterUpdateHardwareChecksum(adapter);
RtAdapterUpdateInterruptModeration(adapter);
if (previousState != WdfPowerDeviceD3Final)

Просмотреть файл

@ -81,16 +81,16 @@
#pragma region Software Limits
#define RT_MAX_FRAGMENT_SIZE 0x1000
#define RT_MAX_FRAGMENT_SIZE 0x10000
// max number of physical fragments supported per TCB
#define RT_MAX_PHYS_BUF_COUNT 16
// multicast list size
#define RT_MAX_MCAST_LIST 32
#define RT_MIN_RX_DESC 18
#define RT_MAX_RX_DESC 1024
#define RT_MIN_RX_DESC 18
#define RT_MAX_RX_DESC 1024
#define RT_MIN_TCB 32
#define RT_MAX_TCB 128

Просмотреть файл

@ -72,7 +72,7 @@ RxFillRtl8111DChecksumInfo(
{
packet->Layout.Layer3Type = NET_PACKET_LAYER3_TYPE_IPV4_UNSPECIFIED_OPTIONS;
if (adapter->IpRxHwChkSumv4)
if (adapter->IpHwChkSum)
{
checksumInfo->Layer3 =
(rxd->RxDescDataIpv6Rss.status & RXS_IPF)
@ -98,8 +98,7 @@ RxFillRtl8111DChecksumInfo(
{
packet->Layout.Layer4Type = NET_PACKET_LAYER4_TYPE_TCP;
if ((isIpv4 && adapter->TcpRxHwChkSumv4) ||
(isIpv6 && adapter->TcpRxHwChkSumv6))
if (adapter->TcpHwChkSum)
{
checksumInfo->Layer4 =
(rxd->RxDescDataIpv6Rss.IpRssTava & RXS_IPV6RSS_TCPF)
@ -111,8 +110,7 @@ RxFillRtl8111DChecksumInfo(
{
packet->Layout.Layer4Type = NET_PACKET_LAYER4_TYPE_UDP;
if ((isIpv4 && adapter->UdpRxHwChkSumv4) ||
(isIpv6 && adapter->UdpRxHwChkSumv6))
if (adapter->UdpHwChkSum)
{
checksumInfo->Layer4 =
(rxd->RxDescDataIpv6Rss.IpRssTava & RXS_IPV6RSS_UDPF)
@ -151,7 +149,7 @@ RxFillRtl8111EChecksumInfo(
{
packet->Layout.Layer3Type = NET_PACKET_LAYER3_TYPE_IPV4_UNSPECIFIED_OPTIONS;
if (adapter->IpRxHwChkSumv4)
if (adapter->IpHwChkSum)
{
checksumInfo->Layer3 =
(rxd->RxDescDataIpv6Rss.status & RXS_IPF)
@ -177,8 +175,7 @@ RxFillRtl8111EChecksumInfo(
{
packet->Layout.Layer4Type = NET_PACKET_LAYER4_TYPE_TCP;
if ((isIpv4 && adapter->TcpRxHwChkSumv4) ||
(isIpv6 && adapter->TcpRxHwChkSumv6))
if (adapter->TcpHwChkSum)
{
checksumInfo->Layer4 =
(rxd->RxDescDataIpv6Rss.TcpUdpFailure & TXS_TCPCS)
@ -190,8 +187,7 @@ RxFillRtl8111EChecksumInfo(
{
packet->Layout.Layer4Type = NET_PACKET_LAYER4_TYPE_UDP;
if ((isIpv4 && adapter->UdpRxHwChkSumv4) ||
(isIpv6 && adapter->UdpRxHwChkSumv6))
if (adapter->UdpHwChkSum)
{
checksumInfo->Layer4 =
(rxd->RxDescDataIpv6Rss.TcpUdpFailure & TXS_UDPCS)
@ -233,7 +229,7 @@ RxIndicateReceives(
for (i = rb->BeginIndex; i != rb->NextIndex; i = NetRingBufferIncrementIndex(rb, i))
{
RT_RX_DESC *rxd = &rx->RxdBase[i];
RT_RX_DESC const *rxd = &rx->RxdBase[i];
NET_PACKET *packet = NetRingBufferGetPacketAtIndex(descriptor, i);
if (0 != (rxd->RxDescDataIpv6Rss.status & RXS_OWN))
@ -243,7 +239,7 @@ RxIndicateReceives(
fragment->ValidLength = rxd->RxDescDataIpv6Rss.length - FRAME_CRC_SIZE;
fragment->Offset = 0;
fragment->LastFragmentOfFrame = true;
NT_FRE_ASSERT(packet->FragmentCount == 1);
if (rx->ChecksumExtensionOffSet != NET_PACKET_EXTENSION_INVALID_OFFSET)
{
@ -296,7 +292,7 @@ RxPostBuffers(
NTSTATUS
RtRxQueueInitialize(
_In_ NETRXQUEUE rxQueue,
_In_ NETPACKETQUEUE rxQueue,
_In_ RT_ADAPTER *adapter
)
{
@ -309,18 +305,17 @@ RtRxQueueInitialize(
rx->DatapathDescriptor = NetRxQueueGetDatapathDescriptor(rxQueue);
// allocate descriptors
{
SIZE_T rxdSize = NET_DATAPATH_DESCRIPTOR_GET_PACKET_RING_BUFFER(rx->DatapathDescriptor)->NumberOfElements * sizeof(RT_RX_DESC);
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
WdfCommonBufferCreate(
rx->Adapter->DmaEnabler,
rxdSize,
WDF_NO_OBJECT_ATTRIBUTES,
&rx->RxdArray));
auto descriptor = NET_DATAPATH_DESCRIPTOR_GET_PACKET_RING_BUFFER(rx->DatapathDescriptor);
auto const rxdSize = descriptor->NumberOfElements * sizeof(RT_RX_DESC);
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
WdfCommonBufferCreate(
rx->Adapter->DmaEnabler,
rxdSize,
WDF_NO_OBJECT_ATTRIBUTES,
&rx->RxdArray));
rx->RxdBase = static_cast<RT_RX_DESC*>(WdfCommonBufferGetAlignedVirtualAddress(rx->RxdArray));
RtlZeroMemory(rx->RxdBase, rxdSize);
}
rx->RxdBase = static_cast<RT_RX_DESC*>(WdfCommonBufferGetAlignedVirtualAddress(rx->RxdArray));
rx->RxdSize = rxdSize;
Exit:
return status;
@ -354,42 +349,6 @@ RtAdapterUpdateRcr(
RtConvertPacketFilterToRcr(adapter->PacketFilter);
}
_Use_decl_annotations_
void
RtRxQueueStart(
_In_ RT_RXQUEUE *rx
)
{
RT_ADAPTER *adapter = rx->Adapter;
bool first = true;
for (size_t i = 0; i < ARRAYSIZE(adapter->RxQueues); i++)
{
if (adapter->RxQueues[i])
{
first = false;
}
}
PHYSICAL_ADDRESS pa = WdfCommonBufferGetAlignedLogicalAddress(rx->RxdArray);
if (rx->QueueId == 0)
{
adapter->CSRAddress->RDSARLow = pa.LowPart;
adapter->CSRAddress->RDSARHigh = pa.HighPart;
}
else
{
GigaMacSetReceiveDescriptorStartAddress(adapter, rx->QueueId, pa);
}
RtAdapterUpdateRcr(adapter);
if (first)
{
adapter->CSRAddress->CmdReg |= CR_RE;
}
}
void
RtRxQueueSetInterrupt(
_In_ RT_RXQUEUE *rx,
@ -408,15 +367,50 @@ RtRxQueueSetInterrupt(
_Use_decl_annotations_
void
EvtRxQueueDestroy(
_In_ WDFOBJECT rxQueue
EvtRxQueueStart(
NETPACKETQUEUE rxQueue
)
{
TraceEntry(TraceLoggingPointer(rxQueue, "RxQueue"));
RT_RXQUEUE *rx = RtGetRxQueueContext(rxQueue);
RT_ADAPTER *adapter = rx->Adapter;
RtlZeroMemory(rx->RxdBase, rx->RxdSize);
PHYSICAL_ADDRESS pa = WdfCommonBufferGetAlignedLogicalAddress(rx->RxdArray);
if (rx->QueueId == 0)
{
adapter->CSRAddress->RDSARLow = pa.LowPart;
adapter->CSRAddress->RDSARHigh = pa.HighPart;
}
else
{
GigaMacSetReceiveDescriptorStartAddress(adapter, rx->QueueId, pa);
}
WdfSpinLockAcquire(adapter->Lock);
if (! (adapter->CSRAddress->CmdReg & CR_RE))
{
adapter->CSRAddress->CmdReg |= CR_RE;
}
adapter->RxQueues[rx->QueueId] = rxQueue;
RtAdapterUpdateRcr(adapter);
WdfSpinLockRelease(adapter->Lock);
}
_Use_decl_annotations_
void
EvtRxQueueStop(
NETPACKETQUEUE rxQueue
)
{
RT_RXQUEUE *rx = RtGetRxQueueContext(rxQueue);
size_t count = 0;
WdfSpinLockAcquire(rx->Adapter->Lock);
bool count = 0;
for (size_t i = 0; i < ARRAYSIZE(rx->Adapter->RxQueues); i++)
{
if (rx->Adapter->RxQueues[i])
@ -425,18 +419,26 @@ EvtRxQueueDestroy(
}
}
WdfSpinLockAcquire(rx->Adapter->Lock); {
if (1 == count)
{
rx->Adapter->CSRAddress->CmdReg &= ~CR_RE;
}
if (count == 1)
{
rx->Adapter->CSRAddress->CmdReg &= ~CR_RE;
}
RtRxQueueSetInterrupt(rx, false);
rx->Adapter->RxQueues[rx->QueueId] = WDF_NO_HANDLE;
RtRxQueueSetInterrupt(rx, false);
WdfSpinLockRelease(rx->Adapter->Lock);
}
rx->Adapter->RxQueues[rx->QueueId] = WDF_NO_HANDLE;
_Use_decl_annotations_
void
EvtRxQueueDestroy(
_In_ WDFOBJECT rxQueue
)
{
TraceEntry(TraceLoggingPointer(rxQueue, "RxQueue"));
} WdfSpinLockRelease(rx->Adapter->Lock);
RT_RXQUEUE *rx = RtGetRxQueueContext(rxQueue);
WdfObjectDelete(rx->RxdArray);
rx->RxdArray = NULL;
@ -447,7 +449,7 @@ EvtRxQueueDestroy(
_Use_decl_annotations_
VOID
EvtRxQueueSetNotificationEnabled(
_In_ NETRXQUEUE rxQueue,
_In_ NETPACKETQUEUE rxQueue,
_In_ BOOLEAN notificationEnabled
)
{
@ -463,7 +465,7 @@ EvtRxQueueSetNotificationEnabled(
_Use_decl_annotations_
void
EvtRxQueueAdvance(
_In_ NETRXQUEUE rxQueue
_In_ NETPACKETQUEUE rxQueue
)
{
TraceEntry(TraceLoggingPointer(rxQueue, "RxQueue"));
@ -479,11 +481,26 @@ EvtRxQueueAdvance(
_Use_decl_annotations_
void
EvtRxQueueCancel(
_In_ NETRXQUEUE rxQueue
_In_ NETPACKETQUEUE rxQueue
)
{
TraceEntry(TraceLoggingPointer(rxQueue, "RxQueue"));
RT_RXQUEUE *rx = RtGetRxQueueContext(rxQueue);
RT_ADAPTER *adapter = rx->Adapter;
WdfSpinLockAcquire(rx->Adapter->Lock);
adapter->CSRAddress->RCR = TCR_RCR_MXDMA_UNLIMITED << RCR_MXDMA_OFFSET;
adapter->CSRAddress->CmdReg &= ~CR_RE;
WdfSpinLockRelease(rx->Adapter->Lock);
// try (but not very hard) to grab anything that may have been
// indicated during rx disable. advance will continue to be called
// after cancel until all packets are returned to the framework.
RxIndicateReceives(rx);
NetRingBufferReturnAllPackets(NetRxQueueGetDatapathDescriptor(rxQueue));
TraceExit();

Просмотреть файл

@ -20,6 +20,7 @@ struct RT_RXQUEUE
WDFCOMMONBUFFER RxdArray;
RT_RX_DESC *RxdBase;
size_t RxdSize;
size_t ChecksumExtensionOffSet;
@ -28,16 +29,15 @@ struct RT_RXQUEUE
WDF_DECLARE_CONTEXT_TYPE_WITH_NAME(RT_RXQUEUE, RtGetRxQueueContext);
NTSTATUS RtRxQueueInitialize(_In_ NETRXQUEUE rxQueue, _In_ RT_ADAPTER * adapter);
NTSTATUS RtRxQueueInitialize(_In_ NETPACKETQUEUE rxQueue, _In_ RT_ADAPTER * adapter);
_Requires_lock_held_(adapter->Lock)
void RtAdapterUpdateRcr(_In_ RT_ADAPTER *adapter);
_Requires_lock_held_(rx->Adapter->Lock)
void RtRxQueueStart(_In_ RT_RXQUEUE *rx);
EVT_WDF_OBJECT_CONTEXT_DESTROY EvtRxQueueDestroy;
EVT_RXQUEUE_SET_NOTIFICATION_ENABLED EvtRxQueueSetNotificationEnabled;
EVT_RXQUEUE_ADVANCE EvtRxQueueAdvance;
EVT_RXQUEUE_CANCEL EvtRxQueueCancel;
EVT_PACKET_QUEUE_SET_NOTIFICATION_ENABLED EvtRxQueueSetNotificationEnabled;
EVT_PACKET_QUEUE_ADVANCE EvtRxQueueAdvance;
EVT_PACKET_QUEUE_CANCEL EvtRxQueueCancel;
EVT_PACKET_QUEUE_START EvtRxQueueStart;
EVT_PACKET_QUEUE_STOP EvtRxQueueStop;

Просмотреть файл

@ -80,17 +80,15 @@ Return Value:
NTSTATUS status = STATUS_SUCCESS;
// Allocate memory for Tally counter
WDFCOMMONBUFFER HwTallyMemAlloc = WDF_NO_HANDLE;
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
WdfCommonBufferCreate(
adapter->DmaEnabler,
sizeof(RT_TALLY),
WDF_NO_OBJECT_ATTRIBUTES,
&HwTallyMemAlloc));
&adapter->HwTallyMemAlloc));
adapter->GTally = static_cast<RT_TALLY*>(WdfCommonBufferGetAlignedVirtualAddress(HwTallyMemAlloc));
adapter->TallyPhy = WdfCommonBufferGetAlignedLogicalAddress(HwTallyMemAlloc);
adapter->GTally = static_cast<RT_TALLY*>(WdfCommonBufferGetAlignedVirtualAddress(adapter->HwTallyMemAlloc));
adapter->TallyPhy = WdfCommonBufferGetAlignedLogicalAddress(adapter->HwTallyMemAlloc);
RtlZeroMemory(adapter->GTally, sizeof(*adapter->GTally));
@ -286,4 +284,4 @@ EvtNetRequestQueryIndividualStatistics(
TraceExit();
}
}

Просмотреть файл

@ -37,16 +37,12 @@ RtUpdateSendStats(
}
PUCHAR ethHeader = (PUCHAR)fragment->VirtualAddress + fragment->Offset;
UINT32 fragmentCount = NetPacketGetFragmentCount(descriptor, packet);
ULONG length = 0;
for (UINT32 i = 0; i < fragmentCount; i ++)
for (UINT32 i = 0; i < packet->FragmentCount; i ++)
{
fragment = NET_PACKET_GET_FRAGMENT(packet, descriptor, i);
length += (ULONG)fragment->ValidLength;
if (fragment->LastFragmentOfFrame)
break;
}
RT_ADAPTER *adapter = tx->Adapter;
@ -68,17 +64,6 @@ RtUpdateSendStats(
}
}
TX_DMA_BOUNCE_ANALYSIS
EvtSgBounceAnalysis(
_In_ NETTXQUEUE txQueue,
_In_ NET_PACKET *packet
)
{
UNREFERENCED_PARAMETER((txQueue, packet));
return TxDmaTransmitInPlace;
}
static
USHORT
RtGetPacketLsoStatusSetting(
@ -102,7 +87,7 @@ RtGetPacketLsoStatusSetting(
}
static
ULONG
UINT16
RtGetPacketLsoMss(
_In_ const NET_PACKET *packet,
_In_ size_t lsoOffset
@ -114,7 +99,7 @@ RtGetPacketLsoMss(
static
USHORT
RtGetPacketChecksumSetting(
_In_ NET_PACKET *packet,
_In_ NET_PACKET const * packet,
_In_ size_t checksumOffset
)
{
@ -178,96 +163,122 @@ RtGetPacketChecksumSetting(
return 0;
}
void
EvtSgProgramDescriptors(
_In_ NETTXQUEUE txQueue,
_In_ NET_PACKET *packet,
_In_ SCATTER_GATHER_LIST *sgl
static
UINT16
RtProgramOffloadDescriptor(
_In_ RT_TXQUEUE const * tx,
_In_ NET_PACKET const * packet,
_In_ RT_TX_DESC * txd
)
{
RT_TXQUEUE *tx = RtGetTxQueueContext(txQueue);
UINT16 status = 0;
txd->TxDescDataIpv6Rss_All.OffloadGsoMssTagc = 0;
RT_ADAPTER* adapter = tx->Adapter;
if (packet->Layout.Layer4Type == NET_PACKET_LAYER4_TYPE_TCP
&& RtGetPacketLsoMss(packet, tx->LsoExtensionOffset) > 0)
{
if ((tx->LsoExtensionOffset != NET_PACKET_EXTENSION_INVALID_OFFSET) &&
(adapter->LSOv4 == RtLsoOffloadEnabled || adapter->LSOv6 == RtLsoOffloadEnabled))
{
status |= RtGetPacketLsoStatusSetting(packet);
txd->TxDescDataIpv6Rss_All.OffloadGsoMssTagc =
RtGetPacketLsoMss(packet, tx->LsoExtensionOffset) << TXS_IPV6RSS_MSS_OFFSET;
}
}
else
{
if ((tx->ChecksumExtensionOffSet != NET_PACKET_EXTENSION_INVALID_OFFSET) &&
(adapter->TcpHwChkSum || adapter->IpHwChkSum || adapter->UdpHwChkSum))
{
txd->TxDescDataIpv6Rss_All.OffloadGsoMssTagc =
RtGetPacketChecksumSetting(packet, tx->ChecksumExtensionOffSet);
}
}
return status;
}
static
void
RtProgramDescriptors(
_In_ RT_TXQUEUE *tx,
_In_ NET_PACKET *packet
)
{
PCNET_DATAPATH_DESCRIPTOR descriptor = tx->DatapathDescriptor;
RtUpdateSendStats(tx, packet);
RT_TCB *tcb = GetTcbFromPacketFromToken(tx->DatapathDescriptor, packet, tx->TcbToken);
for (ULONG sgeIndex = 0; sgeIndex < sgl->NumberOfElements; sgeIndex++)
for (UINT32 i = 0; i < packet->FragmentCount; i++)
{
SCATTER_GATHER_ELEMENT *sge = &sgl->Elements[sgeIndex];
RT_TX_DESC *txd = &tx->TxdBase[tx->TxDescGetptr];
bool const lastFragment = i + 1 == packet->FragmentCount;
NET_PACKET_FRAGMENT *fragment = NET_PACKET_GET_FRAGMENT(packet, descriptor, i);
RT_TX_DESC *txd = &tx->TxdBase[tx->TxDescIndex];
USHORT status = TXS_OWN;
// Last TXD; next should wrap
if (tx->TxDescGetptr == tx->NumTxDesc - 1)
if (tx->TxDescIndex == tx->NumTxDesc - 1)
{
status |= TXS_EOR;
}
// First fragment of packet
if (sgeIndex == 0)
if (i == 0)
{
status |= TXS_FS;
// Store the hardware descriptor of the first
// scatter/gather element
tcb->FirstTxDescIdx = tx->TxDescGetptr;
// Store the hardware descriptor of the first fragment
tcb->FirstTxDescIdx = tx->TxDescIndex;
tcb->NumTxDesc = 0;
}
// Last fragment of packet
if (sgeIndex + 1 == sgl->NumberOfElements)
if (lastFragment)
{
status |= TXS_LS;
}
// TODO: vlan
txd->BufferAddress = sge->Address;
txd->TxDescDataIpv6Rss_All.length = (USHORT)sge->Length;
txd->BufferAddress.QuadPart = fragment->Mapping.DmaLogicalAddress.QuadPart + fragment->Offset;
txd->TxDescDataIpv6Rss_All.length = (USHORT)fragment->ValidLength;
txd->TxDescDataIpv6Rss_All.VLAN_TAG.Value = 0;
if ((packet->Layout.Layer4Type == NET_PACKET_LAYER4_TYPE_TCP) &&
(RtGetPacketLsoMss(packet, tx->LsoExtensionOffset) > 0))
{
if (tx->LsoExtensionOffset != NET_PACKET_EXTENSION_INVALID_OFFSET)
{
status |= RtGetPacketLsoStatusSetting(packet);
txd->TxDescDataIpv6Rss_All.OffloadGsoMssTagc = (USHORT)(RtGetPacketLsoMss(packet, tx->LsoExtensionOffset) << TXS_IPV6RSS_MSS_OFFSET);
}
}
else
{
if (tx->ChecksumExtensionOffSet != NET_PACKET_EXTENSION_INVALID_OFFSET)
{
txd->TxDescDataIpv6Rss_All.OffloadGsoMssTagc = RtGetPacketChecksumSetting(packet, tx->ChecksumExtensionOffSet);
}
}
status |= RtProgramOffloadDescriptor(tx, packet, txd);
MemoryBarrier();
txd->TxDescDataIpv6Rss_All.status = status;
tx->TxDescGetptr = (tx->TxDescGetptr + 1) % tx->NumTxDesc;
}
tx->TxDescIndex = (tx->TxDescIndex + 1) % tx->NumTxDesc;
tcb->NumTxDesc = sgl->NumberOfElements;
if (lastFragment)
{
tcb->NumTxDesc = i + 1;
break;
}
}
}
static
void
EvtSgFlushTransation(
_In_ NETTXQUEUE txQueue
RtFlushTransation(
_In_ RT_TXQUEUE *tx
)
{
auto tx = RtGetTxQueueContext(txQueue);
MemoryBarrier();
*tx->TPPoll = TPPoll_NPQ;
}
NTSTATUS
EvtSgGetPacketStatus(
_In_ NETTXQUEUE txQueue,
static
bool
RtIsPacketTransferComplete(
_In_ RT_TXQUEUE *tx,
_In_ NET_PACKET *packet
)
{
RT_TXQUEUE *tx = RtGetTxQueueContext(txQueue);
RT_TCB *tcb = GetTcbFromPacketFromToken(tx->DatapathDescriptor, packet, tx->TcbToken);
RT_TX_DESC *txd = &tx->TxdBase[tcb->FirstTxDescIdx];
@ -275,7 +286,7 @@ EvtSgGetPacketStatus(
// If the hardware-ownership flag is still set, then the packet isn't done.
if (0 != (txd->TxDescDataIpv6Rss_All.status & TXS_OWN))
{
return STATUS_PENDING;
return false;
}
else
{
@ -287,12 +298,84 @@ EvtSgGetPacketStatus(
}
}
return STATUS_SUCCESS;
return true;
}
static
void
RtTransmitPackets(
_In_ RT_TXQUEUE *tx
)
{
PCNET_DATAPATH_DESCRIPTOR descriptor = tx->DatapathDescriptor;
NET_RING_BUFFER *ringBuffer = NET_DATAPATH_DESCRIPTOR_GET_PACKET_RING_BUFFER(descriptor);
size_t programmedPackets = 0;
while (ringBuffer->NextIndex != ringBuffer->EndIndex)
{
NET_PACKET *netPacket = NetRingBufferGetNextPacket(descriptor);
if (!netPacket->IgnoreThisPacket)
{
RtProgramDescriptors(tx, netPacket);
programmedPackets++;
}
ringBuffer->NextIndex = NetRingBufferIncrementIndex(ringBuffer, ringBuffer->NextIndex);
}
if (programmedPackets > 0)
{
RtFlushTransation(tx);
}
}
static
void
RtCompleteTransmitPackets(
_In_ RT_TXQUEUE *tx
)
{
PCNET_DATAPATH_DESCRIPTOR descriptor = tx->DatapathDescriptor;
NET_RING_BUFFER *ringBuffer = NET_DATAPATH_DESCRIPTOR_GET_PACKET_RING_BUFFER(descriptor);
while (ringBuffer->BeginIndex != ringBuffer->NextIndex)
{
NET_PACKET *packet = NetRingBufferGetPacketAtIndex(descriptor, ringBuffer->BeginIndex);
if (!packet->IgnoreThisPacket)
{
if (!RtIsPacketTransferComplete(tx, packet))
{
// We need to complete packets in order, if the current is still
// pending there is no point in keep trying
break;
}
}
ringBuffer->BeginIndex = NetRingBufferIncrementIndex(ringBuffer, ringBuffer->BeginIndex);
}
}
_Use_decl_annotations_
void
EvtTxQueueAdvance(
_In_ NETPACKETQUEUE txQueue
)
{
TraceEntry(TraceLoggingPointer(txQueue, "TxQueue"));
RT_TXQUEUE *tx = RtGetTxQueueContext(txQueue);
RtTransmitPackets(tx);
RtCompleteTransmitPackets(tx);
TraceExit();
}
NTSTATUS
RtTxQueueInitialize(
_In_ NETTXQUEUE txQueue,
_In_ NETPACKETQUEUE txQueue,
_In_ RT_ADAPTER * adapter
)
{
@ -301,7 +384,7 @@ RtTxQueueInitialize(
tx->Adapter = adapter;
tx->TcbToken = NET_TXQUEUE_GET_PACKET_CONTEXT_TOKEN(txQueue, RT_TCB);
tx->TcbToken = NetTxQueueGetPacketContextToken(txQueue, WDF_GET_CONTEXT_TYPE_INFO(RT_TCB));
tx->TPPoll = &adapter->CSRAddress->TPPoll;
tx->Interrupt = adapter->Interrupt;
@ -309,54 +392,25 @@ RtTxQueueInitialize(
tx->DatapathDescriptor = NetTxQueueGetDatapathDescriptor(txQueue);
// Allocate descriptors
{
ULONG allocSize;
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
RtlULongMult(tx->NumTxDesc, sizeof(RT_TX_DESC), &allocSize));
ULONG txSize;
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
RtlULongMult(tx->NumTxDesc, sizeof(RT_TX_DESC), &txSize));
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
WdfCommonBufferCreate(
tx->Adapter->DmaEnabler,
allocSize,
WDF_NO_OBJECT_ATTRIBUTES,
&tx->TxdArray));
GOTO_IF_NOT_NT_SUCCESS(Exit, status,
WdfCommonBufferCreate(
tx->Adapter->DmaEnabler,
txSize,
WDF_NO_OBJECT_ATTRIBUTES,
&tx->TxdArray));
tx->TxdBase = static_cast<RT_TX_DESC*>(
WdfCommonBufferGetAlignedVirtualAddress(tx->TxdArray));
tx->TxDescGetptr = 0;
RtlZeroMemory(tx->TxdBase, allocSize);
}
tx->TxdBase = static_cast<RT_TX_DESC*>(
WdfCommonBufferGetAlignedVirtualAddress(tx->TxdArray));
tx->TxSize = txSize;
Exit:
return status;
}
_Use_decl_annotations_
void RtTxQueueStart(
_In_ RT_TXQUEUE *tx
)
{
RT_ADAPTER *adapter = tx->Adapter;
adapter->CSRAddress->TDFNR = 8;
// Max transmit packet size
adapter->CSRAddress->MtpsReg.MTPS = (RT_MAX_FRAME_SIZE + 128 - 1) / 128;
PHYSICAL_ADDRESS pa = WdfCommonBufferGetAlignedLogicalAddress(tx->TxdArray);
// let hardware know where transmit descriptors are at
adapter->CSRAddress->TNPDSLow = pa.LowPart;
adapter->CSRAddress->TNPDSHigh = pa.HighPart;
adapter->CSRAddress->CmdReg |= CR_TE;
// data sheet says TCR should only be modified after the transceiver is enabled
adapter->CSRAddress->TCR = (TCR_RCR_MXDMA_UNLIMITED << TCR_MXDMA_OFFSET) | (TCR_IFG0 | TCR_IFG1 | TCR_BIT0);
}
void
RtTxQueueSetInterrupt(
_In_ RT_TXQUEUE *tx,
@ -373,6 +427,59 @@ RtTxQueueSetInterrupt(
KeFlushQueuedDpcs();
}
_Use_decl_annotations_
void
EvtTxQueueStart(
_In_ NETPACKETQUEUE txQueue
)
{
RT_TXQUEUE *tx = RtGetTxQueueContext(txQueue);
RT_ADAPTER *adapter = tx->Adapter;
RtlZeroMemory(tx->TxdBase, tx->TxSize);
tx->TxDescIndex = 0;
WdfSpinLockAcquire(adapter->Lock);
adapter->CSRAddress->TDFNR = 8;
// Max transmit packet size
adapter->CSRAddress->MtpsReg.MTPS = (RT_MAX_FRAME_SIZE + 128 - 1) / 128;
PHYSICAL_ADDRESS pa = WdfCommonBufferGetAlignedLogicalAddress(tx->TxdArray);
// let hardware know where transmit descriptors are at
adapter->CSRAddress->TNPDSLow = pa.LowPart;
adapter->CSRAddress->TNPDSHigh = pa.HighPart;
adapter->CSRAddress->CmdReg |= CR_TE;
// data sheet says TCR should only be modified after the transceiver is enabled
adapter->CSRAddress->TCR = (TCR_RCR_MXDMA_UNLIMITED << TCR_MXDMA_OFFSET) | (TCR_IFG0 | TCR_IFG1 | TCR_BIT0);
adapter->TxQueue = txQueue;
WdfSpinLockRelease(adapter->Lock);
}
_Use_decl_annotations_
void
EvtTxQueueStop(
NETPACKETQUEUE txQueue
)
{
RT_TXQUEUE *tx = RtGetTxQueueContext(txQueue);
WdfSpinLockAcquire(tx->Adapter->Lock);
tx->Adapter->CSRAddress->CmdReg &= ~CR_TE;
RtTxQueueSetInterrupt(tx, false);
tx->Adapter->TxQueue = WDF_NO_HANDLE;
WdfSpinLockRelease(tx->Adapter->Lock);
}
_Use_decl_annotations_
void
EvtTxQueueDestroy(
@ -381,16 +488,6 @@ EvtTxQueueDestroy(
{
RT_TXQUEUE *tx = RtGetTxQueueContext(txQueue);
WdfSpinLockAcquire(tx->Adapter->Lock); {
tx->Adapter->CSRAddress->CmdReg &= ~CR_TE;
RtTxQueueSetInterrupt(tx, false);
tx->Adapter->TxQueue = WDF_NO_HANDLE;
} WdfSpinLockRelease(tx->Adapter->Lock);
WdfObjectDelete(tx->TxdArray);
tx->TxdArray = NULL;
}
@ -398,7 +495,7 @@ EvtTxQueueDestroy(
_Use_decl_annotations_
VOID
EvtTxQueueSetNotificationEnabled(
_In_ NETTXQUEUE txQueue,
_In_ NETPACKETQUEUE txQueue,
_In_ BOOLEAN notificationEnabled
)
{
@ -414,7 +511,7 @@ EvtTxQueueSetNotificationEnabled(
_Use_decl_annotations_
void
EvtTxQueueCancel(
_In_ NETTXQUEUE txQueue
_In_ NETPACKETQUEUE txQueue
)
{
TraceEntry(TraceLoggingPointer(txQueue, "TxQueue"));

Просмотреть файл

@ -11,22 +11,6 @@
#pragma once
#define TX_DMA_FX_ALLOC_TAG 'xTtR'
#include "txdmafxtypes.h"
EVT_TX_DMA_QUEUE_PROGRAM_DESCRIPTORS EvtSgProgramDescriptors;
EVT_TX_DMA_QUEUE_FLUSH_TRANSACTION EvtSgFlushTransation;
EVT_TX_DMA_QUEUE_GET_PACKET_STATUS EvtSgGetPacketStatus;
EVT_TX_DMA_QUEUE_BOUNCE_ANALYSIS EvtSgBounceAnalysis;
#define TX_DMA_FX_PROGRAM_DESCRIPTORS EvtSgProgramDescriptors
#define TX_DMA_FX_GET_PACKET_STATUS EvtSgGetPacketStatus
#define TX_DMA_FX_FLUSH_TRANSACTION EvtSgFlushTransation
#define TX_DMA_FX_BOUNCE_ANALYSIS EvtSgBounceAnalysis
#include "txdmafx.h"
typedef struct _RT_TXQUEUE
{
RT_ADAPTER *Adapter;
@ -38,9 +22,10 @@ typedef struct _RT_TXQUEUE
// descriptor information
WDFCOMMONBUFFER TxdArray;
RT_TX_DESC *TxdBase;
size_t TxSize;
USHORT NumTxDesc;
USHORT TxDescGetptr;
USHORT TxDescIndex;
UCHAR volatile *TPPoll;
@ -63,11 +48,15 @@ typedef struct _RT_TCB
NET_PACKET_DECLARE_CONTEXT_TYPE_WITH_NAME(RT_TCB, GetTcbFromPacket);
NTSTATUS RtTxQueueInitialize(_In_ NETTXQUEUE txQueue, _In_ RT_ADAPTER *adapter);
NTSTATUS RtTxQueueInitialize(_In_ NETPACKETQUEUE txQueue, _In_ RT_ADAPTER *adapter);
_Requires_lock_held_(tx->Adapter->Lock)
void RtTxQueueStart(_In_ RT_TXQUEUE *tx);
EVT_WDF_OBJECT_CONTEXT_DESTROY EvtTxQueueDestroy;
EVT_TXQUEUE_SET_NOTIFICATION_ENABLED EvtTxQueueSetNotificationEnabled;
EVT_TXQUEUE_CANCEL EvtTxQueueCancel;
EVT_PACKET_QUEUE_SET_NOTIFICATION_ENABLED EvtTxQueueSetNotificationEnabled;
EVT_PACKET_QUEUE_ADVANCE EvtTxQueueAdvance;
EVT_PACKET_QUEUE_CANCEL EvtTxQueueCancel;
EVT_PACKET_QUEUE_START EvtTxQueueStart;
EVT_PACKET_QUEUE_STOP EvtTxQueueStop;