net: ep93xx_eth: allocate buffers using kmalloc()
We can simply use kmalloc() to allocate the buffers. This also simplifies the code and allows us to perform DMA sync operations more easily. Memory is allocated with GFP_KERNEL only, since there are no DMA allocation restrictions on this platform.

Signed-off-by: Mika Westerberg <mika.westerberg@iki.fi>
Acked-by: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: H Hartley Sweeten <hsweeten@visionengravers.com>
Tested-by: Petr Stetiar <ynezz@true.cz>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: fc9b4910b0
Commit: 3247a1fcee
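For context, the sketch below illustrates the allocate-and-map pattern the patch switches to, and why a per-buffer streaming mapping makes DMA sync operations straightforward on the receive path. It is an illustrative sketch, not code from the patch: the helper name example_alloc_rx_buf() is invented here for demonstration, while kmalloc(), kfree(), dma_map_single(), dma_mapping_error() and the dma_sync_single_*() helpers are the standard kernel APIs the driver relies on; PKT_BUF_SIZE stands in for the driver's own buffer size constant.

/*
 * Illustrative sketch only (not part of the patch): allocate one RX
 * buffer with plain GFP_KERNEL and set up a streaming DMA mapping for
 * it, mirroring what ep93xx_alloc_buffers() does per descriptor.
 */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

static int example_alloc_rx_buf(struct device *dev, size_t len,
                                void **bufp, dma_addr_t *dmap)
{
        void *buf;
        dma_addr_t d;

        /* No GFP_DMA needed: this platform has no DMA addressing limits. */
        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* Map exactly the packet buffer, not a whole page. */
        d = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, d)) {
                kfree(buf);
                return -ENOMEM;
        }

        *bufp = buf;
        *dmap = d;
        return 0;
}

/*
 * With one mapping per buffer, the receive path can hand the buffer to
 * the CPU and back to the device with the usual sync helpers, e.g.:
 *
 *      dma_sync_single_for_cpu(dev, d, len, DMA_FROM_DEVICE);
 *      ...copy the received frame out of buf...
 *      dma_sync_single_for_device(dev, d, len, DMA_FROM_DEVICE);
 */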
@@ -460,36 +460,32 @@ static void ep93xx_free_buffers(struct ep93xx_priv *ep)
         struct device *dev = ep->dev->dev.parent;
         int i;
 
-        for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
+        for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
                 dma_addr_t d;
 
                 d = ep->descs->rdesc[i].buf_addr;
                 if (d)
-                        dma_unmap_single(dev, d, PAGE_SIZE, DMA_FROM_DEVICE);
+                        dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
 
                 if (ep->rx_buf[i] != NULL)
-                        free_page((unsigned long)ep->rx_buf[i]);
+                        kfree(ep->rx_buf[i]);
         }
 
-        for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
+        for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
                 dma_addr_t d;
 
                 d = ep->descs->tdesc[i].buf_addr;
                 if (d)
-                        dma_unmap_single(dev, d, PAGE_SIZE, DMA_TO_DEVICE);
+                        dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
 
                 if (ep->tx_buf[i] != NULL)
-                        free_page((unsigned long)ep->tx_buf[i]);
+                        kfree(ep->tx_buf[i]);
         }
 
         dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
                                                         ep->descs_dma_addr);
 }
 
-/*
- * The hardware enforces a sub-2K maximum packet size, so we put
- * two buffers on every hardware page.
- */
 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 {
         struct device *dev = ep->dev->dev.parent;
@@ -500,48 +496,41 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
         if (ep->descs == NULL)
                 return 1;
 
-        for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
-                void *page;
+        for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+                void *buf;
                 dma_addr_t d;
 
-                page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-                if (page == NULL)
+                buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+                if (buf == NULL)
                         goto err;
 
-                d = dma_map_single(dev, page, PAGE_SIZE, DMA_FROM_DEVICE);
+                d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
                 if (dma_mapping_error(dev, d)) {
-                        free_page((unsigned long)page);
+                        kfree(buf);
                         goto err;
                 }
 
-                ep->rx_buf[i] = page;
+                ep->rx_buf[i] = buf;
                 ep->descs->rdesc[i].buf_addr = d;
                 ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
-
-                ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
-                ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
-                ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
         }
 
-        for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
-                void *page;
+        for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+                void *buf;
                 dma_addr_t d;
 
-                page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-                if (page == NULL)
+                buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+                if (buf == NULL)
                         goto err;
 
-                d = dma_map_single(dev, page, PAGE_SIZE, DMA_TO_DEVICE);
+                d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
                 if (dma_mapping_error(dev, d)) {
-                        free_page((unsigned long)page);
+                        kfree(buf);
                         goto err;
                 }
 
-                ep->tx_buf[i] = page;
+                ep->tx_buf[i] = buf;
                 ep->descs->tdesc[i].buf_addr = d;
-
-                ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
-                ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
         }
 
         return 0;