/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *          Santiago Leon <santil@linux.vnet.ibm.com>
 *          Brian King <brking@linux.vnet.ibm.com>
 *          Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */

#ifndef _IBMVETH_H
#define _IBMVETH_H

/* constants for H_MULTICAST_CTRL */
#define IbmVethMcastReceptionModifyBit 0x80000UL
#define IbmVethMcastReceptionEnableBit 0x20000UL
#define IbmVethMcastFilterModifyBit 0x40000UL
#define IbmVethMcastFilterEnableBit 0x10000UL

#define IbmVethMcastEnableRecv (IbmVethMcastReceptionModifyBit | IbmVethMcastReceptionEnableBit)
#define IbmVethMcastDisableRecv (IbmVethMcastReceptionModifyBit)
#define IbmVethMcastEnableFiltering (IbmVethMcastFilterModifyBit | IbmVethMcastFilterEnableBit)
#define IbmVethMcastDisableFiltering (IbmVethMcastFilterModifyBit)
#define IbmVethMcastAddFilter 0x1UL
#define IbmVethMcastRemoveFilter 0x2UL
#define IbmVethMcastClearFilterTable 0x3UL
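/*
 * Illustrative sketch (not part of this header): the driver issues these
 * commands through the h_multicast_ctrl() wrapper defined below, e.g.
 *
 *        h_multicast_ctrl(vdev->unit_address, IbmVethMcastEnableRecv, 0);
 *        h_multicast_ctrl(vdev->unit_address, IbmVethMcastAddFilter, mcast_addr);
 *
 * where mcast_addr is assumed to be the multicast MAC address packed into
 * the low 48 bits of an unsigned long.
 */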
/* adapter attribute bits, queried and set via h_illan_attributes() below */
#define IBMVETH_ILLAN_PADDED_PKT_CSUM 0x0000000000002000UL
#define IBMVETH_ILLAN_TRUNK_PRI_MASK 0x0000000000000F00UL
#define IBMVETH_ILLAN_IPV6_TCP_CSUM 0x0000000000000004UL
#define IBMVETH_ILLAN_IPV4_TCP_CSUM 0x0000000000000002UL
#define IBMVETH_ILLAN_ACTIVE_TRUNK 0x0000000000000001UL
/* hcall macros */
#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
        plpar_hcall_norets(H_REGISTER_LOGICAL_LAN, ua, buflst, rxq, fltlst, mac)

#define h_free_logical_lan(ua) \
        plpar_hcall_norets(H_FREE_LOGICAL_LAN, ua)

#define h_add_logical_lan_buffer(ua, buf) \
        plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
static inline long h_send_logical_lan(unsigned long unit_address,
                unsigned long desc1, unsigned long desc2, unsigned long desc3,
                unsigned long desc4, unsigned long desc5, unsigned long desc6,
                unsigned long correlator_in, unsigned long *correlator_out)
{
        long rc;
        unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

        rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1,
                        desc2, desc3, desc4, desc5, desc6, correlator_in);

        *correlator_out = retbuf[0];

        return rc;
}
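/*
 * Usage sketch (an assumption, not defined in this header): the transmit
 * path packs up to six buffer descriptors into desc1..desc6; on a busy
 * return code the call is retried with the correlator handed back by the
 * hypervisor, roughly:
 *
 *        unsigned long correlator = 0;
 *        do {
 *                rc = h_send_logical_lan(ua, d1, d2, d3, d4, d5, d6,
 *                                        correlator, &correlator);
 *        } while (rc == H_BUSY);
 */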
static inline long h_illan_attributes(unsigned long unit_address,
                unsigned long reset_mask, unsigned long set_mask,
                unsigned long *ret_attributes)
{
        long rc;
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

        rc = plpar_hcall(H_ILLAN_ATTRIBUTES, retbuf, unit_address,
                        reset_mask, set_mask);

        *ret_attributes = retbuf[0];

        return rc;
}
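/*
 * Usage sketch (an assumption, not part of this header): clearing and then
 * setting the IPv4 checksum-offload attribute might look like
 *
 *        unsigned long attrs;
 *        rc = h_illan_attributes(vdev->unit_address,
 *                                IBMVETH_ILLAN_IPV4_TCP_CSUM, 0, &attrs);
 *        rc = h_illan_attributes(vdev->unit_address, 0,
 *                                IBMVETH_ILLAN_IPV4_TCP_CSUM, &attrs);
 *
 * with *attrs holding the adapter's resulting attribute bits on return.
 */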
#define h_multicast_ctrl(ua, cmd, mac) \
        plpar_hcall_norets(H_MULTICAST_CTRL, ua, cmd, mac)

#define h_change_logical_lan_mac(ua, mac) \
        plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
#define IBMVETH_NUM_BUFF_POOLS 5
#define IBMVETH_IO_ENTITLEMENT_DEFAULT 4243456 /* MTU of 1500 needs 4.2MB */
#define IBMVETH_BUFF_OH 22 /* Overhead: 14-byte ethernet header + 8-byte opaque handle */
#define IBMVETH_MIN_MTU 68
#define IBMVETH_MAX_POOL_COUNT 4096
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
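/*
 * Per-pool defaults: five pools of 512B, 2KB, 16KB, 32KB and 64KB buffers,
 * with only the two smallest pools active by default, which is sufficient
 * for a standard 1500-byte MTU. The larger pools can be enabled (via the
 * per-pool sysfs attributes or an MTU change) when bigger receive buffers
 * are needed.
 */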
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
static int pool_active[] = { 1, 1, 0, 0, 0 };
#define IBM_VETH_INVALID_MAP ((u16)0xffff)
struct ibmveth_buff_pool {
        u32 size;               /* number of buffers in the pool */
        u32 index;              /* pool number, 0..IBMVETH_NUM_BUFF_POOLS-1 */
        u32 buff_size;          /* size in bytes of each buffer */
        u32 threshold;          /* replenish when available drops below this */
        atomic_t available;     /* buffers currently posted to firmware */
        u32 consumer_index;
        u32 producer_index;
        u16 *free_map;          /* ring of free buffer-slot indices */
        dma_addr_t *dma_addr;
        struct sk_buff **skbuff;
        int active;
        struct kobject kobj;    /* sysfs handle for per-pool attributes */
};
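/*
 * A note on the free_map ring (based on how the driver uses these fields;
 * an assumption from this header's point of view): free_map holds the
 * indices of unused buffer slots. The replenish path takes a slot index at
 * consumer_index and stores IBM_VETH_INVALID_MAP in its place, while the
 * receive-completion path returns a slot's index at producer_index.
 */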
struct ibmveth_rx_q {
        u64 index;
        u64 num_slots;
        u64 toggle;
        dma_addr_t queue_dma;
        u32 queue_len;
        struct ibmveth_rx_q_entry *queue_addr;
};
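/*
 * Note (an assumption drawn from the flag bits further below): "toggle"
 * tracks which polarity of IBMVETH_RXQ_TOGGLE marks a current entry; it
 * flips each time the driver wraps around the receive queue, so stale
 * entries from the previous pass are never mistaken for new ones.
 */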
struct ibmveth_adapter {
        struct vio_dev *vdev;
        struct net_device *netdev;
        struct napi_struct napi;
        struct net_device_stats stats;
        unsigned int mcastFilterSize;
        unsigned long mac_addr;
        void *buffer_list_addr;
        void *filter_list_addr;
        dma_addr_t buffer_list_dma;
        dma_addr_t filter_list_dma;
        struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
        struct ibmveth_rx_q rx_queue;
        int pool_config;
        int rx_csum;
        void *bounce_buffer;
        dma_addr_t bounce_buffer_dma;

        u64 fw_ipv6_csum_support;
        u64 fw_ipv4_csum_support;

        /* adapter specific stats */
        u64 replenish_task_cycles;
        u64 replenish_no_mem;
        u64 replenish_add_buff_failure;
        u64 replenish_add_buff_success;
        u64 rx_invalid_buffer;
        u64 rx_no_buffer;
        u64 tx_map_failed;
        u64 tx_send_failed;
};
struct ibmveth_buf_desc_fields {
        u32 flags_len;
#define IBMVETH_BUF_VALID 0x80000000
#define IBMVETH_BUF_TOGGLE 0x40000000
#define IBMVETH_BUF_NO_CSUM 0x02000000
#define IBMVETH_BUF_CSUM_GOOD 0x01000000
#define IBMVETH_BUF_LEN_MASK 0x00FFFFFF
        u32 address;
};

union ibmveth_buf_desc {
        u64 desc;
        struct ibmveth_buf_desc_fields fields;
};
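/*
 * Usage sketch (an assumption, not spelled out in this header): the union
 * lets a descriptor be built field by field and then handed to the hcalls
 * as a single u64, e.g.
 *
 *        union ibmveth_buf_desc desc;
 *        desc.fields.flags_len = IBMVETH_BUF_VALID | len;
 *        desc.fields.address = dma_addr;
 *        h_add_logical_lan_buffer(vdev->unit_address, desc.desc);
 */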
struct ibmveth_rx_q_entry {
        u32 flags_off;
#define IBMVETH_RXQ_TOGGLE 0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT 31
#define IBMVETH_RXQ_VALID 0x40000000
#define IBMVETH_RXQ_NO_CSUM 0x02000000
#define IBMVETH_RXQ_CSUM_GOOD 0x01000000
#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF

        u32 length;
        u64 correlator;
};
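/*
 * Receive-path sketch (assumed usage): an entry is decoded with the masks
 * above, along the lines of
 *
 *        int toggle = (entry->flags_off & IBMVETH_RXQ_TOGGLE) >>
 *                        IBMVETH_RXQ_TOGGLE_SHIFT;
 *        int valid = !!(entry->flags_off & IBMVETH_RXQ_VALID);
 *        int csum_good = !!(entry->flags_off & IBMVETH_RXQ_CSUM_GOOD);
 *        u16 offset = entry->flags_off & IBMVETH_RXQ_OFF_MASK;
 */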
#endif /* _IBMVETH_H */