rfs: better sizing of dev_flow_table

The aim of this patch is to provide the full range of rps_flow_cnt on 64-bit arches.

The theoretical limit on the number of flows is 2^32.

Fix some buggy RPS/RFS macros as well.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Tom Herbert <therbert@google.com>
CC: Xi Wang <xi.wang@gmail.com>
CC: Laurent Chavey <chavey@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eric Dumazet 2011-12-24 06:56:49 +00:00, committed by David S. Miller
Parent 035c4c16be
Commit 60b778ce51
2 changed files with 31 additions and 21 deletions
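
The macro changes are the usual argument-parenthesization fix: without parentheses around _num, passing an expression such as mask + 1 expands with the wrong precedence and under-sizes the allocation. A minimal user-space sketch of the pitfall, using illustrative names (struct rps_map_like, MAP_SIZE_BUGGY/MAP_SIZE_FIXED) rather than the kernel definitions:

#include <stdio.h>
#include <stdint.h>

struct rps_map_like { unsigned int len; };	/* stand-in for struct rps_map */

/* Old form: _num is used unparenthesized inside the macro body. */
#define MAP_SIZE_BUGGY(_num) (sizeof(struct rps_map_like) + (_num * sizeof(uint16_t)))
/* Fixed form, matching the patch: the argument is parenthesized. */
#define MAP_SIZE_FIXED(_num) (sizeof(struct rps_map_like) + ((_num) * sizeof(uint16_t)))

int main(void)
{
	unsigned long mask = 7;	/* want room for mask + 1 = 8 entries */

	/* Buggy: expands to sizeof(...) + (mask + 1 * sizeof(uint16_t)),
	 * i.e. sizeof(...) + mask + 2 -- far too small for 8 entries.
	 */
	printf("buggy: %zu\n", (size_t)MAP_SIZE_BUGGY(mask + 1));

	/* Fixed: expands to sizeof(...) + ((mask + 1) * sizeof(uint16_t)). */
	printf("fixed: %zu\n", (size_t)MAP_SIZE_FIXED(mask + 1));
	return 0;
}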

View file

@@ -597,7 +597,7 @@ struct rps_map {
 	struct rcu_head rcu;
 	u16 cpus[0];
 };
-#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
+#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
 /*
  * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
@@ -621,7 +621,7 @@ struct rps_dev_flow_table {
 	struct rps_dev_flow flows[0];
 };
 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
-    (_num * sizeof(struct rps_dev_flow)))
+    ((_num) * sizeof(struct rps_dev_flow)))
 /*
  * The rps_sock_flow_table contains mappings of flows to the last CPU
@@ -632,7 +632,7 @@ struct rps_sock_flow_table {
 	u16 ents[0];
 };
 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
-    (_num * sizeof(u16)))
+    ((_num) * sizeof(u16)))
 #define RPS_NO_CPU 0xffff
@@ -684,7 +684,7 @@ struct xps_map {
 	struct rcu_head rcu;
 	u16 queues[0];
 };
-#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
+#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
     / sizeof(u16))

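Because the store path below now accepts masks all the way up to 0xffffffff, the sysfs read path has to widen before adding one: flow_table->mask is a u32, and mask + 1 computed in 32-bit arithmetic wraps to 0 for a full-size table. A standalone sketch of that wrap-around (plain stdint types, not the kernel structures); note that on 32-bit kernels unsigned long is still 32 bits, which is why the commit message promises the full rps_flow_cnt range only on 64-bit arches:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mask = 0xffffffffu;	/* largest value a u32 table->mask can hold */

	unsigned int wrapped = mask + 1;			/* 32-bit add wraps to 0 */
	unsigned long widened = (unsigned long)mask + 1;	/* 4294967296 on 64-bit */

	printf("u32 arithmetic: %u\n", wrapped);
	printf("widened first : %lu\n", widened);
	return 0;
}
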
View file

@@ -622,15 +622,15 @@ static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 					    char *buf)
 {
 	struct rps_dev_flow_table *flow_table;
-	unsigned int val = 0;
+	unsigned long val = 0;
 	rcu_read_lock();
 	flow_table = rcu_dereference(queue->rps_flow_table);
 	if (flow_table)
-		val = flow_table->mask + 1;
+		val = (unsigned long)flow_table->mask + 1;
 	rcu_read_unlock();
-	return sprintf(buf, "%u\n", val);
+	return sprintf(buf, "%lu\n", val);
 }
 static void rps_dev_flow_table_release_work(struct work_struct *work)
@@ -654,36 +654,46 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 					     struct rx_queue_attribute *attr,
 					     const char *buf, size_t len)
 {
-	unsigned int count;
-	char *endp;
+	unsigned long mask, count;
 	struct rps_dev_flow_table *table, *old_table;
 	static DEFINE_SPINLOCK(rps_dev_flow_lock);
+	int rc;
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
-	count = simple_strtoul(buf, &endp, 0);
-	if (endp == buf)
-		return -EINVAL;
+	rc = kstrtoul(buf, 0, &count);
+	if (rc < 0)
+		return rc;
 	if (count) {
-		int i;
-		if (count > INT_MAX)
+		mask = count - 1;
+		/* mask = roundup_pow_of_two(count) - 1;
+		 * without overflows...
+		 */
+		while ((mask | (mask >> 1)) != mask)
+			mask |= (mask >> 1);
+		/* On 64 bit arches, must check mask fits in table->mask (u32),
+		 * and on 32bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
+		 * doesnt overflow.
+		 */
+#if BITS_PER_LONG > 32
+		if (mask > (unsigned long)(u32)mask)
 			return -EINVAL;
-		count = roundup_pow_of_two(count);
-		if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+#else
+		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
 				/ sizeof(struct rps_dev_flow)) {
 			/* Enforce a limit to prevent overflow */
 			return -EINVAL;
 		}
-		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
+#endif
+		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
 		if (!table)
 			return -ENOMEM;
-		table->mask = count - 1;
-		for (i = 0; i < count; i++)
-			table->flows[i].cpu = RPS_NO_CPU;
+		table->mask = mask;
+		for (count = 0; count <= mask; count++)
+			table->flows[count].cpu = RPS_NO_CPU;
 	} else
 		table = NULL;
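
For reference, the bit-smearing loop introduced above computes roundup_pow_of_two(count) - 1 without ever materializing the power of two, so it cannot overflow even when count is close to ULONG_MAX. A self-contained sketch under that reading (mask_for() is an illustrative helper name, not from the patch; count must be non-zero, as the patch guarantees):

#include <stdio.h>
#include <limits.h>

/* Equivalent of the patch's loop: starting from count - 1, keep OR-ing in
 * right-shifted copies until every bit below the highest set bit is set,
 * yielding 2^ceil(log2(count)) - 1.
 */
static unsigned long mask_for(unsigned long count)
{
	unsigned long mask = count - 1;		/* caller ensures count >= 1 */

	while ((mask | (mask >> 1)) != mask)
		mask |= (mask >> 1);
	return mask;
}

int main(void)
{
	printf("%lu\n", mask_for(100));		/* 127  (next power of two is 128) */
	printf("%lu\n", mask_for(4096));	/* 4095 (already a power of two) */
	/* Near the top of the range roundup_pow_of_two() itself would overflow,
	 * but the loop simply saturates at an all-ones mask.
	 */
	printf("%#lx\n", mask_for(ULONG_MAX - 5));
	return 0;
}

The resulting mask then only needs two cheap range checks before the allocation: on 64-bit kernels it must fit in the u32 table->mask field, and on 32-bit kernels RPS_DEV_FLOW_TABLE_SIZE(mask + 1) must not overflow the size passed to vmalloc().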