ipvs: unify the formula to estimate the overhead of processing connections

lc and wlc use the same formula, but lblc and lblcr use a different one. There
is no reason to use two different formulas for the lc variants.

With this patch, all of the lc variants use the formula that lc uses.

Signed-off-by: Changli Gao <xiaosuo@gmail.com>
Acked-by: Wensong Zhang <wensong@linux-vs.org>
Signed-off-by: Simon Horman <horms@verge.net.au>
Changli Gao 2011-02-19 17:32:28 +08:00, committed by Simon Horman
Parent 17a8f8e373
Commit b552f7e3a9
5 changed files: 27 additions and 63 deletions
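For context on what the unification changes: besides sharing one helper, lblc and lblcr move from weighting an active connection as 50 inactive ones to weighting it as 256, which can flip which server looks cheapest. A minimal userspace sketch (the connection counts are made up, and plain ints stand in for the kernel's atomic_t counters):

#include <stdio.h>

/* Hypothetical per-destination counters; the kernel reads these
 * from struct ip_vs_dest with atomic_read(). */
struct dest {
    const char *name;
    int activeconns;
    int inactconns;
};

/* Old lblc/lblcr estimate: an active connection costs 50 inactive ones. */
static int overhead_old(const struct dest *d)
{
    return d->activeconns * 50 + d->inactconns;
}

/* Unified estimate (ip_vs_dest_conn_overhead): 256x, done as a shift. */
static int overhead_new(const struct dest *d)
{
    return (d->activeconns << 8) + d->inactconns;
}

int main(void)
{
    struct dest a = { "A", 2, 300 };    /* few active, many inactive */
    struct dest b = { "B", 5, 10 };     /* more active, few inactive */

    /* Old formula: A=400, B=260, so B looks cheaper.
     * New formula: A=812, B=1290, so A looks cheaper. */
    printf("old: A=%d B=%d\n", overhead_old(&a), overhead_old(&b));
    printf("new: A=%d B=%d\n", overhead_new(&a), overhead_new(&b));
    return 0;
}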

include/net/ip_vs.h

@@ -1243,6 +1243,20 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
 #endif /* CONFIG_IP_VS_NFCT */
 
+static inline unsigned int
+ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
+{
+    /*
+     * We think the overhead of processing active connections is 256
+     * times higher than that of inactive connections in average. (This
+     * 256 times might not be accurate, we will change it later) We
+     * use the following formula to estimate the overhead now:
+     *    dest->activeconns*256 + dest->inactconns
+     */
+    return (atomic_read(&dest->activeconns) << 8) +
+        atomic_read(&dest->inactconns);
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _NET_IP_VS_H */

net/netfilter/ipvs/ip_vs_lblc.c

@@ -389,12 +389,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
     int loh, doh;
 
     /*
-     * We think the overhead of processing active connections is fifty
-     * times higher than that of inactive connections in average. (This
-     * fifty times might not be accurate, we will change it later.) We
-     * use the following formula to estimate the overhead:
-     *    dest->activeconns*50 + dest->inactconns
-     * and the load:
+     * We use the following formula to estimate the load:
      *    (dest overhead) / dest->weight
      *
      * Remember -- no floats in kernel mode!!!
@@ -410,8 +405,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
             continue;
         if (atomic_read(&dest->weight) > 0) {
             least = dest;
-            loh = atomic_read(&least->activeconns) * 50
-                + atomic_read(&least->inactconns);
+            loh = ip_vs_dest_conn_overhead(least);
             goto nextstage;
         }
     }
@@ -425,8 +419,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
         if (dest->flags & IP_VS_DEST_F_OVERLOAD)
             continue;
 
-        doh = atomic_read(&dest->activeconns) * 50
-            + atomic_read(&dest->inactconns);
+        doh = ip_vs_dest_conn_overhead(dest);
         if (loh * atomic_read(&dest->weight) >
             doh * atomic_read(&least->weight)) {
             least = dest;
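The "no floats in kernel mode" reminder above is why the schedulers never divide: instead of comparing loh/least->weight with doh/dest->weight, they cross-multiply, which is valid because both weights are positive at that point. A standalone sketch of the trick (widened to long long here as a precaution; the kernel multiplies plain ints):

#include <stdio.h>

/* loh/lw > doh/dw  <=>  loh*dw > doh*lw, for lw, dw > 0.
 * Returns nonzero when the candidate (doh, dw) carries less
 * load per unit of weight than the current pick (loh, lw). */
static int candidate_less_loaded(int doh, int dw, int loh, int lw)
{
    return (long long)loh * dw > (long long)doh * lw;
}

int main(void)
{
    /* Current pick: overhead 812, weight 1 (load 812).
     * Candidate: overhead 1290, weight 2 (load 645). */
    printf("%d\n", candidate_less_loaded(1290, 2, 812, 1));    /* prints 1 */
    return 0;
}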

net/netfilter/ipvs/ip_vs_lblcr.c

@@ -178,8 +178,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
 
         if ((atomic_read(&least->weight) > 0)
             && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
-            loh = atomic_read(&least->activeconns) * 50
-                + atomic_read(&least->inactconns);
+            loh = ip_vs_dest_conn_overhead(least);
             goto nextstage;
         }
     }
@@ -192,8 +191,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
         if (dest->flags & IP_VS_DEST_F_OVERLOAD)
             continue;
 
-        doh = atomic_read(&dest->activeconns) * 50
-            + atomic_read(&dest->inactconns);
+        doh = ip_vs_dest_conn_overhead(dest);
         if ((loh * atomic_read(&dest->weight) >
              doh * atomic_read(&least->weight))
             && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
@@ -228,8 +226,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
     list_for_each_entry(e, &set->list, list) {
         most = e->dest;
         if (atomic_read(&most->weight) > 0) {
-            moh = atomic_read(&most->activeconns) * 50
-                + atomic_read(&most->inactconns);
+            moh = ip_vs_dest_conn_overhead(most);
             goto nextstage;
         }
     }
@@ -239,8 +236,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
   nextstage:
     list_for_each_entry(e, &set->list, list) {
         dest = e->dest;
-        doh = atomic_read(&dest->activeconns) * 50
-            + atomic_read(&dest->inactconns);
+        doh = ip_vs_dest_conn_overhead(dest);
         /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
         if ((moh * atomic_read(&dest->weight) <
              doh * atomic_read(&most->weight))
@@ -563,12 +559,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
     int loh, doh;
 
     /*
-     * We think the overhead of processing active connections is fifty
-     * times higher than that of inactive connections in average. (This
-     * fifty times might not be accurate, we will change it later.) We
-     * use the following formula to estimate the overhead:
-     *    dest->activeconns*50 + dest->inactconns
-     * and the load:
+     * We use the following formula to estimate the load:
      *    (dest overhead) / dest->weight
      *
      * Remember -- no floats in kernel mode!!!
@@ -585,8 +576,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
 
         if (atomic_read(&dest->weight) > 0) {
             least = dest;
-            loh = atomic_read(&least->activeconns) * 50
-                + atomic_read(&least->inactconns);
+            loh = ip_vs_dest_conn_overhead(least);
             goto nextstage;
         }
     }
@@ -600,8 +590,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
         if (dest->flags & IP_VS_DEST_F_OVERLOAD)
             continue;
 
-        doh = atomic_read(&dest->activeconns) * 50
-            + atomic_read(&dest->inactconns);
+        doh = ip_vs_dest_conn_overhead(dest);
         if (loh * atomic_read(&dest->weight) >
             doh * atomic_read(&least->weight)) {
             least = dest;
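ip_vs_dest_set_max runs the same comparison with the inequality flipped, as its comment (moh/mw < doh/dw ==> moh*dw < doh*mw) spells out, so it finds the most loaded member of the set rather than the least. The mirror of the sketch above, same caveats:

/* moh/mw < doh/dw  <=>  moh*dw < doh*mw, for mw, dw > 0.
 * Nonzero when the candidate (doh, dw) is more loaded than (moh, mw). */
static int candidate_more_loaded(int doh, int dw, int moh, int mw)
{
    return (long long)moh * dw < (long long)doh * mw;
}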

net/netfilter/ipvs/ip_vs_lc.c

@@ -22,22 +22,6 @@
 
 #include <net/ip_vs.h>
 
-static inline unsigned int
-ip_vs_lc_dest_overhead(struct ip_vs_dest *dest)
-{
-    /*
-     * We think the overhead of processing active connections is 256
-     * times higher than that of inactive connections in average. (This
-     * 256 times might not be accurate, we will change it later) We
-     * use the following formula to estimate the overhead now:
-     *    dest->activeconns*256 + dest->inactconns
-     */
-    return (atomic_read(&dest->activeconns) << 8) +
-        atomic_read(&dest->inactconns);
-}
-
 /*
  *    Least Connection scheduling
  */
@@ -62,7 +46,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
         if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
             atomic_read(&dest->weight) == 0)
             continue;
-        doh = ip_vs_lc_dest_overhead(dest);
+        doh = ip_vs_dest_conn_overhead(dest);
         if (!least || doh < loh) {
             least = dest;
             loh = doh;

net/netfilter/ipvs/ip_vs_wlc.c

@@ -27,22 +27,6 @@
 
 #include <net/ip_vs.h>
 
-static inline unsigned int
-ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
-{
-    /*
-     * We think the overhead of processing active connections is 256
-     * times higher than that of inactive connections in average. (This
-     * 256 times might not be accurate, we will change it later) We
-     * use the following formula to estimate the overhead now:
-     *    dest->activeconns*256 + dest->inactconns
-     */
-    return (atomic_read(&dest->activeconns) << 8) +
-        atomic_read(&dest->inactconns);
-}
-
 /*
  *    Weighted Least Connection scheduling
  */
@@ -71,7 +55,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
         if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
             atomic_read(&dest->weight) > 0) {
             least = dest;
-            loh = ip_vs_wlc_dest_overhead(least);
+            loh = ip_vs_dest_conn_overhead(least);
             goto nextstage;
         }
     }
@@ -85,7 +69,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
     list_for_each_entry_continue(dest, &svc->destinations, n_list) {
         if (dest->flags & IP_VS_DEST_F_OVERLOAD)
             continue;
-        doh = ip_vs_wlc_dest_overhead(dest);
+        doh = ip_vs_dest_conn_overhead(dest);
         if (loh * atomic_read(&dest->weight) >
             doh * atomic_read(&least->weight)) {
             least = dest;
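wlc and the lblc variants all share the same two-stage scan that the hunks above edit: a first loop locates the first usable destination and jumps to nextstage, then a second loop keeps whichever later destination wins the cross-multiplied load comparison. A condensed sketch of that control flow (list handling and types simplified from the kernel's):

#include <stddef.h>

struct dest {
    struct dest *next;    /* simplified stand-in for the kernel's list */
    int overloaded;       /* models IP_VS_DEST_F_OVERLOAD */
    int weight;
    int overhead;         /* what ip_vs_dest_conn_overhead() would return */
};

static struct dest *wlc_pick(struct dest *head)
{
    struct dest *dest, *least;
    int loh, doh;

    /* Stage 1: find the first destination that is usable at all. */
    for (dest = head; dest; dest = dest->next) {
        if (!dest->overloaded && dest->weight > 0) {
            least = dest;
            loh = least->overhead;
            goto nextstage;
        }
    }
    return NULL;

nextstage:
    /* Stage 2: keep the destination with the lowest overhead/weight. */
    for (dest = least->next; dest; dest = dest->next) {
        if (dest->overloaded)
            continue;
        doh = dest->overhead;
        if (loh * dest->weight > doh * least->weight) {
            least = dest;
            loh = doh;
        }
    }
    return least;
}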