[NET]: Cris checksum annotations and cleanups.

* sanitize prototypes and annotate
* kill cast-as-lvalue abuses in csum_partial()
* usual ntohs-equals-shift for checksum purposes

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
Al Viro 2006-11-14 21:15:19 -08:00, committed by David S. Miller
Parent 9be259aae5
Commit 3532010bcf
4 changed files with 57 additions and 59 deletions
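
For context on the "annotate" bullet: __wsum and __sum16 are sparse "bitwise" types from include/linux/types.h (typedef __u32 __bitwise __wsum; typedef __u16 __bitwise __sum16). Under sparse they are distinct from plain integers, so any unannotated mixing is flagged, and a __force cast marks each deliberate conversion. A minimal sketch of the pattern this commit applies throughout (the helper below is illustrative, not part of the commit):

	#include <linux/types.h>

	/* Unwrap the bitwise type, do ordinary arithmetic, re-wrap.
	 * Without the __force casts, sparse (make C=1) warns about
	 * mixing __wsum with plain u32; gcc compiles both the same.
	 */
	static inline __wsum wsum_add(__wsum a, u32 b)
	{
		u32 s = (__force u32)a + b;
		return (__force __wsum)s;
	}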


@@ -47,39 +47,41 @@
 #include <asm/delay.h>
 
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *p, int len, __wsum __sum)
 {
+	u32 sum = (__force u32)__sum;
+	const u16 *buff = p;
 	/*
 	 * Experiments with ethernet and slip connections show that buff
 	 * is aligned on either a 2-byte or 4-byte boundary.
 	 */
-	const unsigned char *endMarker = buff + len;
-	const unsigned char *marker = endMarker - (len % 16);
+	const void *endMarker = p + len;
+	const void *marker = endMarker - (len % 16);
 #if 0
 	if((int)buff & 0x3)
 		printk("unaligned buff %p\n", buff);
 	__delay(900); /* extra delay of 90 us to test performance hit */
 #endif
 	BITON;
 	while (buff < marker) {
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
-		sum += *((unsigned short *)buff)++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
 	}
 	marker = endMarker - (len % 2);
-	while(buff < marker) {
-		sum += *((unsigned short *)buff)++;
-	}
-	if(endMarker - buff > 0) {
-		sum += *buff;	/* add extra byte seperately */
-	}
+	while (buff < marker)
+		sum += *buff++;
+	if (endMarker > buff)
+		sum += *(const u8 *)buff;	/* add extra byte seperately */
 	BITOFF;
-	return(sum);
+	return (__force __wsum)sum;
 }
 
 EXPORT_SYMBOL(csum_partial);
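
The "cast-as-lvalue" abuse killed above is the idiom *((unsigned short *)buff)++: it treats the result of a cast as an assignable lvalue so the pointer can be bumped through the cast, a GCC extension that was removed in GCC 4.0. The fix is simply to give the pointer the right type once. A stand-alone user-space sketch of the same transformation (names here are illustrative):

	#include <stdint.h>
	#include <stddef.h>

	/* Old:	sum += *((unsigned short *)buff)++;	-- cast as lvalue, no longer valid C
	 * New:	type the pointer up front and post-increment it normally.
	 */
	static uint32_t sum_words(const void *p, size_t nwords, uint32_t sum)
	{
		const uint16_t *buff = p;	/* typed pointer replaces the cast */

		while (nwords--)
			sum += *buff++;		/* plain post-increment */
		return sum;
	}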


@@ -8,11 +8,11 @@
  * to split all of those into 16-bit components, then add.
  */
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		   unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
 {
-	int res;
+	__wsum res;
 
 	__asm__ ("add.d %2, %0\n\t"
 		 "ax\n\t"
 		 "add.d %3, %0\n\t"
@@ -21,7 +21,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
 		 "ax\n\t"
 		 "addq 0, %0\n"
 		 : "=r" (res)
-		 : "0" (sum), "r" (daddr), "r" (saddr), "r" ((ntohs(len) << 16) + (proto << 8)));
+		 : "0" (sum), "r" (daddr), "r" (saddr), "r" ((len + proto) << 8));
 
 	return res;
 }
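
The "ntohs-equals-shift" bullet refers to the operand change above. On a little-endian machine (CRIS is little-endian), (ntohs(len) << 16) + (proto << 8) and (len + proto) << 8 are different 32-bit values, but they contribute the same amount to a ones'-complement sum once it is folded to 16 bits, so the ntohs() and the extra shift can be dropped. A user-space sketch that verifies the equivalence (the fold helper is mine; the check assumes a little-endian host, like CRIS):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>

	/* Fold a 32-bit ones'-complement accumulator down to 16 bits. */
	static uint16_t fold(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	int main(void)
	{
		/* exhaustive over len and proto; assumes little-endian ntohs() */
		for (uint32_t len = 0; len <= 0xffff; len++)
			for (uint32_t proto = 0; proto < 256; proto++)
				assert(fold(((uint32_t)ntohs((uint16_t)len) << 16) + (proto << 8))
				       == fold((len + proto) << 8));
		printf("equivalent under 16-bit folding\n");
		return 0;
	}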


@@ -9,11 +9,11 @@
  * checksum. Which means it would be necessary to split all those into
  * 16-bit components and then add.
  */
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
-		   unsigned short len, unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+		   unsigned short len, unsigned short proto, __wsum sum)
 {
-	int res;
+	__wsum res;
 
 	__asm__ __volatile__ ("add.d %2, %0\n\t"
 			      "addc %3, %0\n\t"
@@ -21,7 +21,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
 			      "addc 0, %0\n\t"
 			      : "=r" (res)
 			      : "0" (sum), "r" (daddr), "r" (saddr), \
-			        "r" ((ntohs(len) << 16) + (proto << 8)));
+			        "r" ((len + proto) << 8));
 	return res;
 }
 
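
Both asm variants compute the same thing: a 32-bit ones'-complement addition of saddr, daddr and the combined len/proto pseudo-header word into sum (the v10 code folds the carry with the "ax" prefix, the v32 code with "addc"). A portable user-space sketch of that operation, assuming the little-endian operand layout used above:

	#include <stdint.h>

	/* Accumulate in 64 bits, then fold the carries back in twice,
	 * mirroring what the carry-propagating asm achieves in 32 bits.
	 */
	static uint32_t tcpudp_nofold(uint32_t saddr, uint32_t daddr,
				      uint16_t len, uint8_t proto, uint32_t sum)
	{
		uint64_t s = sum;

		s += saddr;
		s += daddr;
		s += (uint32_t)(len + proto) << 8;	/* the "(len + proto) << 8" operand */
		s = (s & 0xffffffff) + (s >> 32);	/* add carries back in */
		s = (s & 0xffffffff) + (s >> 32);	/* at most one more carry */
		return (uint32_t)s;
	}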


@@ -17,7 +17,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -27,26 +27,23 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  * better 64-bit) boundary
  */
 
-unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
-				       int len, unsigned int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				 int len, __wsum sum);
 
 /*
  *	Fold a partial checksum into a word
  */
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
-	/* the while loop is unnecessary really, it's always enough with two
-	   iterations */
-
-	while(sum >> 16)
-		sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
-
-	return ~sum;
+	u32 sum = (__force u32)csum;
+	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
+	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
+	return (__force __sum16)~sum;
 }
 
-extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
-						int len, unsigned int sum,
-						int *errptr);
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+					  int len, __wsum sum,
+					  int *errptr);
 
 /*
@@ -55,8 +52,7 @@ extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
  *
  */
 
-static inline unsigned short ip_fast_csum(unsigned char * iph,
-					  unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	return csum_fold(csum_partial(iph, ihl * 4, 0));
 }
@@ -66,11 +62,10 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
  * returns a 16-bit checksum, already complemented
  */
 
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -80,7 +75,8 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
  * in icmp.c
  */
 
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
 	return csum_fold (csum_partial(buff, len, 0));
 }
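
On the csum_fold() rewrite: the deleted comment already noted that the while loop never needs more than two iterations, and the new version simply performs both folds unconditionally. After the first fold the sum is at most 0xffff + 0xffff = 0x1fffe, so the second fold adds a carry of at most 1 and the result always fits in 16 bits. A user-space sketch checking the worst cases (the helper name is mine):

	#include <assert.h>
	#include <stdint.h>

	/* Two end-around-carry folds plus complement, as in the new csum_fold(). */
	static uint16_t fold16(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* result <= 0x1fffe */
		sum = (sum & 0xffff) + (sum >> 16);	/* carry-in <= 1: fits in 16 bits */
		return (uint16_t)~sum;
	}

	int main(void)
	{
		assert(fold16(0xffffffff) == 0);	/* 0xffff + 0xffff -> 0x1fffe -> 0xffff */
		assert(fold16(0x0001fffe) == 0);	/* worst case after one fold */
		assert(fold16(0x00000000) == 0xffff);
		return 0;
	}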