author    | Al Viro <viro@zeniv.linux.org.uk>            | 2006-11-14 21:15:19 -0800
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-12-02 21:23:02 -0800
commit    | 3532010bcf7699f2ce9a2baab58b4b9a5426d97e (patch)
tree      | 47d1c423fe2345bea93ff3a576363971b9b0a572
parent    | 9be259aae5264511fe0a8b5e3d6711e0fd1d55df (diff)
download  | lwn-3532010bcf7699f2ce9a2baab58b4b9a5426d97e.tar.gz lwn-3532010bcf7699f2ce9a2baab58b4b9a5426d97e.zip
[NET]: Cris checksum annotations and cleanups.
* sanitize prototypes and annotate
* kill cast-as-lvalue abuses in csum_partial()
* usual ntohs-equals-shift for checksum purposes
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
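For context on the first bullet ("sanitize prototypes and annotate"): the annotations are the kernel's sparse-checked checksum types `__wsum` and `__sum16`. The sketch below is a simplified stand-in for the real definitions in the kernel headers (the `__bitwise`/`__force` macros here are hand-rolled approximations, not the actual `include/linux/types.h` machinery), just to show what the annotated prototypes buy:

```c
/* Simplified stand-ins: under sparse (__CHECKER__), __bitwise makes the
 * typedefs distinct from ordinary integers, and __force silences the
 * warning at the few places where a deliberate conversion is intended. */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int   u32;
typedef unsigned short u16;
typedef u32 __bitwise __wsum;   /* 32-bit partial checksum */
typedef u16 __bitwise __sum16;  /* 16-bit folded, complemented checksum */

/* The annotated prototype from this patch: passing a bare unsigned int,
 * or assigning the result to one, now draws a sparse warning unless the
 * conversion is spelled out with __force. */
__wsum csum_partial(const void *buff, int len, __wsum sum);
```

This is also why the rewritten csum_partial() below immediately converts its argument with `(__force u32)` and converts back to `__wsum` on return.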
-rw-r--r-- | arch/cris/arch-v10/lib/old_checksum.c | 62
-rw-r--r-- | include/asm-cris/arch-v10/checksum.h  | 10
-rw-r--r-- | include/asm-cris/arch-v32/checksum.h  | 10
-rw-r--r-- | include/asm-cris/checksum.h           | 34
4 files changed, 57 insertions, 59 deletions
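Before the diff itself, a note on the last bullet of the commit message: the pseudo-header term `(ntohs(len) << 16) + (proto << 8)` can become `(len + proto) << 8` because checksum contributions only matter modulo 0xffff. On little-endian CRIS, `ntohs()` is a byte swap, swapping a 16-bit value is congruent to shifting it left by 8, and a further `<< 16` multiplies by 0x10000, which is congruent to 1. The user-space sketch below (not kernel code; `fold16()` and `swab16()` are illustrative helpers) checks the equivalence exhaustively:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into 16 bits with end-around carry.  Two
 * steps always suffice, which is also why the rewritten csum_fold()
 * drops the old while loop: after one step the value is at most
 * 0xffff + 0xffff = 0x1fffe, so its high half is at most 1. */
static uint16_t fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* ntohs() on a little-endian machine such as CRIS is a plain byte swap. */
static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	uint32_t len, proto;

	for (len = 0; len <= 0xffff; len++) {
		for (proto = 0; proto <= 0xff; proto++) {
			uint32_t old_term = ((uint32_t)swab16(len) << 16) + (proto << 8);
			uint32_t new_term = (len + proto) << 8;

			/* Both terms contribute the same folded checksum. */
			assert(fold16(old_term) == fold16(new_term));
		}
	}
	printf("old and new pseudo-header terms are checksum-equivalent\n");
	return 0;
}
```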
diff --git a/arch/cris/arch-v10/lib/old_checksum.c b/arch/cris/arch-v10/lib/old_checksum.c
index 22a6f0aa9cef..497634a64829 100644
--- a/arch/cris/arch-v10/lib/old_checksum.c
+++ b/arch/cris/arch-v10/lib/old_checksum.c
@@ -47,39 +47,41 @@
 #include <asm/delay.h>
 
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+__wsum csum_partial(const void *p, int len, __wsum __sum)
 {
-  /*
-   * Experiments with ethernet and slip connections show that buff
-   * is aligned on either a 2-byte or 4-byte boundary.
-   */
-  const unsigned char *endMarker = buff + len;
-  const unsigned char *marker = endMarker - (len % 16);
+	u32 sum = (__force u32)__sum;
+	const u16 *buff = p;
+	/*
+	 * Experiments with ethernet and slip connections show that buff
+	 * is aligned on either a 2-byte or 4-byte boundary.
+	 */
+	const void *endMarker = p + len;
+	const void *marker = endMarker - (len % 16);
 #if 0
-  if((int)buff & 0x3)
-    printk("unaligned buff %p\n", buff);
-  __delay(900); /* extra delay of 90 us to test performance hit */
+	if((int)buff & 0x3)
+		printk("unaligned buff %p\n", buff);
+	__delay(900); /* extra delay of 90 us to test performance hit */
 #endif
-  BITON;
-  while (buff < marker) {
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-    sum += *((unsigned short *)buff)++;
-  }
-  marker = endMarker - (len % 2);
-  while(buff < marker) {
-    sum += *((unsigned short *)buff)++;
-  }
-  if(endMarker - buff > 0) {
-    sum += *buff; /* add extra byte seperately */
-  }
-  BITOFF;
-  return(sum);
+	BITON;
+	while (buff < marker) {
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+		sum += *buff++;
+	}
+	marker = endMarker - (len % 2);
+	while (buff < marker)
+		sum += *buff++;
+
+	if (endMarker > buff)
+		sum += *(const u8 *)buff; /* add extra byte seperately */
+
+	BITOFF;
+	return (__force __wsum)sum;
 }
 
 EXPORT_SYMBOL(csum_partial);
diff --git a/include/asm-cris/arch-v10/checksum.h b/include/asm-cris/arch-v10/checksum.h
index 633f234f336b..b8000c5d7fe1 100644
--- a/include/asm-cris/arch-v10/checksum.h
+++ b/include/asm-cris/arch-v10/checksum.h
@@ -8,11 +8,11 @@
  * to split all of those into 16-bit components, then add.
  */
 
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		   unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
 {
-	int res;
+	__wsum res;
 	__asm__ ("add.d %2, %0\n\t"
 		 "ax\n\t"
 		 "add.d %3, %0\n\t"
@@ -21,7 +21,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
 		 "ax\n\t"
 		 "addq 0, %0\n"
 	: "=r" (res)
-	: "0" (sum), "r" (daddr), "r" (saddr), "r" ((ntohs(len) << 16) + (proto << 8)));
+	: "0" (sum), "r" (daddr), "r" (saddr), "r" ((len + proto) << 8));
 
 	return res;
 }
diff --git a/include/asm-cris/arch-v32/checksum.h b/include/asm-cris/arch-v32/checksum.h
index 97ef89efea62..e5dcfce6e0dc 100644
--- a/include/asm-cris/arch-v32/checksum.h
+++ b/include/asm-cris/arch-v32/checksum.h
@@ -9,11 +9,11 @@
  * checksum. Which means it would be necessary to split all those into
  * 16-bit components and then add.
  */
 
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
-		   unsigned short len, unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+		   unsigned short len, unsigned short proto, __wsum sum)
 {
-	int res;
+	__wsum res;
 	__asm__ __volatile__ ("add.d %2, %0\n\t"
 			      "addc %3, %0\n\t"
@@ -21,7 +21,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
 			      "addc 0, %0\n\t"
 	: "=r" (res)
 	: "0" (sum), "r" (daddr), "r" (saddr), \
-	  "r" ((ntohs(len) << 16) + (proto << 8)));
+	  "r" ((len + proto) << 8));
 
 	return res;
 }
diff --git a/include/asm-cris/checksum.h b/include/asm-cris/checksum.h
index 26a7719bbb84..180dbf2757b0 100644
--- a/include/asm-cris/checksum.h
+++ b/include/asm-cris/checksum.h
@@ -17,7 +17,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -27,26 +27,23 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  * better 64-bit) boundary
  */
 
-unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
-				       int len, unsigned int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				 int len, __wsum sum);
 
 /*
  * Fold a partial checksum into a word
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
-	/* the while loop is unnecessary really, it's always enough with two
-	   iterations */
-
-	while(sum >> 16)
-		sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
-
-	return ~sum;
+	u32 sum = (__force u32)csum;
+	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
+	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
+	return (__force __sum16)~sum;
 }
 
-extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
-						int len, unsigned int sum,
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+					   int len, __wsum sum,
 						int *errptr);
 
 /*
@@ -55,8 +52,7 @@ extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
  *
  */
 
-static inline unsigned short ip_fast_csum(unsigned char * iph,
-					  unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	return csum_fold(csum_partial(iph, ihl * 4, 0));
 }
@@ -66,11 +62,10 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
  * returns a 16-bit checksum, already complemented
  */
 
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 int csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -80,7 +75,8 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
  * in icmp.c
  */
 
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
 	return csum_fold (csum_partial(buff, len, 0));
 }
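A closing note on the second bullet ("kill cast-as-lvalue abuses"): the old inner loop advanced the buffer with `*((unsigned short *)buff)++`, incrementing the result of a cast. That is not standard C; it only worked as an old GCC extension that later compilers reject. The rewrite simply walks a correctly typed pointer. A minimal user-space sketch of the same before/after shape (`sum16_words()` is an illustrative helper, not the kernel function, and it omits csum_partial()'s 16-way unrolling and odd-byte tail):

```c
#include <stddef.h>
#include <stdint.h>

/* After: a typed cursor is advanced with ordinary post-increment,
 * mirroring the structure of the patched csum_partial() inner loop. */
static uint32_t sum16_words(const void *p, size_t nwords, uint32_t sum)
{
	const uint16_t *buff = p;

	while (nwords--)
		sum += *buff++;

	return sum;
}

/*
 * Before, with buff declared as const unsigned char *, each step read
 *
 *	sum += *((unsigned short *)buff)++;
 *
 * relying on the compiler accepting "(cast)expr" as a modifiable lvalue.
 */
```

Reading the buffer as 16-bit words also makes the odd-byte tail explicit, which is why the patched csum_partial() finishes with a single `*(const u8 *)buff` add.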