blob: de032ad96f4ac0e82b3028fe41e523a7636d2170 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Arnd Bergmann26a28fa2009-05-13 22:56:38 +00002/*
3 *
4 * INET An implementation of the TCP/IP protocol suite for the LINUX
5 * operating system. INET is implemented using the BSD Socket
6 * interface as the means of communication with the user level.
7 *
8 * IP/TCP/UDP checksumming routines
9 *
10 * Authors: Jorge Cwik, <jorge@laser.satlink.net>
11 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
12 * Tom May, <ftom@netcom.com>
13 * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
14 * Lots of code moved from tcp.c and ip.c; see those files
15 * for more names.
16 *
17 * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
18 * Fixed some nasty bugs, causing some horrible crashes.
19 * A: At some points, the sum (%0) was used as
20 * length-counter instead of the length counter
21 * (%1). Thanks to Roman Hodek for pointing this out.
22 * B: GCC seems to mess up if one uses too many
23 * data-registers to hold input values and one tries to
24 * specify d0 and d1 as scratch registers. Letting gcc
25 * choose these registers itself solves the problem.
Arnd Bergmann26a28fa2009-05-13 22:56:38 +000026 */
27
28/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
29 kills, so most of the assembly has to go. */
30
Paul Gortmaker8bc3bcc2011-11-16 21:29:17 -050031#include <linux/export.h>
Arnd Bergmann26a28fa2009-05-13 22:56:38 +000032#include <net/checksum.h>
33
34#include <asm/byteorder.h>
35
Arnd Bergmann20c1f642009-06-23 21:37:26 +020036#ifndef do_csum
/*
 * Fold a 32-bit checksum accumulator down to 16 bits, adding the
 * carries back in (ones-complement end-around carry).
 */
static inline unsigned short from32to16(unsigned int x)
{
	/* first fold can yield a 17-bit value; second absorbs that carry */
	x = (x >> 16) + (x & 0xffff);
	x = (x >> 16) + (x & 0xffff);
	return x;
}
45
/*
 * do_csum - compute the 16-bit ones-complement sum of a buffer.
 *
 * Returns the folded 16-bit sum (in a 32-bit value) of the @len bytes
 * at @buff.  Works byte-at-a-time up to 2-byte alignment, then
 * 2 bytes up to 4-byte alignment, then a 32-bit word loop, so no
 * access is ever misaligned (this generic version exists for machines
 * where unaligned access faults).
 */
static unsigned int do_csum(const unsigned char *buff, int len)
{
	int odd;
	unsigned int result = 0;

	if (len <= 0)
		goto out;
	/* remember whether we started on an odd address: the final sum
	 * must be byte-swapped in that case so it lines up with a sum
	 * taken from the even predecessor address */
	odd = 1 & (unsigned long) buff;
	if (odd) {
		/* leading odd byte occupies the opposite byte lane
		 * depending on endianness */
#ifdef __LITTLE_ENDIAN
		result += (*buff << 8);
#else
		result = *buff;
#endif
		len--;
		buff++;
	}
	if (len >= 2) {
		/* align to a 4-byte boundary with one 16-bit add */
		if (2 & (unsigned long) buff) {
			result += *(unsigned short *) buff;
			len -= 2;
			buff += 2;
		}
		if (len >= 4) {
			/* main loop: sum aligned 32-bit words */
			const unsigned char *end = buff + ((unsigned)len & ~3);
			unsigned int carry = 0;
			do {
				unsigned int w = *(unsigned int *) buff;
				buff += 4;
				result += carry;
				result += w;
				/* unsigned overflow of result += w leaves
				 * result < w; record the carry for the
				 * next iteration */
				carry = (w > result);
			} while (buff < end);
			result += carry;
			/* partial fold keeps the sum within 17 bits */
			result = (result & 0xffff) + (result >> 16);
		}
		/* trailing 16-bit word, if any */
		if (len & 2) {
			result += *(unsigned short *) buff;
			buff += 2;
		}
	}
	/* trailing single byte goes in the low lane (LE) / high lane (BE) */
	if (len & 1)
#ifdef __LITTLE_ENDIAN
		result += *buff;
#else
		result += (*buff << 8);
#endif
	result = from32to16(result);
	/* undo the lane shift introduced by the odd start address */
	if (odd)
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
	return result;
}
Arnd Bergmann20c1f642009-06-23 21:37:26 +020099#endif
Arnd Bergmann26a28fa2009-05-13 22:56:38 +0000100
Vineet Gupta64e69072013-01-18 15:12:16 +0530101#ifndef ip_fast_csum
Arnd Bergmann26a28fa2009-05-13 22:56:38 +0000102/*
103 * This is a version of ip_compute_csum() optimized for IP headers,
104 * which always checksum on 4 octet boundaries.
105 */
106__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
107{
108 return (__force __sum16)~do_csum(iph, ihl*4);
109}
110EXPORT_SYMBOL(ip_fast_csum);
Vineet Gupta64e69072013-01-18 15:12:16 +0530111#endif
Arnd Bergmann26a28fa2009-05-13 22:56:38 +0000112
113/*
114 * computes the checksum of a memory block at buff, length len,
115 * and adds in "sum" (32-bit)
116 *
117 * returns a 32-bit number suitable for feeding into itself
118 * or csum_tcpudp_magic
119 *
120 * this function must be called with even lengths, except
121 * for the last fragment, which may be odd
122 *
123 * it's best to have buff aligned on a 32-bit boundary
124 */
125__wsum csum_partial(const void *buff, int len, __wsum wsum)
126{
127 unsigned int sum = (__force unsigned int)wsum;
128 unsigned int result = do_csum(buff, len);
129
130 /* add in old sum, and carry.. */
131 result += sum;
132 if (sum > result)
133 result += 1;
134 return (__force __wsum)result;
135}
136EXPORT_SYMBOL(csum_partial);
137
138/*
139 * this routine is used for miscellaneous IP-like checksums, mainly
140 * in icmp.c
141 */
142__sum16 ip_compute_csum(const void *buff, int len)
143{
144 return (__force __sum16)~do_csum(buff, len);
145}
146EXPORT_SYMBOL(ip_compute_csum);
147
/*
 * csum_partial_copy_from_user - copy from user space while checksumming.
 *
 * Copies @len bytes from the user pointer @src into @dst and returns
 * csum_partial() of the destination folded into @sum.  On a fault,
 * the uncopied tail of @dst is zeroed (so the returned checksum is
 * still deterministic) and *@csum_err is set to -EFAULT; on success
 * *@csum_err is set to 0.
 */
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len,
			    __wsum sum, int *csum_err)
{
	int missing;

	/* __copy_from_user returns the number of bytes NOT copied;
	 * the tail of dst is left uninitialized in that case */
	missing = __copy_from_user(dst, src, len);
	if (missing) {
		/* zero what we could not copy before checksumming it */
		memset(dst + len - missing, 0, missing);
		*csum_err = -EFAULT;
	} else
		*csum_err = 0;

	return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
167
/*
 * csum_partial_copy - copy kernel memory while checksumming.
 *
 * Like csum_partial(), but also copies @len bytes from @src to @dst.
 * This generic version just does the copy and then checksums the
 * destination (arch versions fuse the two passes).
 */
__wsum
csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
{
	memcpy(dst, src, len);
	return csum_partial(dst, len, sum);
}
EXPORT_SYMBOL(csum_partial_copy);
178
179#ifndef csum_tcpudp_nofold
karl beldan9ce35772015-01-29 11:10:22 +0100180static inline u32 from64to32(u64 x)
181{
182 /* add up 32-bit and 32-bit for 32+c bit */
183 x = (x & 0xffffffff) + (x >> 32);
184 /* add up carry.. */
185 x = (x & 0xffffffff) + (x >> 32);
186 return (u32)x;
187}
188
/*
 * csum_tcpudp_nofold - add the TCP/UDP pseudo-header to a checksum.
 *
 * Folds the pseudo-header fields (source address, destination address,
 * protocol and length) into @sum without the final 16-bit fold.
 * A 64-bit accumulator collects all the 32-bit adds so the carries
 * can be folded back in one from64to32() at the end.
 */
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
			  __u32 len, __u8 proto, __wsum sum)
{
	unsigned long long s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
	/* the running sum holds network-order (big-endian) data, so on
	 * little-endian hosts proto+len must be shifted into the byte
	 * lanes where the wire representation would have placed them */
#ifdef __BIG_ENDIAN
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum)from64to32(s);
}
EXPORT_SYMBOL(csum_tcpudp_nofold);
204#endif