author     Al Viro <viro@zeniv.linux.org.uk>  2021-06-05 17:19:30 +0300
committer  Al Viro <viro@zeniv.linux.org.uk>  2021-06-10 18:45:14 +0300
commit     594e450b3f4435a9d663df3d48d7fa34e685cbd1 (patch)
tree       59b3a48032406362876577688e0785f0fc9fcb13 /include/net/checksum.h
parent     f0b65f39ac505e8f1dcdaa165aa7b8c0bd6fd454 (diff)
csum_and_copy_to_iter(): massage into form closer to csum_and_copy_from_iter()
Namely, have off counted starting from 0 rather than from csstate->off. To compensate we need to shift the initial value (csstate->csum) (rotate by 8 bits, as usual for csum) and do the same after we are finished adding the pieces up.

What we get out of that is a bit more redundancy in our variables - from is always equal to addr + off, which will be useful several commits down the road.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
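For illustration only (this is not the lib/iov_iter.c hunk from this commit): a minimal C sketch of the compensation described above, assuming the csum_shift()/csum_block_add() helpers added below plus the existing csum_partial(). The function name example_accumulate, the chunk size, and the start_off parameter are invented for the example.

/*
 * Sketch, not the real csum_and_copy_to_iter(): fold `len` bytes at `addr`
 * into a running checksum that logically begins `start_off` bytes into the
 * stream, while counting `off` from 0 locally.
 */
#include <linux/types.h>
#include <net/checksum.h>

static __wsum example_accumulate(__wsum stream_sum, size_t start_off,
				 const void *addr, size_t len)
{
	size_t off = 0;				/* counted from 0, as in the patch */
	/* rotate once so the local, 0-based pieces line up with stream_sum */
	__wsum sum = csum_shift(stream_sum, start_off);

	while (off < len) {
		size_t chunk = len - off > 64 ? 64 : len - off;
		__wsum piece = csum_partial(addr + off, chunk, 0);

		sum = csum_block_add(sum, piece, off);	/* 0-based offset */
		off += chunk;
	}
	/* rotate again when done, putting the result back in stream alignment */
	return csum_shift(sum, start_off);
}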
Diffstat (limited to 'include/net/checksum.h')
-rw-r--r--  include/net/checksum.h  14
1 file changed, 8 insertions, 6 deletions
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 0d05b9e8690b..5b96d5bd6e54 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -80,16 +80,18 @@ static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
 	return csum16_add(csum, ~addend);
 }
 
-static inline __wsum
-csum_block_add(__wsum csum, __wsum csum2, int offset)
+static inline __wsum csum_shift(__wsum sum, int offset)
 {
-	u32 sum = (__force u32)csum2;
-
 	/* rotate sum to align it with a 16b boundary */
 	if (offset & 1)
-		sum = ror32(sum, 8);
+		return (__force __wsum)ror32((__force u32)sum, 8);
+	return sum;
+}
 
-	return csum_add(csum, (__force __wsum)sum);
+static inline __wsum
+csum_block_add(__wsum csum, __wsum csum2, int offset)
+{
+	return csum_add(csum, csum_shift(csum2, offset));
 }
 
 static inline __wsum
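Why rotating by 8 bits is "as usual for csum": the 16-bit one's-complement sum is byte-swap invariant, so a piece summed as if it began at offset 0 only needs an 8-bit rotate to be folded in at an odd offset. Below is a small userspace sanity check of that property using plain uint32_t stand-ins (not the kernel's __wsum representation; every helper here is re-implemented ad hoc for the demo).

/* Userspace demo, for illustration only; build with: cc -O2 csum_demo.c */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ror32(uint32_t x, unsigned int n)
{
	return (x >> n) | (x << (32 - n));
}

/* Internet-checksum accumulator over big-endian 16-bit words; an odd
 * trailing byte is treated as the high byte of a zero-padded word. */
static uint32_t csum_partial(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Mirrors the patch's csum_shift()/csum_add() in plain uint32_t terms. */
static uint32_t csum_shift(uint32_t sum, size_t offset)
{
	return (offset & 1) ? ror32(sum, 8) : sum;
}

static uint32_t csum_add(uint32_t a, uint32_t b)
{
	uint64_t s = (uint64_t)a + b;
	return (uint32_t)(s + (s >> 32));	/* end-around carry */
}

int main(void)
{
	uint8_t buf[16] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
	size_t split = 5;	/* odd split point: the rotation actually kicks in */

	uint32_t whole = csum_partial(buf, sizeof(buf), 0);
	uint32_t head  = csum_partial(buf, split, 0);
	uint32_t tail  = csum_partial(buf + split, sizeof(buf) - split, 0);
	/* csum_block_add(head, tail, split) spelled out */
	uint32_t combined = csum_add(head, csum_shift(tail, split));

	printf("whole    %04x\n", csum_fold(whole));
	printf("combined %04x\n", csum_fold(combined));
	return 0;
}

Both printed values should match; that equivalence is what lets csum_block_add() combine a piece summed from a 0-based offset into a checksum kept in stream alignment.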