/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains assembly-language implementations
 * of IP-style 1's complement checksum routines.
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
 */

#include <linux/sys.h>
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
#include <asm/export.h>

/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit).
 *
 * __csum_partial(r3=buff, r4=len, r5=sum)
 */
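
/*
 * For reference, a rough (untested) C model of the algorithm below;
 * the names are illustrative and not part of this file:
 *
 *	u64 acc = sum;
 *	while (len >= 8) {		// doubleword chunks, carry kept
 *		u64 v = *(u64 *)buff;	// the real loop is unrolled 8x
 *		acc += v;
 *		acc += (acc < v);	// emulate adde's carry-in
 *		buff += 8; len -= 8;
 *	}
 *	// ...then 4/2/1 byte tails, then fold 64 bits down to 32...
 */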
_GLOBAL(__csum_partial)
	addic	r0,r5,0			/* clear carry */

	srdi.	r6,r4,3			/* less than 8 bytes? */
	beq	.Lcsum_tail_word

	/*
	 * If only halfword aligned, align to a double word. Since odd
	 * aligned addresses should be rare and they would require more
	 * work to calculate the correct checksum, we ignore that case
	 * and take the potential slowdown of unaligned loads.
	 */
	rldicl.	r6,r3,64-1,64-2		/* r6 = (r3 >> 1) & 0x3 */
	beq	.Lcsum_aligned
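
	/*
	 * The loop below runs 4 - ((buff >> 1) & 0x3) times, consuming
	 * one halfword per iteration, which brings buff up to the next
	 * doubleword boundary.
	 */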

	li	r7,4
	sub	r6,r7,r6
	mtctr	r6

1:
	lhz	r6,0(r3)		/* align to doubleword */
	subi	r4,r4,2
	addi	r3,r3,2
	adde	r0,r0,r6
	bdnz	1b

.Lcsum_aligned:
	/*
	 * We unroll the loop such that each iteration is 64 bytes with an
	 * entry and exit limb of 64 bytes, meaning a minimum size of
	 * 128 bytes.
	 */
	srdi.	r6,r4,7
	beq	.Lcsum_tail_doublewords		/* len < 128 */

	srdi	r6,r4,6
	subi	r6,r6,1
	mtctr	r6

	stdu	r1,-STACKFRAMESIZE(r1)
	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)

	ld	r6,0(r3)
	ld	r9,8(r3)

	ld	r10,16(r3)
	ld	r11,24(r3)

	/*
	 * On POWER6 and POWER7 back to back adde instructions take 2 cycles
	 * because of the XER dependency. This means the fastest this loop can
	 * go is 16 cycles per iteration. The scheduling of the loop below has
	 * been shown to hit this on both POWER6 and POWER7.
	 */
	.align 5
2:
	adde	r0,r0,r6
	ld	r12,32(r3)
	ld	r14,40(r3)

	adde	r0,r0,r9
	ld	r15,48(r3)
	ld	r16,56(r3)
	addi	r3,r3,64

	adde	r0,r0,r10

	adde	r0,r0,r11

	adde	r0,r0,r12

	adde	r0,r0,r14

	adde	r0,r0,r15
	ld	r6,0(r3)
	ld	r9,8(r3)

	adde	r0,r0,r16
	ld	r10,16(r3)
	ld	r11,24(r3)
	bdnz	2b
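
	/*
	 * The loads at the bottom of loop 2 above fetch data for the next
	 * iteration; the exit limb below consumes those final 64 bytes
	 * outside the loop.
	 */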

	adde	r0,r0,r6
	ld	r12,32(r3)
	ld	r14,40(r3)

	adde	r0,r0,r9
	ld	r15,48(r3)
	ld	r16,56(r3)
	addi	r3,r3,64

	adde	r0,r0,r10
	adde	r0,r0,r11
	adde	r0,r0,r12
	adde	r0,r0,r14
	adde	r0,r0,r15
	adde	r0,r0,r16

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)
	addi	r1,r1,STACKFRAMESIZE

	andi.	r4,r4,63

.Lcsum_tail_doublewords:		/* Up to 127 bytes to go */
	srdi.	r6,r4,3
	beq	.Lcsum_tail_word

	mtctr	r6
3:
	ld	r6,0(r3)
	addi	r3,r3,8
	adde	r0,r0,r6
	bdnz	3b

	andi.	r4,r4,7

.Lcsum_tail_word:			/* Up to 7 bytes to go */
	srdi.	r6,r4,2
	beq	.Lcsum_tail_halfword

	lwz	r6,0(r3)
	addi	r3,r3,4
	adde	r0,r0,r6
	subi	r4,r4,4

.Lcsum_tail_halfword:			/* Up to 3 bytes to go */
	srdi.	r6,r4,1
	beq	.Lcsum_tail_byte

	lhz	r6,0(r3)
	addi	r3,r3,2
	adde	r0,r0,r6
	subi	r4,r4,2

.Lcsum_tail_byte:			/* Up to 1 byte to go */
	andi.	r6,r4,1
	beq	.Lcsum_finish

	lbz	r6,0(r3)
#ifdef __BIG_ENDIAN__
	sldi	r9,r6,8			/* Pad the byte out to 16 bits */
	adde	r0,r0,r9
#else
	adde	r0,r0,r6
#endif

.Lcsum_finish:
	addze	r0,r0			/* add in final carry */
	rldicl	r4,r0,32,0		/* fold two 32 bit halves together */
	add	r3,r4,r0
	srdi	r3,r3,32
	blr
EXPORT_SYMBOL(__csum_partial)
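
/*
 * The final fold in .Lcsum_finish is roughly this C (rotl64 being an
 * illustrative helper, not a real kernel API):
 *
 *	u64 t = rotl64(r0, 32);		// swap the 32-bit halves
 *	return (u32)((t + r0) >> 32);	// hi + lo with end-around carry
 *
 * Any carry out of the low-half addition propagates into the top half
 * of the 64-bit add, so a single add/shift pair performs the 1's
 * complement 64 -> 32 bit fold.
 */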
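
/*
 * The macros below attach an exception table entry (EX_TABLE) to each
 * user-memory access in csum_partial_copy_generic, so a fault branches
 * to the error handlers at the end of the routine. Reading the error
 * paths: "source"/"dest" cover accesses made while r14-r16 are saved on
 * the stack (their handler restores them), while the "nr" variants are
 * used where no non-volatile registers need restoring.
 */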
.macro srcnr
100:
	EX_TABLE(100b,.Lerror_nr)
.endm

.macro source
150:
	EX_TABLE(150b,.Lerror)
.endm

.macro dstnr
200:
	EX_TABLE(200b,.Lerror_nr)
.endm

.macro dest
250:
	EX_TABLE(250b,.Lerror)
.endm

/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in 0xffffffff (32-bit), while copying the block to dst.
 * If an access exception occurs, it returns 0.
 *
 * csum_partial_copy_generic(r3=src, r4=dst, r5=len)
 */
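
/*
 * A rough (untested) C model, with get_user_u64/put_user_u64 standing
 * in for the faulting accesses covered by the source/dest macros:
 *
 *	u64 acc = 0xffffffff;
 *	while (len >= 8) {		// the real loop is unrolled 8x
 *		u64 v;
 *		if (get_user_u64(&v, src) || put_user_u64(v, dst))
 *			return 0;	// access exception
 *		acc += v;
 *		acc += (acc < v);	// emulate adde's carry-in
 *		src += 8; dst += 8; len -= 8;
 *	}
 *	// ...4/2/1 byte tails, then the same 64 -> 32 bit fold...
 */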
_GLOBAL(csum_partial_copy_generic)
	li	r6,-1
	addic	r0,r6,0			/* clear carry */

	srdi.	r6,r5,3			/* less than 8 bytes? */
	beq	.Lcopy_tail_word

	/*
	 * If only halfword aligned, align to a double word. Since odd
	 * aligned addresses should be rare and they would require more
	 * work to calculate the correct checksum, we ignore that case
	 * and take the potential slowdown of unaligned loads.
	 *
	 * If the source and destination are relatively unaligned we only
	 * align the source. This keeps things simple.
	 */
	rldicl.	r6,r3,64-1,64-2		/* r6 = (r3 >> 1) & 0x3 */
	beq	.Lcopy_aligned

	li	r9,4
	sub	r6,r9,r6
	mtctr	r6

1:
srcnr;	lhz	r6,0(r3)		/* align to doubleword */
	subi	r5,r5,2
	addi	r3,r3,2
	adde	r0,r0,r6
dstnr;	sth	r6,0(r4)
	addi	r4,r4,2
	bdnz	1b

.Lcopy_aligned:
	/*
	 * We unroll the loop such that each iteration is 64 bytes with an
	 * entry and exit limb of 64 bytes, meaning a minimum size of
	 * 128 bytes.
	 */
	srdi.	r6,r5,7
	beq	.Lcopy_tail_doublewords		/* len < 128 */

	srdi	r6,r5,6
	subi	r6,r6,1
	mtctr	r6

	stdu	r1,-STACKFRAMESIZE(r1)
	std	r14,STK_REG(R14)(r1)
	std	r15,STK_REG(R15)(r1)
	std	r16,STK_REG(R16)(r1)

source;	ld	r6,0(r3)
source;	ld	r9,8(r3)

source;	ld	r10,16(r3)
source;	ld	r11,24(r3)

	/*
	 * On POWER6 and POWER7 back to back adde instructions take 2 cycles
	 * because of the XER dependency. This means the fastest this loop can
	 * go is 16 cycles per iteration. The scheduling of the loop below has
	 * been shown to hit this on both POWER6 and POWER7.
	 */
	.align 5
2:
	adde	r0,r0,r6
source;	ld	r12,32(r3)
source;	ld	r14,40(r3)

	adde	r0,r0,r9
source;	ld	r15,48(r3)
source;	ld	r16,56(r3)
	addi	r3,r3,64

	adde	r0,r0,r10
dest;	std	r6,0(r4)
dest;	std	r9,8(r4)

	adde	r0,r0,r11
dest;	std	r10,16(r4)
dest;	std	r11,24(r4)

	adde	r0,r0,r12
dest;	std	r12,32(r4)
dest;	std	r14,40(r4)

	adde	r0,r0,r14
dest;	std	r15,48(r4)
dest;	std	r16,56(r4)
	addi	r4,r4,64

	adde	r0,r0,r15
source;	ld	r6,0(r3)
source;	ld	r9,8(r3)

	adde	r0,r0,r16
source;	ld	r10,16(r3)
source;	ld	r11,24(r3)
	bdnz	2b

	adde	r0,r0,r6
source;	ld	r12,32(r3)
source;	ld	r14,40(r3)

	adde	r0,r0,r9
source;	ld	r15,48(r3)
source;	ld	r16,56(r3)
	addi	r3,r3,64

	adde	r0,r0,r10
dest;	std	r6,0(r4)
dest;	std	r9,8(r4)

	adde	r0,r0,r11
dest;	std	r10,16(r4)
dest;	std	r11,24(r4)

	adde	r0,r0,r12
dest;	std	r12,32(r4)
dest;	std	r14,40(r4)

	adde	r0,r0,r14
dest;	std	r15,48(r4)
dest;	std	r16,56(r4)
	addi	r4,r4,64

	adde	r0,r0,r15
	adde	r0,r0,r16

	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)
	addi	r1,r1,STACKFRAMESIZE

	andi.	r5,r5,63

.Lcopy_tail_doublewords:		/* Up to 127 bytes to go */
	srdi.	r6,r5,3
	beq	.Lcopy_tail_word

	mtctr	r6
3:
srcnr;	ld	r6,0(r3)
	addi	r3,r3,8
	adde	r0,r0,r6
dstnr;	std	r6,0(r4)
	addi	r4,r4,8
	bdnz	3b

	andi.	r5,r5,7

.Lcopy_tail_word:			/* Up to 7 bytes to go */
	srdi.	r6,r5,2
	beq	.Lcopy_tail_halfword

srcnr;	lwz	r6,0(r3)
	addi	r3,r3,4
	adde	r0,r0,r6
dstnr;	stw	r6,0(r4)
	addi	r4,r4,4
	subi	r5,r5,4

.Lcopy_tail_halfword:			/* Up to 3 bytes to go */
	srdi.	r6,r5,1
	beq	.Lcopy_tail_byte

srcnr;	lhz	r6,0(r3)
	addi	r3,r3,2
	adde	r0,r0,r6
dstnr;	sth	r6,0(r4)
	addi	r4,r4,2
	subi	r5,r5,2

.Lcopy_tail_byte:			/* Up to 1 byte to go */
	andi.	r6,r5,1
	beq	.Lcopy_finish

srcnr;	lbz	r6,0(r3)
#ifdef __BIG_ENDIAN__
	sldi	r9,r6,8			/* Pad the byte out to 16 bits */
	adde	r0,r0,r9
#else
	adde	r0,r0,r6
#endif
dstnr;	stb	r6,0(r4)

.Lcopy_finish:
	addze	r0,r0			/* add in final carry */
	rldicl	r4,r0,32,0		/* fold two 32 bit halves together */
	add	r3,r4,r0
	srdi	r3,r3,32
	blr

.Lerror:
	ld	r14,STK_REG(R14)(r1)
	ld	r15,STK_REG(R15)(r1)
	ld	r16,STK_REG(R16)(r1)
	addi	r1,r1,STACKFRAMESIZE
.Lerror_nr:
	li	r3,0
	blr

EXPORT_SYMBOL(csum_partial_copy_generic)

/*
 * __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 *			    const struct in6_addr *daddr,
 *			    __u32 len, __u8 proto, __wsum sum)
 */
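
/*
 * An illustrative (untested) C model of the computation below: sum the
 * two 64-bit halves of each IPv6 address plus (len + proto) and the
 * incoming sum, keeping the carries, then fold 64 -> 32 -> 16 bits and
 * complement (the little-endian rotldi adjustment is omitted here):
 *
 *	u64 acc = 0, parts[] = {
 *		saddr_hi, saddr_lo, daddr_hi, daddr_lo,
 *		(u64)len + proto + sum,
 *	};
 *	for (int i = 0; i < 5; i++) {
 *		acc += parts[i];
 *		acc += (acc < parts[i]);	// end-around carry
 *	}
 *	// fold to 32 bits, then to 16 bits, then return ~acc
 */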
_GLOBAL(csum_ipv6_magic)
	ld	r8, 0(r3)
	ld	r9, 8(r3)
	add	r5, r5, r6
	addc	r0, r8, r9
	ld	r10, 0(r4)
	ld	r11, 8(r4)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	rotldi	r5, r5, 8
#endif
	adde	r0, r0, r10
	add	r5, r5, r7
	adde	r0, r0, r11
	adde	r0, r0, r5
	addze	r0, r0
	rotldi	r3, r0, 32	/* fold two 32 bit halves together */
	add	r3, r0, r3
	srdi	r0, r3, 32
	rotlwi	r3, r0, 16	/* fold two 16 bit halves together */
	add	r3, r0, r3
	not	r3, r3
	rlwinm	r3, r3, 16, 16, 31
	blr
EXPORT_SYMBOL(csum_ipv6_magic)
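
/*
 * The 32 -> 16 bit fold above is the usual csum_fold pattern; in C
 * terms (illustrative, with rol32 as a stand-in helper):
 *
 *	u32 x = ...;			// folded 32-bit sum
 *	return (u16)(~(x + rol32(x, 16)) >> 16);
 */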