Commit 8695c37d authored by David S. Miller
Browse files

sparc: Convert some assembler over to linkage.h's ENTRY/ENDPROC



Use those, instead of doing it all by hand.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b55e81b9
......@@ -5,10 +5,10 @@
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*/
#include <linux/linkage.h>
.text
.align 4
.globl __ashldi3
__ashldi3:
ENTRY(__ashldi3)
cmp %o2, 0
be 9f
mov 0x20, %g2
......@@ -32,3 +32,4 @@ __ashldi3:
9:
retl
nop
ENDPROC(__ashldi3)
......@@ -5,10 +5,10 @@
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/linkage.h>
.text
.align 4
.globl __ashrdi3
__ashrdi3:
ENTRY(__ashrdi3)
tst %o2
be 3f
or %g0, 32, %g2
......@@ -34,3 +34,4 @@ __ashrdi3:
3:
jmpl %o7 + 8, %g0
nop
ENDPROC(__ashrdi3)
......@@ -3,6 +3,7 @@
* Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
......@@ -13,9 +14,7 @@
* memory barriers, and a second which returns
* a value and does the barriers.
*/
.globl atomic_add
.type atomic_add,#function
atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
add %g1, %o0, %g7
......@@ -26,11 +25,9 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_add, .-atomic_add
ENDPROC(atomic_add)
.globl atomic_sub
.type atomic_sub,#function
atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
sub %g1, %o0, %g7
......@@ -41,11 +38,9 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_sub, .-atomic_sub
ENDPROC(atomic_sub)
.globl atomic_add_ret
.type atomic_add_ret,#function
atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
add %g1, %o0, %g7
......@@ -56,11 +51,9 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
retl
sra %g1, 0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_add_ret, .-atomic_add_ret
ENDPROC(atomic_add_ret)
.globl atomic_sub_ret
.type atomic_sub_ret,#function
atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: lduw [%o1], %g1
sub %g1, %o0, %g7
......@@ -71,11 +64,9 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
retl
sra %g1, 0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic_sub_ret, .-atomic_sub_ret
ENDPROC(atomic_sub_ret)
.globl atomic64_add
.type atomic64_add,#function
atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
add %g1, %o0, %g7
......@@ -86,11 +77,9 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_add, .-atomic64_add
ENDPROC(atomic64_add)
.globl atomic64_sub
.type atomic64_sub,#function
atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
sub %g1, %o0, %g7
......@@ -101,11 +90,9 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
retl
nop
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_sub, .-atomic64_sub
ENDPROC(atomic64_sub)
.globl atomic64_add_ret
.type atomic64_add_ret,#function
atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
add %g1, %o0, %g7
......@@ -116,11 +103,9 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
retl
add %g1, %o0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_add_ret, .-atomic64_add_ret
ENDPROC(atomic64_add_ret)
.globl atomic64_sub_ret
.type atomic64_sub_ret,#function
atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o1], %g1
sub %g1, %o0, %g7
......@@ -131,4 +116,4 @@ atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
retl
sub %g1, %o0, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
.size atomic64_sub_ret, .-atomic64_sub_ret
ENDPROC(atomic64_sub_ret)
......@@ -3,14 +3,13 @@
* Copyright (C) 2000, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
.text
.globl test_and_set_bit
.type test_and_set_bit,#function
test_and_set_bit: /* %o0=nr, %o1=addr */
ENTRY(test_and_set_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
......@@ -29,11 +28,9 @@ test_and_set_bit: /* %o0=nr, %o1=addr */
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
.size test_and_set_bit, .-test_and_set_bit
ENDPROC(test_and_set_bit)
.globl test_and_clear_bit
.type test_and_clear_bit,#function
test_and_clear_bit: /* %o0=nr, %o1=addr */
ENTRY(test_and_clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
......@@ -52,11 +49,9 @@ test_and_clear_bit: /* %o0=nr, %o1=addr */
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
.size test_and_clear_bit, .-test_and_clear_bit
ENDPROC(test_and_clear_bit)
.globl test_and_change_bit
.type test_and_change_bit,#function
test_and_change_bit: /* %o0=nr, %o1=addr */
ENTRY(test_and_change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
......@@ -75,11 +70,9 @@ test_and_change_bit: /* %o0=nr, %o1=addr */
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
.size test_and_change_bit, .-test_and_change_bit
ENDPROC(test_and_change_bit)
.globl set_bit
.type set_bit,#function
set_bit: /* %o0=nr, %o1=addr */
ENTRY(set_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
......@@ -96,11 +89,9 @@ set_bit: /* %o0=nr, %o1=addr */
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
.size set_bit, .-set_bit
ENDPROC(set_bit)
.globl clear_bit
.type clear_bit,#function
clear_bit: /* %o0=nr, %o1=addr */
ENTRY(clear_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
......@@ -117,11 +108,9 @@ clear_bit: /* %o0=nr, %o1=addr */
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
.size clear_bit, .-clear_bit
ENDPROC(clear_bit)
.globl change_bit
.type change_bit,#function
change_bit: /* %o0=nr, %o1=addr */
ENTRY(change_bit) /* %o0=nr, %o1=addr */
BACKOFF_SETUP(%o3)
srlx %o0, 6, %g1
mov 1, %o2
......@@ -138,4 +127,4 @@ change_bit: /* %o0=nr, %o1=addr */
retl
nop
2: BACKOFF_SPIN(%o3, %o4, 1b)
.size change_bit, .-change_bit
ENDPROC(change_bit)
......@@ -4,6 +4,7 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/linkage.h>
#include <asm/page.h>
/* Zero out 64 bytes of memory at (buf + offset).
......@@ -44,10 +45,7 @@
*/
.text
.align 4
.globl bzero_1page, __copy_1page
bzero_1page:
ENTRY(bzero_1page)
/* NOTE: If you change the number of insns of this routine, please check
* arch/sparc/mm/hypersparc.S */
/* %o0 = buf */
......@@ -65,8 +63,9 @@ bzero_1page:
retl
nop
ENDPROC(bzero_1page)
__copy_1page:
ENTRY(__copy_1page)
/* NOTE: If you change the number of insns of this routine, please check
* arch/sparc/mm/hypersparc.S */
/* %o0 = dst, %o1 = src */
......@@ -87,3 +86,4 @@ __copy_1page:
retl
nop
ENDPROC(__copy_1page)
......@@ -4,11 +4,11 @@
* Copyright (C) 2005 David S. Miller <davem@davemloft.net>
*/
#include <linux/linkage.h>
.text
.globl memset
.type memset, #function
memset: /* %o0=buf, %o1=pat, %o2=len */
ENTRY(memset) /* %o0=buf, %o1=pat, %o2=len */
and %o1, 0xff, %o3
mov %o2, %o1
sllx %o3, 8, %g1
......@@ -19,9 +19,7 @@ memset: /* %o0=buf, %o1=pat, %o2=len */
ba,pt %xcc, 1f
or %g1, %o2, %o2
.globl __bzero
.type __bzero, #function
__bzero: /* %o0=buf, %o1=len */
ENTRY(__bzero) /* %o0=buf, %o1=len */
clr %o2
1: mov %o0, %o3
brz,pn %o1, __bzero_done
......@@ -78,8 +76,8 @@ __bzero_tiny:
__bzero_done:
retl
mov %o3, %o0
.size __bzero, .-__bzero
.size memset, .-memset
ENDPROC(__bzero)
ENDPROC(memset)
#define EX_ST(x,y) \
98: x,y; \
......@@ -89,9 +87,7 @@ __bzero_done:
.text; \
.align 4;
.globl __clear_user
.type __clear_user, #function
__clear_user: /* %o0=buf, %o1=len */
ENTRY(__clear_user) /* %o0=buf, %o1=len */
brz,pn %o1, __clear_user_done
cmp %o1, 16
bl,pn %icc, __clear_user_tiny
......@@ -146,4 +142,4 @@ __clear_user_tiny:
__clear_user_done:
retl
clr %o0
.size __clear_user, .-__clear_user
ENDPROC(__clear_user)
#include <linux/linkage.h>
.text
.align 32
.globl ip_fast_csum
.type ip_fast_csum,#function
ip_fast_csum: /* %o0 = iph, %o1 = ihl */
ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */
sub %o1, 4, %g7
lduw [%o0 + 0x00], %o2
lduw [%o0 + 0x04], %g2
......@@ -31,4 +30,4 @@ ip_fast_csum: /* %o0 = iph, %o1 = ihl */
set 0xffff, %o1
retl
and %o2, %o1, %o0
.size ip_fast_csum, .-ip_fast_csum
ENDPROC(ip_fast_csum)
#include <linux/linkage.h>
.globl __lshrdi3
__lshrdi3:
ENTRY(__lshrdi3)
cmp %o2, 0
be 3f
mov 0x20, %g2
......@@ -24,3 +24,4 @@ __lshrdi3:
3:
retl
nop
ENDPROC(__lshrdi3)
......@@ -4,11 +4,10 @@
* Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <linux/linkage.h>
.text
.align 32
.globl memmove
.type memmove,#function
memmove: /* o0=dst o1=src o2=len */
ENTRY(memmove) /* o0=dst o1=src o2=len */
mov %o0, %g1
cmp %o0, %o1
bleu,pt %xcc, memcpy
......@@ -28,4 +27,4 @@ memmove: /* o0=dst o1=src o2=len */
retl
mov %g1, %o0
.size memmove, .-memmove
ENDPROC(memmove)
......@@ -8,16 +8,16 @@
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/linkage.h>
#include <asm/asi.h>
#define LO_MAGIC 0x01010101
#define HI_MAGIC 0x80808080
.align 4
.global __strlen_user, __strnlen_user
__strlen_user:
ENTRY(__strlen_user)
sethi %hi(32768), %o1
__strnlen_user:
ENTRY(__strnlen_user)
mov %o1, %g1
mov %o0, %o1
andcc %o0, 3, %g0
......@@ -78,6 +78,8 @@ __strnlen_user:
mov 2, %o0
23: retl
mov 3, %o0
ENDPROC(__strlen_user)
ENDPROC(__strnlen_user)
.section .fixup,#alloc,#execinstr
.align 4
......
......@@ -3,10 +3,10 @@
* generic strncmp routine.
*/
#include <linux/linkage.h>
.text
.align 4
.global strncmp
strncmp:
ENTRY(strncmp)
mov %o0, %g3
mov 0, %o3
......@@ -115,3 +115,4 @@ strncmp:
and %g2, 0xff, %o0
retl
sub %o3, %o0, %o0
ENDPROC(strncmp)
......@@ -4,13 +4,11 @@
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/linkage.h>
#include <asm/asi.h>
.text
.align 32
.globl strncmp
.type strncmp,#function
strncmp:
ENTRY(strncmp)
brlez,pn %o2, 3f
lduba [%o0] (ASI_PNF), %o3
1:
......@@ -29,4 +27,4 @@ strncmp:
3:
retl
clr %o0
.size strncmp, .-strncmp
ENDPROC(strncmp)
......@@ -3,11 +3,11 @@
* Copyright(C) 1996 David S. Miller
*/
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/errno.h>
.text
.align 4
/* Must return:
*
......@@ -16,8 +16,7 @@
* bytes copied if we hit a null byte
*/
.globl __strncpy_from_user
__strncpy_from_user:
ENTRY(__strncpy_from_user)
/* %o0=dest, %o1=src, %o2=count */
mov %o2, %o3
1:
......@@ -35,6 +34,7 @@ __strncpy_from_user:
add %o2, 1, %o0
retl
sub %o3, %o0, %o0
ENDPROC(__strncpy_from_user)
.section .fixup,#alloc,#execinstr
.align 4
......
......@@ -4,6 +4,7 @@
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/errno.h>
......@@ -12,7 +13,6 @@
0: .xword 0x0101010101010101
.text
.align 32
/* Must return:
*
......@@ -30,9 +30,7 @@
* and average length is 18 or so.
*/
.globl __strncpy_from_user
.type __strncpy_from_user,#function
__strncpy_from_user:
ENTRY(__strncpy_from_user)
/* %o0=dest, %o1=src, %o2=count */
andcc %o1, 7, %g0 ! IEU1 Group
bne,pn %icc, 30f ! CTI
......@@ -123,7 +121,7 @@ __strncpy_from_user:
mov %o2, %o0
2: retl
add %o2, %o3, %o0
.size __strncpy_from_user, .-__strncpy_from_user
ENDPROC(__strncpy_from_user)
.section __ex_table,"a"
.align 4
......
......@@ -8,6 +8,7 @@
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
*/
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
......@@ -19,12 +20,9 @@
* !(len & 127) && len >= 256
*/
.text
.align 32
/* VIS versions. */
.globl xor_vis_2
.type xor_vis_2,#function
xor_vis_2:
ENTRY(xor_vis_2)
rd %fprs, %o5
andcc %o5, FPRS_FEF|FPRS_DU, %g0
be,pt %icc, 0f
......@@ -91,11 +89,9 @@ xor_vis_2:
wr %g1, %g0, %asi
retl
wr %g0, 0, %fprs
.size xor_vis_2, .-xor_vis_2
ENDPROC(xor_vis_2)
.globl xor_vis_3
.type xor_vis_3,#function
xor_vis_3:
ENTRY(xor_vis_3)
rd %fprs, %o5
andcc %o5, FPRS_FEF|FPRS_DU, %g0
be,pt %icc, 0f
......@@ -159,11 +155,9 @@ xor_vis_3:
wr %g1, %g0, %asi
retl
wr %g0, 0, %fprs
.size xor_vis_3, .-xor_vis_3
ENDPROC(xor_vis_3)
.globl xor_vis_4
.type xor_vis_4,#function
xor_vis_4:
ENTRY(xor_vis_4)
rd %fprs, %o5
andcc %o5, FPRS_FEF|FPRS_DU, %g0
be,pt %icc, 0f
......@@ -246,11 +240,9 @@ xor_vis_4:
wr %g1, %g0, %asi
retl
wr %g0, 0, %fprs
.size xor_vis_4, .-xor_vis_4
ENDPROC(xor_vis_4)
.globl xor_vis_5
.type xor_vis_5,#function
xor_vis_5:
ENTRY(xor_vis_5)
save %sp, -192, %sp
rd %fprs, %o5
andcc %o5, FPRS_FEF|FPRS_DU, %g0
......@@ -354,12 +346,10 @@ xor_vis_5:
wr %g0, 0, %fprs
ret
restore
.size xor_vis_5, .-xor_vis_5
ENDPROC(xor_vis_5)
/* Niagara versions. */
.globl xor_niagara_2
.type xor_niagara_2,#function
xor_niagara_2: /* %o0=bytes, %o1=dest, %o2=src */
ENTRY(xor_niagara_2) /* %o0=bytes, %o1=dest, %o2=src */
save %sp, -192, %sp
prefetch [%i1], #n_writes
prefetch [%i2], #one_read
......@@ -402,11 +392,9 @@ xor_niagara_2: /* %o0=bytes, %o1=dest, %o2=src */
wr %g7, 0x0, %asi
ret
restore
.size xor_niagara_2, .-xor_niagara_2
ENDPROC(xor_niagara_2)
.globl xor_niagara_3
.type xor_niagara_3,#function
xor_niagara_3: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
ENTRY(xor_niagara_3) /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
save %sp, -192, %sp
prefetch [%i1], #n_writes
prefetch [%i2], #one_read
......@@ -465,11 +453,9 @@ xor_niagara_3: /* %o0=bytes, %o1=dest, %o2=src1, %o3=src2 */
wr %g7, 0x0, %asi