/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>

#ifdef CONFIG_NEED_RET_TO_USER
#include <mach/entry-macro.S>
#else
	@ Empty fallback: platforms that need no machine-specific work on the
	@ return-to-user path get a no-op macro with the same two-arg signature.
	.macro  arch_ret_to_user, tmp1, tmp2
	.endm
#endif

#include "entry-header.S"


	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 *
 * NOTE(review): tsk and the S_*/TI_* offsets are presumably register
 * aliases/constants provided via entry-header.S — confirm there.
 */
ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending		@ pending work -> slow path
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif

	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	@ do_work_pending returned non-zero: restart the syscall.  A negative
	@ return selects sys_restart_syscall instead of the original number.
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending			@ signals/resched etc. pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
	asm_trace_hardirqs_on
#endif
	/* perform architecture specific actions before user return */
	arch_ret_to_user r1, lr

	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 *
 * NOTE(review): the r5/r4 convention (r5 = kernel-thread function or 0,
 * r4 = its argument) is presumably established by copy_thread() — confirm.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0				@ kernel thread?
	movne	r0, r4				@ yes: arg in r0 ...
	adrne	lr, BSYM(1f)			@ ... return here when it exits
	movne	pc, r5				@ ... and call the thread fn
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

97 98
	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
Linus Torvalds's avatar
Linus Torvalds committed
99
#include "calls.S"
100 101 102 103 104 105 106 107 108

/*
 * Ensure that the system call table is equal to __NR_syscalls,
 * which is the value the rest of the system sees
 */
.ifne NR_syscalls - __NR_syscalls
.error "__NR_syscalls is not equal to the size of the syscall table"
.endif

109 110
#undef CALL
#define CALL(x) .long x
Linus Torvalds's avatar
Linus Torvalds committed
111

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov     ip, sp
 *	push    {fp, ip, lr, pc}
 *	sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

@ Normalise an instrumented-function address: strip the Thumb bit and back
@ up over the mcount call site so \rd holds the function's entry address.
.macro mcount_adjust_addr rd, rn
	bic	\rd, \rn, #1		@ clear the Thumb bit if present
	sub	\rd, \rd, #MCOUNT_INSN_SIZE
.endm

@ Common mcount body (non-dynamic ftrace).  Dispatches to the registered
@ ftrace_trace_function, or to the graph-tracer entry points, or returns
@ straight through when tracing is disabled (function == ftrace_stub).
@ Relies on mcount_enter/mcount_get_lr/mcount_exit defined by the caller.
.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2				@ tracer installed?
	bne	1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr     r1, =ftrace_graph_return
	ldr     r2, [r1]
	cmp     r0, r2
	bne     ftrace_graph_caller\suffix

	ldr     r1, =ftrace_graph_entry
	ldr     r2, [r1]
	ldr     r0, =ftrace_graph_entry_stub
	cmp     r0, r2
	bne     ftrace_graph_caller\suffix
#endif

	mcount_exit				@ nothing to trace

1:	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function
	adr	lr, BSYM(2f)
	mov	pc, r2				@ call the tracer
2:	mcount_exit
.endm

@ Dynamic-ftrace caller body: the bl below is patched at runtime by
@ arch/arm/kernel/ftrace.c to call the active tracer (or left as a stub).
.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub			@ patched by dynamic ftrace

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0				@ nop slot, patched to branch
#endif

	mcount_exit
.endm

@ Hand off to the function-graph tracer: r0 = &parent-lr slot,
@ r1 = instrumented function address, r2 = frame pointer.
.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1
#else
	@ called from __mcount, untouched in lr
	mcount_adjust_addr	r1, lr	@ instrumented routine (func)
#endif
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm

#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 *
 * Entry points for the old (pre-GCC-4.4) "bl mcount" ABI, which relies on
 * an APCS frame pointer: the caller's lr lives at [fp, #-4].
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ Dynamic ftrace patches call sites instead; just return transparently.
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

@ Drop the old-ABI helper macros so they can be redefined for
@ __gnu_mcount_nc below.
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif

/*
 * __gnu_mcount_nc
 *
 * New-ABI entry point (GCC 4.4+): the call site has already pushed lr,
 * so the caller's original lr sits just above our save area.
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]		@ original lr, pushed by the call site
.endm

.macro mcount_exit
	@ Pops our {r0-r3, lr-as-ip} plus the call site's pushed lr,
	@ then returns via ip — ip is the one register we may clobber.
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ Call sites are patched instead; undo the "push {lr}" and return.
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

@ Tidy up: the mcount helper macros are no longer needed past this point.
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	@ Trampoline the graph tracer substitutes for a traced function's
	@ return address; ftrace_return_to_handler yields the real one.
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}		@ preserve the function's return values
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

@ Default no-op tracer target; compared against by __mcount above.
ENTRY(ftrace_stub)
.Lftrace_stub:
	mov	pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
ENTRY(vector_swi)
	@ Build a struct pt_regs on the SVC stack from the caller's state.
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
	rev	r10, r10			@ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always put syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else
	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

#ifdef CONFIG_SECCOMP
	tst	r10, #_TIF_SECCOMP
	beq	1f
	mov	r0, scno
	bl	__secure_computing
	add	r0, sp, #S_R0 + S_OFF		@ pointer to regs
	ldmia	r0, {r0 - r3}			@ have to reload r0 - r3
1:
#endif

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, BSYM(ret_fast_syscall)	@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	@ Out-of-range number: either an ARM-private syscall or ENOSYS.
	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func
ENDPROC(vector_swi)
Linus Torvalds's avatar
Linus Torvalds committed
444 445 446 447 448 449

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
450 451 452
	mov	r1, scno
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter
Linus Torvalds's avatar
Linus Torvalds committed
453

454
	adr	lr, BSYM(__sys_trace_return)	@ return address
455
	mov	scno, r0			@ syscall number (possibly new)
Linus Torvalds's avatar
Linus Torvalds committed
456 457
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
458 459
	ldmccia	r1, {r0 - r6}			@ have to reload r0 - r6
	stmccia	sp, {r4, r5}			@ and update the stack args
Linus Torvalds's avatar
Linus Torvalds committed
460 461 462 463 464
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
465 466 467
	mov	r1, scno
	mov	r0, sp
	bl	syscall_trace_exit
Linus Torvalds's avatar
Linus Torvalds committed
468 469 470 471 472 473 474
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	@ Literal holding the address of cr_alignment, loaded by vector_swi
	@ to restore the CP15 control register on kernel entry.
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
		bic	scno, r0, #__NR_OABI_SYSCALL_BASE
		cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
		cmpne	scno, #NR_syscalls	@ check range
		@ In range (and not recursively sys_syscall itself):
		@ shift args down one slot and dispatch through the table.
		stmloia	sp, {r5, r6}		@ shuffle args
		movlo	r0, r1
		movlo	r1, r2
		movlo	r2, r3
		movlo	r3, r4
		ldrlo	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF		@ r0 = pt_regs
		mov	why, #0		@ prevent syscall restart handling
		b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF		@ r0 = pt_regs
		mov	why, #0		@ prevent syscall restart handling
		b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]	@ r2 = user sp from pt_regs
		b	do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

sys_statfs64_wrapper:
		teq	r1, #88			@ legacy struct statfs64 size
		moveq	r1, #84			@ normalise to the kernel's size
		b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
		teq	r1, #88			@ legacy struct statfs64 size
		moveq	r1, #84			@ normalise to the kernel's size
		b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
		tst	r5, #PGOFF_MASK		@ offset not page-aligned?
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]		@ 6th arg goes on the stack
		beq	sys_mmap_pgoff
		mov	r0, #-EINVAL
		mov	pc, lr
#else
		str	r5, [sp, #4]		@ 6th arg goes on the stack
		b	sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
		stmia	sp, {r3, r4}		@ 64-bit arg split differs: stack it
		b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
		stmia	sp, {r3, r4}		@ 64-bit arg split differs: stack it
		b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
		mov	r3, r2			@ shift 64-bit length up one pair
		mov	r2, r1
		b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
		mov	r3, r2			@ shift 64-bit length up one pair
		mov	r2, r1
		b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
		str	r3, [sp]		@ count moves to the stack
		mov	r3, r2			@ shift 64-bit offset up one pair
		mov	r2, r1
		b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif