Commit d91fb5c2 authored by Will Deacon, committed by Catalin Marinas

arm64: vdso: rework __do_get_tspec register allocation and return shift

In preparation for sub-ns precision in the vdso timespec maths, change
the __do_get_tspec register allocation so that we return the clocksource
shift value instead of the unused xtime tspec.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent f84a935d
...@@ -62,18 +62,18 @@ ENTRY(__kernel_gettimeofday) ...@@ -62,18 +62,18 @@ ENTRY(__kernel_gettimeofday)
/* If tv is NULL, skip to the timezone code. */ /* If tv is NULL, skip to the timezone code. */
cbz x0, 2f cbz x0, 2f
bl __do_get_tspec bl __do_get_tspec
seqcnt_check w13, 1b seqcnt_check w9, 1b
/* Convert ns to us. */ /* Convert ns to us. */
mov x11, #1000 mov x13, #1000
udiv x10, x10, x11 udiv x11, x11, x13
stp x9, x10, [x0, #TVAL_TV_SEC] stp x10, x11, [x0, #TVAL_TV_SEC]
2: 2:
/* If tz is NULL, return 0. */ /* If tz is NULL, return 0. */
cbz x1, 3f cbz x1, 3f
ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST] ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
seqcnt_read w13 seqcnt_read w9
seqcnt_check w13, 1b seqcnt_check w9, 1b
stp w4, w5, [x1, #TZ_MINWEST] stp w4, w5, [x1, #TZ_MINWEST]
3: 3:
mov x0, xzr mov x0, xzr
...@@ -102,17 +102,17 @@ ENTRY(__kernel_clock_gettime) ...@@ -102,17 +102,17 @@ ENTRY(__kernel_clock_gettime)
cbnz use_syscall, 7f cbnz use_syscall, 7f
bl __do_get_tspec bl __do_get_tspec
seqcnt_check w13, 1b seqcnt_check w9, 1b
cmp w0, #CLOCK_MONOTONIC cmp w0, #CLOCK_MONOTONIC
b.ne 6f b.ne 6f
/* Get wtm timespec. */ /* Get wtm timespec. */
ldp x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC] ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
/* Check the sequence counter. */ /* Check the sequence counter. */
seqcnt_read w13 seqcnt_read w9
seqcnt_check w13, 1b seqcnt_check w9, 1b
b 4f b 4f
2: 2:
cmp w0, #CLOCK_REALTIME_COARSE cmp w0, #CLOCK_REALTIME_COARSE
...@@ -122,37 +122,37 @@ ENTRY(__kernel_clock_gettime) ...@@ -122,37 +122,37 @@ ENTRY(__kernel_clock_gettime)
/* Get coarse timespec. */ /* Get coarse timespec. */
adr vdso_data, _vdso_data adr vdso_data, _vdso_data
3: seqcnt_acquire 3: seqcnt_acquire
ldp x9, x10, [vdso_data, #VDSO_XTIME_CRS_SEC] ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
/* Get wtm timespec. */ /* Get wtm timespec. */
ldp x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC] ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
/* Check the sequence counter. */ /* Check the sequence counter. */
seqcnt_read w13 seqcnt_read w9
seqcnt_check w13, 3b seqcnt_check w9, 3b
cmp w0, #CLOCK_MONOTONIC_COARSE cmp w0, #CLOCK_MONOTONIC_COARSE
b.ne 6f b.ne 6f
4: 4:
/* Add on wtm timespec. */ /* Add on wtm timespec. */
add x9, x9, x14 add x10, x10, x13
add x10, x10, x15 add x11, x11, x14
/* Normalise the new timespec. */ /* Normalise the new timespec. */
mov x14, #NSEC_PER_SEC_LO16 mov x15, #NSEC_PER_SEC_LO16
movk x14, #NSEC_PER_SEC_HI16, lsl #16 movk x15, #NSEC_PER_SEC_HI16, lsl #16
cmp x10, x14 cmp x11, x15
b.lt 5f b.lt 5f
sub x10, x10, x14 sub x11, x11, x15
add x9, x9, #1 add x10, x10, #1
5: 5:
cmp x10, #0 cmp x11, #0
b.ge 6f b.ge 6f
add x10, x10, x14 add x11, x11, x15
sub x9, x9, #1 sub x10, x10, #1
6: /* Store to the user timespec. */ 6: /* Store to the user timespec. */
stp x9, x10, [x1, #TSPEC_TV_SEC] stp x10, x11, [x1, #TSPEC_TV_SEC]
mov x0, xzr mov x0, xzr
ret x2 ret x2
7: 7:
...@@ -203,39 +203,39 @@ ENDPROC(__kernel_clock_getres) ...@@ -203,39 +203,39 @@ ENDPROC(__kernel_clock_getres)
* Expects vdso_data to be initialised. * Expects vdso_data to be initialised.
* Clobbers the temporary registers (x9 - x15). * Clobbers the temporary registers (x9 - x15).
* Returns: * Returns:
* - (x9, x10) = (ts->tv_sec, ts->tv_nsec) * - w9 = vDSO sequence counter
* - (x11, x12) = (xtime->tv_sec, xtime->tv_nsec) * - (x10, x11) = (ts->tv_sec, ts->tv_nsec)
* - w13 = vDSO sequence counter * - w12 = cs_shift
*/ */
ENTRY(__do_get_tspec) ENTRY(__do_get_tspec)
.cfi_startproc .cfi_startproc
/* Read from the vDSO data page. */ /* Read from the vDSO data page. */
ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST] ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
ldp x11, x12, [vdso_data, #VDSO_XTIME_CLK_SEC] ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
ldp w14, w15, [vdso_data, #VDSO_CS_MULT] ldp w11, w12, [vdso_data, #VDSO_CS_MULT]
seqcnt_read w13 seqcnt_read w9
/* Read the physical counter. */ /* Read the physical counter. */
isb isb
mrs x9, cntpct_el0 mrs x15, cntpct_el0
/* Calculate cycle delta and convert to ns. */ /* Calculate cycle delta and convert to ns. */
sub x10, x9, x10 sub x10, x15, x10
/* We can only guarantee 56 bits of precision. */ /* We can only guarantee 56 bits of precision. */
movn x9, #0xff00, lsl #48 movn x15, #0xff00, lsl #48
and x10, x9, x10 and x10, x15, x10
mul x10, x10, x14 mul x10, x10, x11
lsr x10, x10, x15 lsr x10, x10, x12
/* Use the kernel time to calculate the new timespec. */ /* Use the kernel time to calculate the new timespec. */
add x10, x12, x10 mov x11, #NSEC_PER_SEC_LO16
mov x14, #NSEC_PER_SEC_LO16 movk x11, #NSEC_PER_SEC_HI16, lsl #16
movk x14, #NSEC_PER_SEC_HI16, lsl #16 add x15, x10, x14
udiv x15, x10, x14 udiv x14, x15, x11
add x9, x15, x11 add x10, x13, x14
mul x14, x14, x15 mul x13, x14, x11
sub x10, x10, x14 sub x11, x15, x13
ret ret
.cfi_endproc .cfi_endproc
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment