sparc: Fix VDSO build with older binutils.

Older versions of binutils do not allow symbol math across different
segments on sparc:

====================
Assembler messages:
99: Error: operation combines symbols in different segments
====================

This is controlled by whether or not DIFF_EXPR_OK is defined in
gas/config/tc-*.h and for sparc this was not the case until mid-2017.

So we have to select between %stick and %tick another way.

Do what powerpc does and emit two versions of the relevant functions,
one using %tick and one using %stick, and patch the symbols in the
dynamic symbol table.

Fixes: 2f6c9bf31a ("sparc: Improve VDSO instruction patching.")
Reported-by: Meelis Roos <mroos@linux.ee>
Tested-by: Meelis Roos <mroos@linux.ee>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller
2018-10-25 10:36:19 -07:00
parent 44adbac8f7
commit caf539cd10
7 changed files with 337 additions and 64 deletions

View File

@@ -90,16 +90,15 @@ notrace static __always_inline u64 vread_tick(void)
{
	u64 ret;

	/*
	 * Plain %tick read.  The old in-place instruction patching via
	 * the .tick_patch sections required cross-segment symbol
	 * arithmetic, which older binutils reject on sparc; the %stick
	 * variant now lives in the separate vread_tick_stick() below
	 * and the choice is made via dynamic-symbol-table patching.
	 */
	__asm__ __volatile__("rd %%tick, %0" : "=r" (ret));
	return ret;
}
/*
 * %stick counterpart of vread_tick().  ASR 24 is the system tick
 * (%stick) register on sparc64; this variant is selected by patching
 * the dynamic symbol table when %stick is the clock source.
 */
notrace static __always_inline u64 vread_tick_stick(void)
{
u64 ret;

__asm__ __volatile__("rd %%asr24, %0" : "=r" (ret));
return ret;
}
#else
@@ -107,16 +106,18 @@ notrace static __always_inline u64 vread_tick(void)
{
	register unsigned long long ret asm("o4");

	/*
	 * 32-bit build: read the 64-bit %tick into the low register of
	 * the %o4/%o5 pair (%L0), then shift right by 32 to materialize
	 * the high word in %H0.  As in the 64-bit variant, the old
	 * .tick_patch in-place patching is gone; the %stick version is
	 * the separate vread_tick_stick() function.
	 */
	__asm__ __volatile__("rd %%tick, %L0\n\t"
			     "srlx %L0, 32, %H0"
			     : "=r" (ret));
	return ret;
}
/*
 * %stick (ASR 24) counterpart of the 32-bit vread_tick(): read the
 * 64-bit counter into the low register of the pair, then shift to
 * produce the high word.
 */
notrace static __always_inline u64 vread_tick_stick(void)
{
register unsigned long long ret asm("o4");

__asm__ __volatile__("rd %%asr24, %L0\n\t"
"srlx %L0, 32, %H0"
: "=r" (ret));
return ret;
}
@@ -132,6 +133,16 @@ notrace static __always_inline u64 vgetsns(struct vvar_data *vvar)
return v * vvar->clock.mult;
}
/*
 * %stick flavour of vgetsns(): cycles elapsed since clock.cycle_last,
 * masked and scaled by clock.mult (still pre-shift nanoseconds).
 */
notrace static __always_inline u64 vgetsns_stick(struct vvar_data *vvar)
{
	u64 delta;

	delta = (vread_tick_stick() - vvar->clock.cycle_last) & vvar->clock.mask;
	return delta * vvar->clock.mult;
}
notrace static __always_inline int do_realtime(struct vvar_data *vvar,
struct timespec *ts)
{
@@ -152,6 +163,26 @@ notrace static __always_inline int do_realtime(struct vvar_data *vvar,
return 0;
}
/*
 * %stick flavour of do_realtime(): fill *ts with CLOCK_REALTIME from
 * the vvar page.  The do/while loop is a seqlock read side — retry if
 * the kernel updated the vvar data while we were reading it, so the
 * statement order inside the loop must not be changed.
 *
 * Always returns 0.
 */
notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
struct timespec *ts)
{
unsigned long seq;
u64 ns;

do {
seq = vvar_read_begin(vvar);
ts->tv_sec = vvar->wall_time_sec;
ns = vvar->wall_time_snsec;
ns += vgetsns_stick(vvar);
ns >>= vvar->clock.shift;
} while (unlikely(vvar_read_retry(vvar, seq)));

/* Fold whole seconds of ns into tv_sec; remainder becomes tv_nsec. */
ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
ts->tv_nsec = ns;

return 0;
}
notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
struct timespec *ts)
{
@@ -172,6 +203,26 @@ notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
return 0;
}
/*
 * %stick flavour of do_monotonic(): fill *ts with CLOCK_MONOTONIC from
 * the vvar page.  Seqlock read loop — retries until a consistent
 * snapshot is read; do not reorder the statements inside the loop.
 *
 * Always returns 0.
 */
notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
struct timespec *ts)
{
unsigned long seq;
u64 ns;

do {
seq = vvar_read_begin(vvar);
ts->tv_sec = vvar->monotonic_time_sec;
ns = vvar->monotonic_time_snsec;
ns += vgetsns_stick(vvar);
ns >>= vvar->clock.shift;
} while (unlikely(vvar_read_retry(vvar, seq)));

/* Fold whole seconds of ns into tv_sec; remainder becomes tv_nsec. */
ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
ts->tv_nsec = ns;

return 0;
}
notrace static int do_realtime_coarse(struct vvar_data *vvar,
struct timespec *ts)
{
@@ -227,6 +278,31 @@ int
clock_gettime(clockid_t, struct timespec *)
__attribute__((weak, alias("__vdso_clock_gettime")));
/*
 * clock_gettime() entry point for %stick based systems.  The VDSO
 * symbol patching selects this function (instead of the %tick one)
 * via the dynamic symbol table when %stick is the clock source.
 */
notrace int
__vdso_clock_gettime_stick(clockid_t clock, struct timespec *ts)
{
	struct vvar_data *vvd = get_vvar_data();

	/* The coarse clocks never need the cycle counter. */
	if (clock == CLOCK_REALTIME_COARSE)
		return do_realtime_coarse(vvd, ts);
	if (clock == CLOCK_MONOTONIC_COARSE)
		return do_monotonic_coarse(vvd, ts);

	if (vvd->vclock_mode != VCLOCK_NONE) {
		if (clock == CLOCK_REALTIME)
			return do_realtime_stick(vvd, ts);
		if (clock == CLOCK_MONOTONIC)
			return do_monotonic_stick(vvd, ts);
	}

	/* Unknown clock ID, or no vclock support: fall back to the syscall. */
	return vdso_fallback_gettime(clock, ts);
}
notrace int
__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
@@ -262,3 +338,36 @@ __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
int
gettimeofday(struct timeval *, struct timezone *)
__attribute__((weak, alias("__vdso_gettimeofday")));
/*
 * gettimeofday() entry point for %stick based systems, selected by the
 * VDSO dynamic-symbol-table patching.  Computes the realtime clock via
 * do_realtime_stick() and converts nanoseconds to microseconds in
 * place through a timespec/timeval union overlay.
 */
notrace int
__vdso_gettimeofday_stick(struct timeval *tv, struct timezone *tz)
{
	struct vvar_data *vvd = get_vvar_data();

	if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
		return vdso_fallback_gettimeofday(tv, tz);

	if (likely(tv != NULL)) {
		union tstv_t {
			struct timespec ts;
			struct timeval tv;
		} *tstv = (union tstv_t *) tv;

		do_realtime_stick(vvd, &tstv->ts);
		/*
		 * Assign tv_nsec to tv_usec before dividing so the
		 * division happens in the type of tv_usec, not tv_nsec.
		 * do_realtime_stick() has already folded any overflow
		 * into tv_sec, so the value is below one billion and
		 * assigns to an int safely.
		 */
		tstv->tv.tv_usec = tstv->ts.tv_nsec;
		tstv->tv.tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		/* Copy the fields by hand: some old compilers fail to
		 * inline memcpy here.
		 */
		tz->tz_minuteswest = vvd->tz_minuteswest;
		tz->tz_dsttime = vvd->tz_dsttime;
	}

	return 0;
}