Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer type cleanups from Thomas Gleixner:
 "This series does a tree wide cleanup of types related to
  timers/timekeeping.

   - Get rid of cycles_t and use a plain u64. The type is not really
     helpful and caused more confusion than clarity

   - Get rid of the ktime union. The union has become useless as we use
     the scalar nanoseconds storage unconditionally now. The 32bit
     timespec alike storage got removed due to the Y2038 limitations
     some time ago. That leaves the odd union access around for no
     reason. Clean it up.

  Both changes have been done with coccinelle and a small amount of
  manual mopping up"

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  ktime: Get rid of ktime_equal()
  ktime: Cleanup ktime_set() usage
  ktime: Get rid of the union
  clocksource: Use a plain u64 instead of cycle_t
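Both cleanups are purely mechanical type substitutions, which is why coccinelle could do the bulk of the work. As background, here is a minimal standalone sketch of what the ktime change means in practice (illustrative stand-in definitions, not the kernel headers verbatim): once ktime_t is a plain s64 nanosecond count, ktime_set(0, 0) reduces to the literal 0, which is exactly what the kvm/lapic.c hunks below rely on. cycle_t was likewise a bare alias for u64, so that rename changes no semantics at all.

    #include <stdint.h>
    #include <stdio.h>

    typedef int64_t s64;	/* stand-in for the kernel's s64 */

    #define NSEC_PER_SEC 1000000000LL

    /* Before: ktime_t was a single-member union, accessed via .tv64 */
    typedef union { s64 tv64; } ktime_old_t;

    static ktime_old_t ktime_set_old(s64 secs, unsigned long nsecs)
    {
    	return (ktime_old_t){ .tv64 = secs * NSEC_PER_SEC + (s64)nsecs };
    }

    /* After: a plain scalar nanosecond count */
    typedef s64 ktime_t;

    static ktime_t ktime_set_new(s64 secs, unsigned long nsecs)
    {
    	return secs * NSEC_PER_SEC + (s64)nsecs;
    }

    int main(void)
    {
    	/* Both produce the same nanosecond value; the scalar form
    	 * needs no .tv64 access, and ktime_set(0, 0) is simply 0. */
    	printf("old: %lld new: %lld\n",
    	       (long long)ktime_set_old(0, 0).tv64,
    	       (long long)ktime_set_new(0, 0));
    	return 0;
    }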
@@ -92,10 +92,10 @@ static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
 	return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
 }
 
-static notrace cycle_t vread_pvclock(int *mode)
+static notrace u64 vread_pvclock(int *mode)
 {
 	const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
-	cycle_t ret;
+	u64 ret;
 	u64 last;
 	u32 version;
 
@@ -142,9 +142,9 @@ static notrace cycle_t vread_pvclock(int *mode)
 }
 #endif
 
-notrace static cycle_t vread_tsc(void)
+notrace static u64 vread_tsc(void)
 {
-	cycle_t ret = (cycle_t)rdtsc_ordered();
+	u64 ret = (u64)rdtsc_ordered();
 	u64 last = gtod->cycle_last;
 
 	if (likely(ret >= last))
@@ -768,7 +768,7 @@ struct kvm_arch {
 	spinlock_t pvclock_gtod_sync_lock;
 	bool use_master_clock;
 	u64 master_kernel_ns;
-	cycle_t master_cycle_now;
+	u64 master_cycle_now;
 	struct delayed_work kvmclock_update_work;
 	struct delayed_work kvmclock_sync_work;
 
@@ -14,7 +14,7 @@ static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
 #endif
 
 /* some helper functions for xen and kvm pv clock sources */
-cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
 void pvclock_set_flags(u8 flags);
 unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
@@ -87,11 +87,10 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 }
 
 static __always_inline
-cycle_t __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
-			      u64 tsc)
+u64 __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, u64 tsc)
 {
 	u64 delta = tsc - src->tsc_timestamp;
-	cycle_t offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+	u64 offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
 					 src->tsc_shift);
 	return src->system_time + offset;
 }
@@ -29,7 +29,7 @@ static inline cycles_t get_cycles(void)
 	return rdtsc();
 }
 
-extern struct system_counterval_t convert_art_to_tsc(cycle_t art);
+extern struct system_counterval_t convert_art_to_tsc(u64 art);
 
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
@@ -17,8 +17,8 @@ struct vsyscall_gtod_data {
 	unsigned seq;
 
 	int vclock_mode;
-	cycle_t	cycle_last;
-	cycle_t	mask;
+	u64	cycle_last;
+	u64	mask;
 	u32	mult;
 	u32	shift;
 
@@ -247,7 +247,7 @@ void apbt_setup_secondary_clock(void) {}
 static int apbt_clocksource_register(void)
 {
 	u64 start, now;
-	cycle_t t1;
+	u64 t1;
 
 	/* Start the counter, use timer 2 as source, timer 0/1 for event */
 	dw_apb_clocksource_start(clocksource_apbt);
@@ -355,7 +355,7 @@ unsigned long apbt_quick_calibrate(void)
 {
 	int i, scale;
 	u64 old, new;
-	cycle_t t1, t2;
+	u64 t1, t2;
 	unsigned long khz = 0;
 	u32 loop, shift;
 
@@ -133,9 +133,9 @@ static uint32_t __init ms_hyperv_platform(void)
 	return 0;
 }
 
-static cycle_t read_hv_clock(struct clocksource *arg)
+static u64 read_hv_clock(struct clocksource *arg)
 {
-	cycle_t current_tick;
+	u64 current_tick;
 	/*
 	 * Read the partition counter to get the current tick count. This count
 	 * is set to 0 when the partition is created and is incremented in
@@ -791,7 +791,7 @@ static union hpet_lock hpet __cacheline_aligned = {
 	{ .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
 };
 
-static cycle_t read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs)
 {
 	unsigned long flags;
 	union hpet_lock old, new;
@@ -802,7 +802,7 @@ static cycle_t read_hpet(struct clocksource *cs)
 	 * Read HPET directly if in NMI.
 	 */
 	if (in_nmi())
-		return (cycle_t)hpet_readl(HPET_COUNTER);
+		return (u64)hpet_readl(HPET_COUNTER);
 
 	/*
 	 * Read the current state of the lock and HPET value atomically.
@@ -821,7 +821,7 @@ static cycle_t read_hpet(struct clocksource *cs)
 		WRITE_ONCE(hpet.value, new.value);
 		arch_spin_unlock(&hpet.lock);
 		local_irq_restore(flags);
-		return (cycle_t)new.value;
+		return (u64)new.value;
 	}
 	local_irq_restore(flags);
 
@@ -843,15 +843,15 @@ contended:
 		new.lockval = READ_ONCE(hpet.lockval);
 	} while ((new.value == old.value) && arch_spin_is_locked(&new.lock));
 
-	return (cycle_t)new.value;
+	return (u64)new.value;
 }
 #else
 /*
  * For UP or 32-bit.
 */
-static cycle_t read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs)
 {
-	return (cycle_t)hpet_readl(HPET_COUNTER);
+	return (u64)hpet_readl(HPET_COUNTER);
 }
 #endif
 
@@ -867,7 +867,7 @@ static struct clocksource clocksource_hpet = {
 static int hpet_clocksource_register(void)
 {
 	u64 start, now;
-	cycle_t t1;
+	u64 t1;
 
 	/* Start the counter */
 	hpet_restart_counter();
@@ -32,7 +32,7 @@
 static int kvmclock __ro_after_init = 1;
 static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
 static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
-static cycle_t kvm_sched_clock_offset;
+static u64 kvm_sched_clock_offset;
 
 static int parse_no_kvmclock(char *arg)
 {
@@ -79,10 +79,10 @@ static int kvm_set_wallclock(const struct timespec *now)
 	return -1;
 }
 
-static cycle_t kvm_clock_read(void)
+static u64 kvm_clock_read(void)
 {
 	struct pvclock_vcpu_time_info *src;
-	cycle_t ret;
+	u64 ret;
 	int cpu;
 
 	preempt_disable_notrace();
@@ -93,12 +93,12 @@ static cycle_t kvm_clock_read(void)
 	return ret;
 }
 
-static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+static u64 kvm_clock_get_cycles(struct clocksource *cs)
 {
 	return kvm_clock_read();
 }
 
-static cycle_t kvm_sched_clock_read(void)
+static u64 kvm_sched_clock_read(void)
 {
 	return kvm_clock_read() - kvm_sched_clock_offset;
 }
@@ -71,10 +71,10 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 	return flags & valid_flags;
 }
 
-cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 {
 	unsigned version;
-	cycle_t ret;
+	u64 ret;
 	u64 last;
 	u8 flags;
 
@@ -1101,9 +1101,9 @@ static void tsc_resume(struct clocksource *cs)
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
-static cycle_t read_tsc(struct clocksource *cs)
+static u64 read_tsc(struct clocksource *cs)
 {
-	return (cycle_t)rdtsc_ordered();
+	return (u64)rdtsc_ordered();
 }
 
 /*
@@ -1192,7 +1192,7 @@ int unsynchronized_tsc(void)
 /*
 * Convert ART to TSC given numerator/denominator found in detect_art()
 */
-struct system_counterval_t convert_art_to_tsc(cycle_t art)
+struct system_counterval_t convert_art_to_tsc(u64 art)
 {
 	u64 tmp, res, rem;
 
@@ -1106,7 +1106,7 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
 	now = ktime_get();
 	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
 	if (ktime_to_ns(remaining) < 0)
-		remaining = ktime_set(0, 0);
+		remaining = 0;
 
 	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
 	tmcct = div64_u64(ns,
@@ -2057,7 +2057,7 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 	apic->lapic_timer.tscdeadline = 0;
 	if (apic_lvtt_oneshot(apic)) {
 		apic->lapic_timer.tscdeadline = 0;
-		apic->lapic_timer.target_expiration = ktime_set(0, 0);
+		apic->lapic_timer.target_expiration = 0;
 	}
 	atomic_set(&apic->lapic_timer.pending, 0);
 }
@@ -1131,8 +1131,8 @@ struct pvclock_gtod_data {
 
 	struct { /* extract of a clocksource struct */
 		int vclock_mode;
-		cycle_t	cycle_last;
-		cycle_t	mask;
+		u64	cycle_last;
+		u64	mask;
 		u32	mult;
 		u32	shift;
 	} clock;
@@ -1572,9 +1572,9 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
 
 #ifdef CONFIG_X86_64
 
-static cycle_t read_tsc(void)
+static u64 read_tsc(void)
 {
-	cycle_t ret = (cycle_t)rdtsc_ordered();
+	u64 ret = (u64)rdtsc_ordered();
 	u64 last = pvclock_gtod_data.clock.cycle_last;
 
 	if (likely(ret >= last))
@@ -1592,7 +1592,7 @@ static cycle_t read_tsc(void)
 	return last;
 }
 
-static inline u64 vgettsc(cycle_t *cycle_now)
+static inline u64 vgettsc(u64 *cycle_now)
 {
 	long v;
 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
@@ -1603,7 +1603,7 @@ static inline u64 vgettsc(cycle_t *cycle_now)
 	return v * gtod->clock.mult;
 }
 
-static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
+static int do_monotonic_boot(s64 *t, u64 *cycle_now)
 {
 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
 	unsigned long seq;
@@ -1624,7 +1624,7 @@ static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
 }
 
 /* returns true if host is using tsc clocksource */
-static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
+static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
 {
 	/* checked again under seqlock below */
 	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
@@ -916,7 +916,7 @@ static unsigned long lguest_tsc_khz(void)
 * If we can't use the TSC, the kernel falls back to our lower-priority
 * "lguest_clock", where we read the time value given to us by the Host.
 */
-static cycle_t lguest_clock_read(struct clocksource *cs)
+static u64 lguest_clock_read(struct clocksource *cs)
 {
 	unsigned long sec, nsec;
 
@@ -30,7 +30,7 @@
 
 #define RTC_NAME		"sgi_rtc"
 
-static cycle_t uv_read_rtc(struct clocksource *cs);
+static u64 uv_read_rtc(struct clocksource *cs);
 static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
 static int uv_rtc_shutdown(struct clock_event_device *evt);
 
@@ -38,7 +38,7 @@ static struct clocksource clocksource_uv = {
 	.name		= RTC_NAME,
 	.rating		= 299,
 	.read		= uv_read_rtc,
-	.mask		= (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
+	.mask		= (u64)UVH_RTC_REAL_TIME_CLOCK_MASK,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -296,7 +296,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
 * cachelines of it's own page. This allows faster simultaneous reads
 * from a given socket.
 */
-static cycle_t uv_read_rtc(struct clocksource *cs)
+static u64 uv_read_rtc(struct clocksource *cs)
 {
 	unsigned long offset;
 
@@ -305,7 +305,7 @@ static cycle_t uv_read_rtc(struct clocksource *cs)
 	else
 		offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
 
-	return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+	return (u64)uv_read_local_mmr(UVH_RTC | offset);
 }
 
 /*
@@ -39,10 +39,10 @@ static unsigned long xen_tsc_khz(void)
 	return pvclock_tsc_khz(info);
 }
 
-cycle_t xen_clocksource_read(void)
+u64 xen_clocksource_read(void)
 {
 	struct pvclock_vcpu_time_info *src;
-	cycle_t ret;
+	u64 ret;
 
 	preempt_disable_notrace();
 	src = &__this_cpu_read(xen_vcpu)->time;
@@ -51,7 +51,7 @@ cycle_t xen_clocksource_read(void)
 	return ret;
 }
 
-static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
+static u64 xen_clocksource_get_cycles(struct clocksource *cs)
 {
 	return xen_clocksource_read();
 }
@@ -67,7 +67,7 @@ void xen_init_irq_ops(void);
 void xen_setup_timer(int cpu);
 void xen_setup_runstate_info(int cpu);
 void xen_teardown_timer(int cpu);
-cycle_t xen_clocksource_read(void);
+u64 xen_clocksource_read(void);
 void xen_setup_cpu_clockevents(void);
 void __init xen_init_time_ops(void);
 void __init xen_hvm_init_time_ops(void);