clocksource: Use a plain u64 instead of cycle_t

There is no point in having an extra type for extra confusion. u64 is
unambiguous.

Conversion was done with the following coccinelle script:

@rem@
@@
-typedef u64 cycle_t;

@fix@
typedef cycle_t;
@@
-cycle_t
+u64

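In the kernel tree, a script like this is applied tree-wide with spatch; a
hypothetical invocation (the .cocci file name is assumed, not part of this
commit):

  spatch --sp-file cycle_t.cocci --in-place --dir .
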
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: John Stultz <john.stultz@linaro.org>
Author: Thomas Gleixner
Date:   2016-12-21 20:32:01 +01:00
Commit: a5a1d1c291
Parent: 7c0f6ba682

132 changed files with 320 additions and 327 deletions

diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c

@@ -247,7 +247,7 @@ void apbt_setup_secondary_clock(void) {}
 static int apbt_clocksource_register(void)
 {
         u64 start, now;
-        cycle_t t1;
+        u64 t1;
 
         /* Start the counter, use timer 2 as source, timer 0/1 for event */
         dw_apb_clocksource_start(clocksource_apbt);
@@ -355,7 +355,7 @@ unsigned long apbt_quick_calibrate(void)
 {
         int i, scale;
         u64 old, new;
-        cycle_t t1, t2;
+        u64 t1, t2;
         unsigned long khz = 0;
         u32 loop, shift;

diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c

@@ -133,9 +133,9 @@ static uint32_t __init ms_hyperv_platform(void)
         return 0;
 }
 
-static cycle_t read_hv_clock(struct clocksource *arg)
+static u64 read_hv_clock(struct clocksource *arg)
 {
-        cycle_t current_tick;
+        u64 current_tick;
 
         /*
          * Read the partition counter to get the current tick count. This count
          * is set to 0 when the partition is created and is incremented in

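The conversion leaves every clocksource read callback returning a plain u64,
as in read_hv_clock() above. A minimal sketch of the resulting driver pattern
(the "example" names and the MMIO counter pointer are hypothetical, not from
this commit):

static void __iomem *example_counter_reg;       /* hypothetical counter MMIO mapping */

static u64 example_clock_read(struct clocksource *cs)
{
        /* Return the free-running counter; no cycle_t cast needed any more. */
        return (u64)readl(example_counter_reg);
}

static struct clocksource example_clocksource = {
        .name   = "example",
        .rating = 250,
        .read   = example_clock_read,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};
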
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c

@@ -791,7 +791,7 @@ static union hpet_lock hpet __cacheline_aligned = {
         { .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
 };
 
-static cycle_t read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs)
 {
         unsigned long flags;
         union hpet_lock old, new;
@@ -802,7 +802,7 @@ static cycle_t read_hpet(struct clocksource *cs)
          * Read HPET directly if in NMI.
          */
         if (in_nmi())
-                return (cycle_t)hpet_readl(HPET_COUNTER);
+                return (u64)hpet_readl(HPET_COUNTER);
 
         /*
          * Read the current state of the lock and HPET value atomically.
@@ -821,7 +821,7 @@ static cycle_t read_hpet(struct clocksource *cs)
                 WRITE_ONCE(hpet.value, new.value);
                 arch_spin_unlock(&hpet.lock);
                 local_irq_restore(flags);
-                return (cycle_t)new.value;
+                return (u64)new.value;
         }
 
         local_irq_restore(flags);
@@ -843,15 +843,15 @@ contended:
                 new.lockval = READ_ONCE(hpet.lockval);
         } while ((new.value == old.value) && arch_spin_is_locked(&new.lock));
 
-        return (cycle_t)new.value;
+        return (u64)new.value;
 }
 #else
 /*
  * For UP or 32-bit.
  */
-static cycle_t read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs)
 {
-        return (cycle_t)hpet_readl(HPET_COUNTER);
+        return (u64)hpet_readl(HPET_COUNTER);
 }
 #endif
@@ -867,7 +867,7 @@ static struct clocksource clocksource_hpet = {
 static int hpet_clocksource_register(void)
 {
         u64 start, now;
-        cycle_t t1;
+        u64 t1;
 
         /* Start the counter */
         hpet_restart_counter();

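For context on the hpet.c hunks: the lockless read path works because the
spinlock and the last-seen HPET value share a single 64-bit word, so one
READ_ONCE of the union observes both atomically. A sketch of that layout,
reconstructed from the identifiers visible in the hunks (hpet.lockval,
new.value, new.lock):

union hpet_lock {
        struct {
                arch_spinlock_t lock;
                u32 value;
        };
        u64 lockval;    /* lock and value fetched in one 64-bit load */
};

A contended reader spins on READ_ONCE(hpet.lockval) until the value changes
or the lock is released, which is why it can return (u64)new.value without
taking the lock itself.
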
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c

@@ -32,7 +32,7 @@
 static int kvmclock __ro_after_init = 1;
 static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
 static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
-static cycle_t kvm_sched_clock_offset;
+static u64 kvm_sched_clock_offset;
 
 static int parse_no_kvmclock(char *arg)
 {
@@ -79,10 +79,10 @@ static int kvm_set_wallclock(const struct timespec *now)
         return -1;
 }
 
-static cycle_t kvm_clock_read(void)
+static u64 kvm_clock_read(void)
 {
         struct pvclock_vcpu_time_info *src;
-        cycle_t ret;
+        u64 ret;
         int cpu;
 
         preempt_disable_notrace();
@@ -93,12 +93,12 @@ static cycle_t kvm_clock_read(void)
         return ret;
 }
 
-static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+static u64 kvm_clock_get_cycles(struct clocksource *cs)
 {
         return kvm_clock_read();
 }
 
-static cycle_t kvm_sched_clock_read(void)
+static u64 kvm_sched_clock_read(void)
 {
         return kvm_clock_read() - kvm_sched_clock_offset;
 }

diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c

@@ -71,10 +71,10 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
         return flags & valid_flags;
 }
 
-cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 {
         unsigned version;
-        cycle_t ret;
+        u64 ret;
         u64 last;
         u8 flags;

diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c

@@ -1101,9 +1101,9 @@ static void tsc_resume(struct clocksource *cs)
  * checking the result of read_tsc() - cycle_last for being negative.
  * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
  */
-static cycle_t read_tsc(struct clocksource *cs)
+static u64 read_tsc(struct clocksource *cs)
 {
-        return (cycle_t)rdtsc_ordered();
+        return (u64)rdtsc_ordered();
 }
 
 /*
@@ -1192,7 +1192,7 @@ int unsynchronized_tsc(void)
 /*
  * Convert ART to TSC given numerator/denominator found in detect_art()
  */
-struct system_counterval_t convert_art_to_tsc(cycle_t art)
+struct system_counterval_t convert_art_to_tsc(u64 art)
 {
         u64 tmp, res, rem;
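
The final hunk is cut off after the declarations; the rest of
convert_art_to_tsc() scales the ART value by the numerator/denominator pair
found at boot. The usual way to do such scaling without a 128-bit multiply,
sketched with hypothetical names (the in-kernel version uses do_div() and
also adds an ART-to-TSC offset):

/*
 * res = art * num / den without overflowing 64 bits, assuming num and
 * den are 32-bit calibration values.
 */
static u64 scale_art(u64 art, u32 num, u32 den)
{
        u64 quot = art / den;
        u64 rem  = art % den;

        /* quot * num is exact; rem < den keeps rem * num below 2^64. */
        return quot * num + (rem * num) / den;
}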