Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timer core update from Thomas Gleixner:
 - Bug fixes (one for a longstanding dead loop issue)
 - Rework of time related vsyscalls
 - Alarm timer updates
 - Jiffies updates to remove compile time dependencies

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  timekeeping: Cast raw_interval to u64 to avoid shift overflow
  timers: Fix endless looping between cascade() and internal_add_timer()
  time/jiffies: bring back unconditional LATCH definition
  time: Convert x86_64 to using new update_vsyscall
  time: Only do nanosecond rounding on GENERIC_TIME_VSYSCALL_OLD systems
  time: Introduce new GENERIC_TIME_VSYSCALL
  time: Convert CONFIG_GENERIC_TIME_VSYSCALL to CONFIG_GENERIC_TIME_VSYSCALL_OLD
  time: Move update_vsyscall definitions to timekeeper_internal.h
  time: Move timekeeper structure to timekeeper_internal.h for vsyscall changes
  jiffies: Remove compile time assumptions about CLOCK_TICK_RATE
  jiffies: Kill unused TICK_USEC_TO_NSEC
  alarmtimer: Rename alarmtimer_remove to alarmtimer_dequeue
  alarmtimer: Remove unused helpers & defines
  alarmtimer: Use hrtimer per-alarm instead of per-base
  alarmtimer: Implement minimum alarm interval for allowing suspend
Linus Torvalds
2012-10-12 22:17:48 +09:00
20 changed files with 288 additions and 264 deletions

include/linux/alarmtimer.h

@@ -21,7 +21,6 @@ enum alarmtimer_restart {
#define ALARMTIMER_STATE_INACTIVE 0x00
#define ALARMTIMER_STATE_ENQUEUED 0x01
#define ALARMTIMER_STATE_CALLBACK 0x02
/**
* struct alarm - Alarm timer structure
@@ -35,6 +34,7 @@ enum alarmtimer_restart {
*/
struct alarm {
        struct timerqueue_node node;
        struct hrtimer timer;
        enum alarmtimer_restart (*function)(struct alarm *, ktime_t now);
        enum alarmtimer_type type;
        int state;
@@ -43,39 +43,12 @@ struct alarm {
void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
void alarm_start(struct alarm *alarm, ktime_t start);
int alarm_start(struct alarm *alarm, ktime_t start);
int alarm_try_to_cancel(struct alarm *alarm);
int alarm_cancel(struct alarm *alarm);
u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
/*
 * A alarmtimer is active, when it is enqueued into timerqueue or the
 * callback function is running.
 */
static inline int alarmtimer_active(const struct alarm *timer)
{
        return timer->state != ALARMTIMER_STATE_INACTIVE;
}

/*
 * Helper function to check, whether the timer is on one of the queues
 */
static inline int alarmtimer_is_queued(struct alarm *timer)
{
        return timer->state & ALARMTIMER_STATE_ENQUEUED;
}

/*
 * Helper function to check, whether the timer is running the callback
 * function
 */
static inline int alarmtimer_callback_running(struct alarm *timer)
{
        return timer->state & ALARMTIMER_STATE_CALLBACK;
}
/* Provide way to access the rtc device being used by alarmtimers */
struct rtc_device *alarmtimer_get_rtcdev(void);
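For orientation, a minimal usage sketch of the reworked alarmtimer API, in which each alarm now carries its own hrtimer. Only the declarations above are from this merge; the callback, the variable names and the five-second expiry below are made up for illustration:

#include <linux/alarmtimer.h>
#include <linux/ktime.h>

/* Purely illustrative callback; it runs from the alarm's own hrtimer. */
static enum alarmtimer_restart demo_alarm_fn(struct alarm *a, ktime_t now)
{
        return ALARMTIMER_NORESTART;    /* one-shot */
}

static struct alarm demo_alarm;

static void demo_arm(void)
{
        /* Bind the callback to the alarm, then expire five seconds from now. */
        alarm_init(&demo_alarm, ALARM_REALTIME, demo_alarm_fn);
        alarm_start(&demo_alarm, ktime_add(ktime_get_real(), ktime_set(5, 0)));
}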

include/linux/clocksource.h

@@ -319,22 +319,6 @@ static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz)
__clocksource_updatefreq_scale(cs, 1000, khz);
}
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void
update_vsyscall(struct timespec *ts, struct timespec *wtm,
struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);
#else
static inline void
update_vsyscall(struct timespec *ts, struct timespec *wtm,
struct clocksource *c, u32 mult)
{
}
static inline void update_vsyscall_tz(void)
{
}
#endif
extern void timekeeping_notify(struct clocksource *clock);
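The update_vsyscall()/update_vsyscall_tz() declarations removed here reappear in the new timekeeper_internal.h shown below. An architecture that has not yet been converted selects CONFIG_GENERIC_TIME_VSYSCALL_OLD and provides the same hook under the name update_vsyscall_old(); a rough sketch, with a made-up structure standing in for the architecture's vsyscall data page:

#include <linux/clocksource.h>
#include <linux/time.h>

/* Illustrative stand-in for an architecture's vsyscall data page. */
static struct {
        struct timespec wall_time;
        struct timespec wall_to_monotonic;
        u32 mult;
} demo_vsyscall_gtod;

void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
                         struct clocksource *clock, u32 mult)
{
        /* A real implementation also records clock->cycle_last and guards
         * the copy with a sequence counter for lockless vDSO readers. */
        demo_vsyscall_gtod.wall_time = *wall_time;
        demo_vsyscall_gtod.wall_to_monotonic = *wtm;
        demo_vsyscall_gtod.mult = mult;
}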

include/linux/jiffies.h

@@ -51,31 +51,17 @@
#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \
+ ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))
#ifdef CLOCK_TICK_RATE
/* LATCH is used in the interval timer and ftape setup. */
# define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
/*
* HZ is the requested value. However the CLOCK_TICK_RATE may not allow
* for exactly HZ. So SHIFTED_HZ is high res HZ ("<< 8" is for accuracy)
*/
# define SHIFTED_HZ (SH_DIV(CLOCK_TICK_RATE, LATCH, 8))
#else
# define SHIFTED_HZ (HZ << 8)
#endif
extern int register_refined_jiffies(long clock_tick_rate);
/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
#define TICK_NSEC (SH_DIV(1000000UL * 1000, SHIFTED_HZ, 8))
#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
/*
* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming SHIFTED_HZ and
* a value TUSEC for TICK_USEC (can be set bij adjtimex)
*/
#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV(TUSEC * USER_HZ * 1000, SHIFTED_HZ, 8))
/* some arch's have a small-data section that can be accessed register-relative
* but that can only take up to, say, 4-byte variables. jiffies being part of
* an 8-byte variable may not be correctly accessed unless we force the issue
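The new TICK_NSEC depends only on HZ; any correction for the real CLOCK_TICK_RATE is now applied at runtime through register_refined_jiffies() instead of at compile time. A stand-alone, user-space illustration (not kernel code) of what the new rounding division yields for a few example HZ values:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

/* Same arithmetic as the new TICK_NSEC: rounded nanoseconds per tick. */
static unsigned long tick_nsec(unsigned long hz)
{
        return (NSEC_PER_SEC + hz / 2) / hz;
}

int main(void)
{
        printf("HZ=100:  %lu ns\n", tick_nsec(100));    /* 10000000 */
        printf("HZ=250:  %lu ns\n", tick_nsec(250));    /* 4000000 */
        printf("HZ=1000: %lu ns\n", tick_nsec(1000));   /* 1000000 */
        return 0;
}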

include/linux/timekeeper_internal.h

@@ -0,0 +1,108 @@
/*
 * You SHOULD NOT be including this unless you're vsyscall
 * handling code or timekeeping internal code!
 */
#ifndef _LINUX_TIMEKEEPER_INTERNAL_H
#define _LINUX_TIMEKEEPER_INTERNAL_H

#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>

/* Structure holding internal timekeeping values. */
struct timekeeper {
        /* Current clocksource used for timekeeping. */
        struct clocksource *clock;
        /* NTP adjusted clock multiplier */
        u32 mult;
        /* The shift value of the current clocksource. */
        u32 shift;
        /* Number of clock cycles in one NTP interval. */
        cycle_t cycle_interval;
        /* Number of clock shifted nano seconds in one NTP interval. */
        u64 xtime_interval;
        /* shifted nano seconds left over when rounding cycle_interval */
        s64 xtime_remainder;
        /* Raw nano seconds accumulated per NTP interval. */
        u32 raw_interval;

        /* Current CLOCK_REALTIME time in seconds */
        u64 xtime_sec;
        /* Clock shifted nano seconds */
        u64 xtime_nsec;

        /* Difference between accumulated time and NTP time in ntp
         * shifted nano seconds. */
        s64 ntp_error;
        /* Shift conversion between clock shifted nano seconds and
         * ntp shifted nano seconds. */
        u32 ntp_error_shift;

        /*
         * wall_to_monotonic is what we need to add to xtime (or xtime corrected
         * for sub jiffie times) to get to monotonic time. Monotonic is pegged
         * at zero at system boot time, so wall_to_monotonic will be negative,
         * however, we will ALWAYS keep the tv_nsec part positive so we can use
         * the usual normalization.
         *
         * wall_to_monotonic is moved after resume from suspend for the
         * monotonic time not to jump. We need to add total_sleep_time to
         * wall_to_monotonic to get the real boot based time offset.
         *
         * - wall_to_monotonic is no longer the boot time, getboottime must be
         * used instead.
         */
        struct timespec wall_to_monotonic;
        /* Offset clock monotonic -> clock realtime */
        ktime_t offs_real;
        /* time spent in suspend */
        struct timespec total_sleep_time;
        /* Offset clock monotonic -> clock boottime */
        ktime_t offs_boot;
        /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
        struct timespec raw_time;

        /* Seqlock for all timekeeper values */
        seqlock_t lock;
};

static inline struct timespec tk_xtime(struct timekeeper *tk)
{
        struct timespec ts;

        ts.tv_sec = tk->xtime_sec;
        ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
        return ts;
}
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void update_vsyscall(struct timekeeper *tk);
extern void update_vsyscall_tz(void);
#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
struct clocksource *c, u32 mult);
extern void update_vsyscall_tz(void);
static inline void update_vsyscall(struct timekeeper *tk)
{
        struct timespec xt;

        xt = tk_xtime(tk);
        update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
}
#else
static inline void update_vsyscall(struct timekeeper *tk)
{
}
static inline void update_vsyscall_tz(void)
{
}
#endif
#endif /* _LINUX_TIMEKEEPER_INTERNAL_H */
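For completeness, a sketch of what an architecture's update_vsyscall() can look like under the new GENERIC_TIME_VSYSCALL interface: the timekeeper is handed over whole and the architecture copies out whatever its vDSO needs. The demo_gtod structure and its fields are invented for illustration, and the sequence-counter protection a real implementation needs is omitted:

#include <linux/timekeeper_internal.h>

/* Illustrative stand-in for an architecture's vDSO/gtod data page. */
static struct {
        u32 mult;
        u32 shift;
        u64 cycle_last;
        u64 wall_time_sec;
        u64 wall_time_snsec;    /* clock-shifted nanoseconds */
} demo_gtod;

void update_vsyscall(struct timekeeper *tk)
{
        demo_gtod.mult = tk->mult;
        demo_gtod.shift = tk->shift;
        demo_gtod.cycle_last = tk->clock->cycle_last;
        demo_gtod.wall_time_sec = tk->xtime_sec;
        /* Keep nanoseconds shifted; the vDSO adds (cycles - cycle_last) * mult
         * and shifts down once at the end. */
        demo_gtod.wall_time_snsec = tk->xtime_nsec;
}

Handing over the whole timekeeper is what lets a converted architecture work in clock-shifted nanoseconds end to end, avoiding the rounding that only the _OLD path still needs (see the "Only do nanosecond rounding on GENERIC_TIME_VSYSCALL_OLD systems" commit above).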