Merge branch 'timers/urgent' into timers/core
Pick up urgent fixes to apply dependent cleanup patch
@@ -1572,13 +1572,32 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
return cnt;
}

static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
u32 *prog_ids,
u32 request_cnt)
{
int i = 0;

for (; *prog; prog++) {
if (*prog == &dummy_bpf_prog.prog)
continue;
prog_ids[i] = (*prog)->aux->id;
if (++i == request_cnt) {
prog++;
break;
}
}

return !!(*prog);
}

int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
__u32 __user *prog_ids, u32 cnt)
{
struct bpf_prog **prog;
unsigned long err = 0;
u32 i = 0, *ids;
bool nospc;
u32 *ids;

/* users of this function are doing:
* cnt = bpf_prog_array_length();
@@ -1595,16 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
return -ENOMEM;
rcu_read_lock();
prog = rcu_dereference(progs)->progs;
for (; *prog; prog++) {
if (*prog == &dummy_bpf_prog.prog)
continue;
ids[i] = (*prog)->aux->id;
if (++i == cnt) {
prog++;
break;
}
}
nospc = !!(*prog);
nospc = bpf_prog_array_copy_core(prog, ids, cnt);
rcu_read_unlock();
err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
kfree(ids);
@@ -1683,22 +1693,25 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
}

int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
__u32 __user *prog_ids, u32 request_cnt,
__u32 __user *prog_cnt)
u32 *prog_ids, u32 request_cnt,
u32 *prog_cnt)
{
struct bpf_prog **prog;
u32 cnt = 0;

if (array)
cnt = bpf_prog_array_length(array);

if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
return -EFAULT;
*prog_cnt = cnt;

/* return early if user requested only program count or nothing to copy */
if (!request_cnt || !cnt)
return 0;

return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
prog = rcu_dereference_check(array, 1)->progs;
return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
: 0;
}

static void bpf_prog_free_deferred(struct work_struct *work)
@@ -1442,9 +1442,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);

if (attr->value_size > KMALLOC_MAX_SIZE)
return ERR_PTR(-E2BIG);

err = bpf_tcp_ulp_register();
if (err && err != -EEXIST)
return ERR_PTR(err);
@@ -119,23 +119,20 @@ int get_callchain_buffers(int event_max_stack)
goto exit;
}

if (count > 1) {
/* If the allocation failed, give up */
if (!callchain_cpus_entries)
err = -ENOMEM;
/*
* If requesting per event more than the global cap,
* return a different error to help userspace figure
* this out.
*
* And also do it here so that we have &callchain_mutex held.
*/
if (event_max_stack > sysctl_perf_event_max_stack)
err = -EOVERFLOW;
/*
* If requesting per event more than the global cap,
* return a different error to help userspace figure
* this out.
*
* And also do it here so that we have &callchain_mutex held.
*/
if (event_max_stack > sysctl_perf_event_max_stack) {
err = -EOVERFLOW;
goto exit;
}

err = alloc_callchain_buffers();
if (count == 1)
err = alloc_callchain_buffers();
exit:
if (err)
atomic_dec(&nr_callchain_events);
@@ -7587,6 +7587,10 @@ static void perf_event_switch(struct task_struct *task,
},
};

if (!sched_in && task->state == TASK_RUNNING)
switch_event.event_id.header.misc |=
PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;

perf_iterate_sb(perf_event_switch_output,
&switch_event,
NULL);
@@ -10205,9 +10209,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
* __u16 sample size limit.
*/
if (attr->sample_stack_user >= USHRT_MAX)
ret = -EINVAL;
return -EINVAL;
else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
ret = -EINVAL;
return -EINVAL;
}

if (!attr->sample_max_stack)
@@ -216,10 +216,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
if (!s)
continue;

#ifdef CONFIG_DEBUG_KMEMLEAK
/* Clear stale pointers from reused stack. */
memset(s->addr, 0, THREAD_SIZE);
#endif

tsk->stack_vm_area = s;
return s->addr;
}
@@ -2428,7 +2428,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
struct kprobe_blacklist_entry *ent =
list_entry(v, struct kprobe_blacklist_entry, list);

seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
(void *)ent->end_addr, (void *)ent->start_addr);
return 0;
}
@@ -113,8 +113,10 @@ void *klp_shadow_get(void *obj, unsigned long id)
}
EXPORT_SYMBOL_GPL(klp_shadow_get);

static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
size_t size, gfp_t gfp_flags, bool warn_on_exist)
static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id,
size_t size, gfp_t gfp_flags,
klp_shadow_ctor_t ctor, void *ctor_data,
bool warn_on_exist)
{
struct klp_shadow *new_shadow;
void *shadow_data;
@@ -125,18 +127,15 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
if (shadow_data)
goto exists;

/* Allocate a new shadow variable for use inside the lock below */
/*
* Allocate a new shadow variable. Fill it with zeroes by default.
* More complex setting can be done by @ctor function. But it is
* called only when the buffer is really used (under klp_shadow_lock).
*/
new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags);
if (!new_shadow)
return NULL;

new_shadow->obj = obj;
new_shadow->id = id;

/* Initialize the shadow variable if data provided */
if (data)
memcpy(new_shadow->data, data, size);

/* Look for <obj, id> again under the lock */
spin_lock_irqsave(&klp_shadow_lock, flags);
shadow_data = klp_shadow_get(obj, id);
@@ -150,6 +149,22 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
goto exists;
}

new_shadow->obj = obj;
new_shadow->id = id;

if (ctor) {
int err;

err = ctor(obj, new_shadow->data, ctor_data);
if (err) {
spin_unlock_irqrestore(&klp_shadow_lock, flags);
kfree(new_shadow);
pr_err("Failed to construct shadow variable <%p, %lx> (%d)\n",
obj, id, err);
return NULL;
}
}

/* No <obj, id> found, so attach the newly allocated one */
hash_add_rcu(klp_shadow_hash, &new_shadow->node,
(unsigned long)new_shadow->obj);
@@ -170,26 +185,32 @@ exists:
* klp_shadow_alloc() - allocate and add a new shadow variable
* @obj: pointer to parent object
* @id: data identifier
* @data: pointer to data to attach to parent
* @size: size of attached data
* @gfp_flags: GFP mask for allocation
* @ctor: custom constructor to initialize the shadow data (optional)
* @ctor_data: pointer to any data needed by @ctor (optional)
*
* Allocates @size bytes for new shadow variable data using @gfp_flags
* and copies @size bytes from @data into the new shadow variable's own
* data space. If @data is NULL, @size bytes are still allocated, but
* no copy is performed. The new shadow variable is then added to the
* global hashtable.
* Allocates @size bytes for new shadow variable data using @gfp_flags.
* The data are zeroed by default. They are further initialized by @ctor
* function if it is not NULL. The new shadow variable is then added
* to the global hashtable.
*
* If an existing <obj, id> shadow variable can be found, this routine
* will issue a WARN, exit early and return NULL.
* If an existing <obj, id> shadow variable can be found, this routine will
* issue a WARN, exit early and return NULL.
*
* This function guarantees that the constructor function is called only when
* the variable did not exist before. The cost is that @ctor is called
* in atomic context under a spin lock.
*
* Return: the shadow variable data element, NULL on duplicate or
* failure.
*/
void *klp_shadow_alloc(void *obj, unsigned long id, void *data,
size_t size, gfp_t gfp_flags)
void *klp_shadow_alloc(void *obj, unsigned long id,
size_t size, gfp_t gfp_flags,
klp_shadow_ctor_t ctor, void *ctor_data)
{
return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, true);
return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags,
ctor, ctor_data, true);
}
EXPORT_SYMBOL_GPL(klp_shadow_alloc);

@@ -197,37 +218,51 @@ EXPORT_SYMBOL_GPL(klp_shadow_alloc);
* klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable
* @obj: pointer to parent object
* @id: data identifier
* @data: pointer to data to attach to parent
* @size: size of attached data
* @gfp_flags: GFP mask for allocation
* @ctor: custom constructor to initialize the shadow data (optional)
* @ctor_data: pointer to any data needed by @ctor (optional)
*
* Returns a pointer to existing shadow data if an <obj, id> shadow
* variable is already present. Otherwise, it creates a new shadow
* variable like klp_shadow_alloc().
*
* This function guarantees that only one shadow variable exists with
* the given @id for the given @obj. It also guarantees that the shadow
* variable will be initialized by the given @data only when it did not
* exist before.
* This function guarantees that only one shadow variable exists with the given
* @id for the given @obj. It also guarantees that the constructor function
* will be called only when the variable did not exist before. The cost is
* that @ctor is called in atomic context under a spin lock.
*
* Return: the shadow variable data element, NULL on failure.
*/
void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data,
size_t size, gfp_t gfp_flags)
void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
size_t size, gfp_t gfp_flags,
klp_shadow_ctor_t ctor, void *ctor_data)
{
return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, false);
return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags,
ctor, ctor_data, false);
}
EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc);

static void klp_shadow_free_struct(struct klp_shadow *shadow,
klp_shadow_dtor_t dtor)
{
hash_del_rcu(&shadow->node);
if (dtor)
dtor(shadow->obj, shadow->data);
kfree_rcu(shadow, rcu_head);
}

/**
* klp_shadow_free() - detach and free a <obj, id> shadow variable
* @obj: pointer to parent object
* @id: data identifier
* @dtor: custom callback that can be used to unregister the variable
* and/or free data that the shadow variable points to (optional)
*
* This function releases the memory for this <obj, id> shadow variable
* instance, callers should stop referencing it accordingly.
*/
void klp_shadow_free(void *obj, unsigned long id)
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
{
struct klp_shadow *shadow;
unsigned long flags;
@@ -239,8 +274,7 @@ void klp_shadow_free(void *obj, unsigned long id)
(unsigned long)obj) {

if (klp_shadow_match(shadow, obj, id)) {
hash_del_rcu(&shadow->node);
kfree_rcu(shadow, rcu_head);
klp_shadow_free_struct(shadow, dtor);
break;
}
}
@@ -252,11 +286,13 @@ EXPORT_SYMBOL_GPL(klp_shadow_free);
/**
* klp_shadow_free_all() - detach and free all <*, id> shadow variables
* @id: data identifier
* @dtor: custom callback that can be used to unregister the variable
* and/or free data that the shadow variable points to (optional)
*
* This function releases the memory for all <*, id> shadow variable
* instances, callers should stop referencing them accordingly.
*/
void klp_shadow_free_all(unsigned long id)
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
{
struct klp_shadow *shadow;
unsigned long flags;
@@ -266,10 +302,8 @@ void klp_shadow_free_all(unsigned long id)

/* Delete all <*, id> from hash */
hash_for_each(klp_shadow_hash, i, shadow, node) {
if (klp_shadow_match(shadow, shadow->obj, id)) {
hash_del_rcu(&shadow->node);
kfree_rcu(shadow, rcu_head);
}
if (klp_shadow_match(shadow, shadow->obj, id))
klp_shadow_free_struct(shadow, dtor);
}

spin_unlock_irqrestore(&klp_shadow_lock, flags);
@@ -1472,7 +1472,8 @@ static ssize_t module_sect_show(struct module_attribute *mattr,
{
struct module_sect_attr *sattr =
container_of(mattr, struct module_sect_attr, mattr);
return sprintf(buf, "0x%pK\n", (void *)sattr->address);
return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
(void *)sattr->address : NULL);
}

static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
@@ -704,24 +704,6 @@ static const struct bin_table bin_net_netfilter_table[] = {
{}
};

static const struct bin_table bin_net_irda_table[] = {
{ CTL_INT, NET_IRDA_DISCOVERY, "discovery" },
{ CTL_STR, NET_IRDA_DEVNAME, "devname" },
{ CTL_INT, NET_IRDA_DEBUG, "debug" },
{ CTL_INT, NET_IRDA_FAST_POLL, "fast_poll_increase" },
{ CTL_INT, NET_IRDA_DISCOVERY_SLOTS, "discovery_slots" },
{ CTL_INT, NET_IRDA_DISCOVERY_TIMEOUT, "discovery_timeout" },
{ CTL_INT, NET_IRDA_SLOT_TIMEOUT, "slot_timeout" },
{ CTL_INT, NET_IRDA_MAX_BAUD_RATE, "max_baud_rate" },
{ CTL_INT, NET_IRDA_MIN_TX_TURN_TIME, "min_tx_turn_time" },
{ CTL_INT, NET_IRDA_MAX_TX_DATA_SIZE, "max_tx_data_size" },
{ CTL_INT, NET_IRDA_MAX_TX_WINDOW, "max_tx_window" },
{ CTL_INT, NET_IRDA_MAX_NOREPLY_TIME, "max_noreply_time" },
{ CTL_INT, NET_IRDA_WARN_NOREPLY_TIME, "warn_noreply_time" },
{ CTL_INT, NET_IRDA_LAP_KEEPALIVE_TIME, "lap_keepalive_time" },
{}
};

static const struct bin_table bin_net_table[] = {
{ CTL_DIR, NET_CORE, "core", bin_net_core_table },
/* NET_ETHER not used */
@@ -743,7 +725,7 @@ static const struct bin_table bin_net_table[] = {
{ CTL_DIR, NET_LLC, "llc", bin_net_llc_table },
{ CTL_DIR, NET_NETFILTER, "netfilter", bin_net_netfilter_table },
/* NET_DCCP "dccp" no longer used */
{ CTL_DIR, NET_IRDA, "irda", bin_net_irda_table },
/* NET_IRDA "irda" no longer used */
{ CTL_INT, 2089, "nf_conntrack_max" },
{}
};
@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;

static void inline clocksource_watchdog_lock(unsigned long *flags)
{
spin_lock_irqsave(&watchdog_lock, *flags);
}

static void inline clocksource_watchdog_unlock(unsigned long *flags)
{
spin_unlock_irqrestore(&watchdog_lock, *flags);
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

@@ -142,9 +152,19 @@ static void __clocksource_unstable(struct clocksource *cs)
cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
cs->flags |= CLOCK_SOURCE_UNSTABLE;

/*
* If the clocksource is registered clocksource_watchdog_kthread() will
* re-rate and re-select.
*/
if (list_empty(&cs->list)) {
cs->rating = 0;
return;
}

if (cs->mark_unstable)
cs->mark_unstable(cs);

/* kick clocksource_watchdog_kthread() */
if (finished_booting)
schedule_work(&watchdog_work);
}
@@ -153,10 +173,8 @@ static void __clocksource_unstable(struct clocksource *cs)
* clocksource_mark_unstable - mark clocksource unstable via watchdog
* @cs: clocksource to be marked unstable
*
* This function is called instead of clocksource_change_rating from
* cpu hotplug code to avoid a deadlock between the clocksource mutex
* and the cpu hotplug mutex. It defers the update of the clocksource
* to the watchdog thread.
* This function is called by the x86 TSC code to mark clocksources as unstable;
* it defers demotion and re-selection to a kthread.
*/
void clocksource_mark_unstable(struct clocksource *cs)
{
@@ -164,7 +182,7 @@ void clocksource_mark_unstable(struct clocksource *cs)

spin_lock_irqsave(&watchdog_lock, flags);
if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
if (list_empty(&cs->wd_list))
if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
list_add(&cs->wd_list, &watchdog_list);
__clocksource_unstable(cs);
}
@@ -319,9 +337,8 @@ static void clocksource_resume_watchdog(void)

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
unsigned long flags;
INIT_LIST_HEAD(&cs->wd_list);

spin_lock_irqsave(&watchdog_lock, flags);
if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
/* cs is a clocksource to be watched. */
list_add(&cs->wd_list, &watchdog_list);
@@ -331,7 +348,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}
spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_select_watchdog(bool fallback)
@@ -373,9 +389,6 @@ static void clocksource_select_watchdog(bool fallback)

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
unsigned long flags;

spin_lock_irqsave(&watchdog_lock, flags);
if (cs != watchdog) {
if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
/* cs is a watched clocksource. */
@@ -384,21 +397,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
clocksource_stop_watchdog();
}
}
spin_unlock_irqrestore(&watchdog_lock, flags);
}

static int __clocksource_watchdog_kthread(void)
{
struct clocksource *cs, *tmp;
unsigned long flags;
LIST_HEAD(unstable);
int select = 0;

spin_lock_irqsave(&watchdog_lock, flags);
list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
list_del_init(&cs->wd_list);
list_add(&cs->wd_list, &unstable);
__clocksource_change_rating(cs, 0);
select = 1;
}
if (cs->flags & CLOCK_SOURCE_RESELECT) {
@@ -410,11 +421,6 @@ static int __clocksource_watchdog_kthread(void)
clocksource_stop_watchdog();
spin_unlock_irqrestore(&watchdog_lock, flags);

/* Needs to be done outside of watchdog lock */
list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
list_del_init(&cs->wd_list);
__clocksource_change_rating(cs, 0);
}
return select;
}

@@ -447,6 +453,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static void inline clocksource_watchdog_lock(unsigned long *flags) { }
static void inline clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

/**
@@ -779,14 +788,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
*/
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
unsigned long flags;

/* Initialize mult/shift and max_idle_ns */
__clocksource_update_freq_scale(cs, scale, freq);

/* Add clocksource to the clocksource list */
mutex_lock(&clocksource_mutex);

clocksource_watchdog_lock(&flags);
clocksource_enqueue(cs);
clocksource_enqueue_watchdog(cs);
clocksource_watchdog_unlock(&flags);

clocksource_select();
clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex);
@@ -808,8 +822,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
*/
void clocksource_change_rating(struct clocksource *cs, int rating)
{
unsigned long flags;

mutex_lock(&clocksource_mutex);
clocksource_watchdog_lock(&flags);
__clocksource_change_rating(cs, rating);
clocksource_watchdog_unlock(&flags);

clocksource_select();
clocksource_select_watchdog(false);
mutex_unlock(&clocksource_mutex);
@@ -821,6 +840,8 @@ EXPORT_SYMBOL(clocksource_change_rating);
*/
static int clocksource_unbind(struct clocksource *cs)
{
unsigned long flags;

if (clocksource_is_watchdog(cs)) {
/* Select and try to install a replacement watchdog. */
clocksource_select_watchdog(true);
@@ -834,8 +855,12 @@ static int clocksource_unbind(struct clocksource *cs)
if (curr_clocksource == cs)
return -EBUSY;
}

clocksource_watchdog_lock(&flags);
clocksource_dequeue_watchdog(cs);
list_del_init(&cs->list);
clocksource_watchdog_unlock(&flags);

return 0;
}
@@ -90,6 +90,11 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
.clockid = CLOCK_REALTIME,
.get_time = &ktime_get_real,
},
{
.index = HRTIMER_BASE_BOOTTIME,
.clockid = CLOCK_BOOTTIME,
.get_time = &ktime_get_boottime,
},
{
.index = HRTIMER_BASE_TAI,
.clockid = CLOCK_TAI,
@@ -105,6 +110,11 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
.clockid = CLOCK_REALTIME,
.get_time = &ktime_get_real,
},
{
.index = HRTIMER_BASE_BOOTTIME_SOFT,
.clockid = CLOCK_BOOTTIME,
.get_time = &ktime_get_boottime,
},
{
.index = HRTIMER_BASE_TAI_SOFT,
.clockid = CLOCK_TAI,
@@ -119,7 +129,7 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {

[CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
[CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
[CLOCK_BOOTTIME] = HRTIMER_BASE_MONOTONIC,
[CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
[CLOCK_TAI] = HRTIMER_BASE_TAI,
};

@@ -571,12 +581,14 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
offs_real, offs_tai);
offs_real, offs_boot, offs_tai);

base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;

return now;
@@ -1205,10 +1205,12 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
u64 *newval, u64 *oldval)
{
u64 now;
int ret;

WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
ret = cpu_timer_sample_group(clock_idx, tsk, &now);

if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) {
if (oldval && ret != -EINVAL) {
/*
* We are setting itimer. The *oldval is absolute and we update
* it to be relative, *newval argument is relative and we update
@@ -83,8 +83,6 @@ int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp)
case CLOCK_BOOTTIME:
get_monotonic_boottime64(tp);
break;
case CLOCK_MONOTONIC_ACTIVE:
ktime_get_active_ts64(tp);
default:
return -EINVAL;
}
@@ -252,16 +252,15 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *
return 0;
}

static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
{
timekeeping_clocktai64(tp);
get_monotonic_boottime64(tp);
return 0;
}

static int posix_get_monotonic_active(clockid_t which_clock,
struct timespec64 *tp)
static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
{
ktime_get_active_ts64(tp);
timekeeping_clocktai64(tp);
return 0;
}

@@ -1327,9 +1326,19 @@ static const struct k_clock clock_tai = {
.timer_arm = common_hrtimer_arm,
};

static const struct k_clock clock_monotonic_active = {
static const struct k_clock clock_boottime = {
.clock_getres = posix_get_hrtimer_res,
.clock_get = posix_get_monotonic_active,
.clock_get = posix_get_boottime,
.nsleep = common_nsleep,
.timer_create = common_timer_create,
.timer_set = common_timer_set,
.timer_get = common_timer_get,
.timer_del = common_timer_del,
.timer_rearm = common_hrtimer_rearm,
.timer_forward = common_hrtimer_forward,
.timer_remaining = common_hrtimer_remaining,
.timer_try_to_cancel = common_hrtimer_try_to_cancel,
.timer_arm = common_hrtimer_arm,
};

static const struct k_clock * const posix_clocks[] = {
@@ -1340,11 +1349,10 @@ static const struct k_clock * const posix_clocks[] = {
[CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw,
[CLOCK_REALTIME_COARSE] = &clock_realtime_coarse,
[CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse,
[CLOCK_BOOTTIME] = &clock_monotonic,
[CLOCK_BOOTTIME] = &clock_boottime,
[CLOCK_REALTIME_ALARM] = &alarm_clock,
[CLOCK_BOOTTIME_ALARM] = &alarm_clock,
[CLOCK_TAI] = &clock_tai,
[CLOCK_MONOTONIC_ACTIVE] = &clock_monotonic_active,
};

static const struct k_clock *clockid_to_kclock(const clockid_t id)
@@ -419,19 +419,6 @@ void tick_suspend_local(void)
clockevents_shutdown(td->evtdev);
}

static void tick_forward_next_period(void)
{
ktime_t delta, now = ktime_get();
u64 n;

delta = ktime_sub(now, tick_next_period);
n = ktime_divns(delta, tick_period);
tick_next_period += n * tick_period;
if (tick_next_period < now)
tick_next_period += tick_period;
tick_sched_forward_next_period();
}

/**
* tick_resume_local - Resume the local tick device
*
@@ -444,8 +431,6 @@ void tick_resume_local(void)
struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
bool broadcast = tick_resume_check_broadcast();

tick_forward_next_period();

clockevents_tick_resume(td->evtdev);
if (!broadcast) {
if (td->mode == TICKDEV_MODE_PERIODIC)
@@ -141,12 +141,6 @@ static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); }
#endif /* !(BROADCAST && ONESHOT) */

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
extern void tick_sched_forward_next_period(void);
#else
static inline void tick_sched_forward_next_period(void) { }
#endif

/* NO_HZ_FULL internal */
#ifdef CONFIG_NO_HZ_FULL
extern void tick_nohz_init(void);
@@ -82,16 +82,15 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
!tick_device_is_functional(dev)) {

printk(KERN_INFO "Clockevents: "
"could not switch to one-shot mode:");
pr_info("Clockevents: could not switch to one-shot mode:");
if (!dev) {
printk(" no tick device\n");
pr_cont(" no tick device\n");
} else {
if (!tick_device_is_functional(dev))
printk(" %s is not functional.\n", dev->name);
pr_cont(" %s is not functional.\n", dev->name);
else
printk(" %s does not support one-shot mode.\n",
dev->name);
pr_cont(" %s does not support one-shot mode.\n",
dev->name);
}
return -EINVAL;
}
@@ -51,15 +51,6 @@ struct tick_sched *tick_get_tick_sched(int cpu)
*/
static ktime_t last_jiffies_update;

/*
* Called after resume. Make sure that jiffies are not fast forwarded due to
* clock monotonic being forwarded by the suspended time.
*/
void tick_sched_forward_next_period(void)
{
last_jiffies_update = tick_next_period;
}

/*
* Must be called with interrupts disabled !
*/
@@ -804,12 +795,12 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
return;
}

hrtimer_set_expires(&ts->sched_timer, tick);

if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
else
if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
} else {
hrtimer_set_expires(&ts->sched_timer, tick);
tick_program_event(tick, 1);
}
}

static void tick_nohz_retain_tick(struct tick_sched *ts)
@@ -138,12 +138,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)

static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
{
/* Update both bases so mono and raw stay coupled. */
tk->tkr_mono.base += delta;
tk->tkr_raw.base += delta;

/* Accumulate time spent in suspend */
tk->time_suspended += delta;
tk->offs_boot = ktime_add(tk->offs_boot, delta);
}

/*
@@ -473,6 +468,36 @@ u64 ktime_get_raw_fast_ns(void)
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);

/**
* ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
*
* To keep it NMI safe since we're accessing from tracing, we're not using a
* separate timekeeper with updates to monotonic clock and boot offset
* protected with seqlocks. This has the following minor side effects:
*
* (1) Its possible that a timestamp be taken after the boot offset is updated
* but before the timekeeper is updated. If this happens, the new boot offset
* is added to the old timekeeping making the clock appear to update slightly
* earlier:
* CPU 0 CPU 1
* timekeeping_inject_sleeptime64()
* __timekeeping_inject_sleeptime(tk, delta);
* timestamp();
* timekeeping_update(tk, TK_CLEAR_NTP...);
*
* (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
* partially updated. Since the tk->offs_boot update is a rare event, this
* should be a rare occurrence which postprocessing should be able to handle.
*/
u64 notrace ktime_get_boot_fast_ns(void)
{
struct timekeeper *tk = &tk_core.timekeeper;

return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
}
EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);


/*
* See comment for __ktime_get_fast_ns() vs. timestamp ordering
*/
@@ -764,6 +789,7 @@ EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);

static ktime_t *offsets[TK_OFFS_MAX] = {
[TK_OFFS_REAL] = &tk_core.timekeeper.offs_real,
[TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot,
[TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai,
};

@@ -860,39 +886,6 @@ void ktime_get_ts64(struct timespec64 *ts)
}
EXPORT_SYMBOL_GPL(ktime_get_ts64);

/**
* ktime_get_active_ts64 - Get the active non-suspended monotonic clock
* @ts: pointer to timespec variable
*
* The function calculates the monotonic clock from the realtime clock and
* the wall_to_monotonic offset, subtracts the accumulated suspend time and
* stores the result in normalized timespec64 format in the variable
* pointed to by @ts.
*/
void ktime_get_active_ts64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
struct timespec64 tomono, tsusp;
u64 nsec, nssusp;
unsigned int seq;

WARN_ON(timekeeping_suspended);

do {
seq = read_seqcount_begin(&tk_core.seq);
ts->tv_sec = tk->xtime_sec;
nsec = timekeeping_get_ns(&tk->tkr_mono);
tomono = tk->wall_to_monotonic;
nssusp = tk->time_suspended;
} while (read_seqcount_retry(&tk_core.seq, seq));

ts->tv_sec += tomono.tv_sec;
ts->tv_nsec = 0;
timespec64_add_ns(ts, nsec + tomono.tv_nsec);
tsusp = ns_to_timespec64(nssusp);
*ts = timespec64_sub(*ts, tsusp);
}

/**
* ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
*
@@ -1593,6 +1586,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
return;
}
tk_xtime_add(tk, delta);
tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
tk_debug_account_sleep_time(delta);
}
@@ -2125,7 +2119,7 @@ out:
void getboottime64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
ktime_t t = ktime_sub(tk->offs_real, tk->time_suspended);
ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);

*ts = ktime_to_timespec64(t);
}
@@ -2139,13 +2133,6 @@ unsigned long get_seconds(void)
}
EXPORT_SYMBOL(get_seconds);

struct timespec __current_kernel_time(void)
{
struct timekeeper *tk = &tk_core.timekeeper;

return timespec64_to_timespec(tk_xtime(tk));
}

struct timespec64 current_kernel_time64(void)
{
struct timekeeper *tk = &tk_core.timekeeper;
@@ -2195,6 +2182,7 @@ void do_timer(unsigned long ticks)
* ktime_get_update_offsets_now - hrtimer helper
* @cwsseq: pointer to check and store the clock was set sequence number
* @offs_real: pointer to storage for monotonic -> realtime offset
* @offs_boot: pointer to storage for monotonic -> boottime offset
* @offs_tai: pointer to storage for monotonic -> clock tai offset
*
* Returns current monotonic time and updates the offsets if the
@@ -2204,7 +2192,7 @@ void do_timer(unsigned long ticks)
* Called from hrtimer_interrupt() or retrigger_next_event()
*/
ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
ktime_t *offs_tai)
ktime_t *offs_boot, ktime_t *offs_tai)
{
struct timekeeper *tk = &tk_core.timekeeper;
unsigned int seq;
@@ -2221,6 +2209,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
if (*cwsseq != tk->clock_was_set_seq) {
*cwsseq = tk->clock_was_set_seq;
*offs_real = tk->offs_real;
*offs_boot = tk->offs_boot;
*offs_tai = tk->offs_tai;
}
@@ -6,6 +6,7 @@
*/
extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq,
ktime_t *offs_real,
ktime_t *offs_boot,
ktime_t *offs_tai);

extern int timekeeping_valid_for_hres(void);
@@ -977,6 +977,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
struct perf_event_query_bpf __user *uquery = info;
struct perf_event_query_bpf query = {};
u32 *ids, prog_cnt, ids_len;
int ret;

if (!capable(CAP_SYS_ADMIN))
@@ -985,16 +986,32 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
return -EINVAL;
if (copy_from_user(&query, uquery, sizeof(query)))
return -EFAULT;
if (query.ids_len > BPF_TRACE_MAX_PROGS)

ids_len = query.ids_len;
if (ids_len > BPF_TRACE_MAX_PROGS)
return -E2BIG;
ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
if (!ids)
return -ENOMEM;
/*
* The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
* is required when user only wants to check for uquery->prog_cnt.
* There is no need to check for it since the case is handled
* gracefully in bpf_prog_array_copy_info.
*/

mutex_lock(&bpf_event_mutex);
ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
uquery->ids,
query.ids_len,
&uquery->prog_cnt);
ids,
ids_len,
&prog_cnt);
mutex_unlock(&bpf_event_mutex);

if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
ret = -EFAULT;

kfree(ids);
return ret;
}
@@ -1165,7 +1165,7 @@ static struct {
{ trace_clock, "perf", 1 },
{ ktime_get_mono_fast_ns, "mono", 1 },
{ ktime_get_raw_fast_ns, "mono_raw", 1 },
{ ktime_get_mono_fast_ns, "boot", 1 },
{ ktime_get_boot_fast_ns, "boot", 1 },
ARCH_TRACE_CLOCKS
};
@@ -356,7 +356,7 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
__field( unsigned int, seqnum )
),

F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n",
F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llu\tnmi-ts:%llu\tnmi-count:%u\n",
__entry->seqnum,
__entry->tv_sec,
__entry->tv_nsec,
@@ -1499,14 +1499,14 @@ static int process_preds(struct trace_event_call *call,
return ret;
}

if (!nr_preds) {
prog = NULL;
} else {
prog = predicate_parse(filter_string, nr_parens, nr_preds,
if (!nr_preds)
return -EINVAL;

prog = predicate_parse(filter_string, nr_parens, nr_preds,
parse_pred, call, pe);
if (IS_ERR(prog))
return PTR_ERR(prog);
}
if (IS_ERR(prog))
return PTR_ERR(prog);

rcu_assign_pointer(filter->prog, prog);
return 0;
}
@@ -512,8 +512,6 @@ static int __register_trace_kprobe(struct trace_kprobe *tk)
if (ret == 0)
tk->tp.flags |= TP_FLAG_REGISTERED;
else {
pr_warn("Could not insert probe at %s+%lu: %d\n",
trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret);
if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) {
pr_warn("This probe might be able to register after target module is loaded. Continue.\n");
ret = 0;