Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - fsnotify fix
 - poll() timeout fix
 - a few scripts/ tweaks
 - debugobjects updates
 - the (small) ocfs2 queue
 - Minor fixes to kernel/padata.c
 - Maybe half of the MM queue

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (117 commits)
  mm, page_alloc: restore the original nodemask if the fast path allocation failed
  mm, page_alloc: uninline the bad page part of check_new_page()
  mm, page_alloc: don't duplicate code in free_pcp_prepare
  mm, page_alloc: defer debugging checks of pages allocated from the PCP
  mm, page_alloc: defer debugging checks of freed pages until a PCP drain
  cpuset: use static key better and convert to new API
  mm, page_alloc: inline pageblock lookup in page free fast paths
  mm, page_alloc: remove unnecessary variable from free_pcppages_bulk
  mm, page_alloc: pull out side effects from free_pages_check
  mm, page_alloc: un-inline the bad part of free_pages_check
  mm, page_alloc: check multiple page fields with a single branch
  mm, page_alloc: remove field from alloc_context
  mm, page_alloc: avoid looking up the first zone in a zonelist twice
  mm, page_alloc: shortcut watermark checks for order-0 pages
  mm, page_alloc: reduce cost of fair zone allocation policy retry
  mm, page_alloc: shorten the page allocator fast path
  mm, page_alloc: check once if a zone has isolated pageblocks
  mm, page_alloc: move __GFP_HARDWALL modifications out of the fastpath
  mm, page_alloc: simplify last cpupid reset
  mm, page_alloc: remove unnecessary initialisation from __alloc_pages_nodemask()
  ...
@@ -61,7 +61,7 @@
 #include <linux/cgroup.h>
 #include <linux/wait.h>
 
-struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE;
+DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
 
 /* See "Frequency meter" comments, below. */
 
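
The hunk above switches cpusets_enabled_key from the old open-coded struct static_key to the DEFINE_STATIC_KEY_FALSE() API. A minimal sketch of how such a key is consumed with the new API; the cpusets_enabled()/cpuset_inc()/cpuset_dec() helpers mirror include/linux/cpuset.h, but treat the exact bodies here as illustrative:

    extern struct static_key_false cpusets_enabled_key;

    /* Fast-path test: compiles to a patched no-op branch while no cpusets are in use. */
    static inline bool cpusets_enabled(void)
    {
            return static_branch_unlikely(&cpusets_enabled_key);
    }

    /* Flipped around cpuset creation/destruction. */
    static inline void cpuset_inc(void) { static_branch_inc(&cpusets_enabled_key); }
    static inline void cpuset_dec(void) { static_branch_dec(&cpusets_enabled_key); }
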
@@ -2528,27 +2528,27 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
  * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
  * GFP_USER - only nodes in current tasks mems allowed ok.
  */
-int __cpuset_node_allowed(int node, gfp_t gfp_mask)
+bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
 {
         struct cpuset *cs;              /* current cpuset ancestors */
         int allowed;                    /* is allocation in zone z allowed? */
         unsigned long flags;
 
         if (in_interrupt())
-                return 1;
+                return true;
         if (node_isset(node, current->mems_allowed))
-                return 1;
+                return true;
         /*
          * Allow tasks that have access to memory reserves because they have
          * been OOM killed to get memory anywhere.
          */
         if (unlikely(test_thread_flag(TIF_MEMDIE)))
-                return 1;
+                return true;
         if (gfp_mask & __GFP_HARDWALL)  /* If hardwall request, stop here */
-                return 0;
+                return false;
 
         if (current->flags & PF_EXITING) /* Let dying task have memory */
-                return 1;
+                return true;
 
         /* Not hardwall and node outside mems_allowed: scan up cpusets */
         spin_lock_irqsave(&callback_lock, flags);
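
With bool returns, call sites read as a plain predicate. A loose, illustrative sketch of the kind of per-node check an allocator loop performs; the real callers go through cpuset_zone_allowed()-style wrappers and allocator-internal flags, and nid/gfp_mask are assumed to be in scope:

    for_each_node_mask(nid, node_states[N_MEMORY]) {
            if (cpusets_enabled() && (gfp_mask & __GFP_HARDWALL) &&
                !__cpuset_node_allowed(nid, gfp_mask))
                    continue;       /* this node is off-limits for the task */
            /* ... try to allocate from nid ... */
    }
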
@@ -2591,13 +2591,7 @@ int __cpuset_node_allowed(int node, gfp_t gfp_mask)
 
 static int cpuset_spread_node(int *rotor)
 {
-        int node;
-
-        node = next_node(*rotor, current->mems_allowed);
-        if (node == MAX_NUMNODES)
-                node = first_node(current->mems_allowed);
-        *rotor = node;
-        return node;
+        return *rotor = next_node_in(*rotor, current->mems_allowed);
 }
 
 int cpuset_mem_spread_node(void)
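
next_node_in() is a nodemask helper that folds the wrap-around into a single call; it behaves exactly like the open-coded sequence removed above. A sketch of the equivalent logic (the real helper lives in include/linux/nodemask.h; the _sketch suffix marks this as illustration):

    /* Advance to the next node set in *srcp after n, wrapping to the first set node. */
    static int next_node_in_sketch(int n, const nodemask_t *srcp)
    {
            int node = next_node(n, *srcp);

            if (node == MAX_NUMNODES)
                    node = first_node(*srcp);
            return node;
    }
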
@@ -1410,7 +1410,7 @@ static int __init crash_save_vmcoreinfo_init(void)
         VMCOREINFO_STRUCT_SIZE(list_head);
         VMCOREINFO_SIZE(nodemask_t);
         VMCOREINFO_OFFSET(page, flags);
-        VMCOREINFO_OFFSET(page, _count);
+        VMCOREINFO_OFFSET(page, _refcount);
         VMCOREINFO_OFFSET(page, mapping);
         VMCOREINFO_OFFSET(page, lru);
         VMCOREINFO_OFFSET(page, _mapcount);
 kernel/padata.c | 138 changed lines
@@ -606,33 +606,6 @@ out_replace:
         return 0;
 }
 
-/**
- * padata_set_cpumasks - Set both parallel and serial cpumasks. The first
- *                       one is used by parallel workers and the second one
- *                       by the wokers doing serialization.
- *
- * @pinst: padata instance
- * @pcpumask: the cpumask to use for parallel workers
- * @cbcpumask: the cpumsak to use for serial workers
- */
-int padata_set_cpumasks(struct padata_instance *pinst, cpumask_var_t pcpumask,
-                        cpumask_var_t cbcpumask)
-{
-        int err;
-
-        mutex_lock(&pinst->lock);
-        get_online_cpus();
-
-        err = __padata_set_cpumasks(pinst, pcpumask, cbcpumask);
-
-        put_online_cpus();
-        mutex_unlock(&pinst->lock);
-
-        return err;
-
-}
-EXPORT_SYMBOL(padata_set_cpumasks);
-
 /**
  * padata_set_cpumask: Sets specified by @cpumask_type cpumask to the value
  *                     equivalent to @cpumask.
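
padata_set_cpumasks() is removed here as unused; the remaining interface is padata_set_cpumask(), which updates one mask per call. A hedged sketch of how a caller of the removed helper could be converted (par_mask and ser_mask are placeholder cpumask_var_t variables):

    /* Before: err = padata_set_cpumasks(pinst, par_mask, ser_mask); */
    err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, par_mask);
    if (!err)
            err = padata_set_cpumask(pinst, PADATA_CPU_SERIAL, ser_mask);
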
@@ -674,6 +647,43 @@ out:
 }
 EXPORT_SYMBOL(padata_set_cpumask);
 
+/**
+ * padata_start - start the parallel processing
+ *
+ * @pinst: padata instance to start
+ */
+int padata_start(struct padata_instance *pinst)
+{
+        int err = 0;
+
+        mutex_lock(&pinst->lock);
+
+        if (pinst->flags & PADATA_INVALID)
+                err = -EINVAL;
+
+        __padata_start(pinst);
+
+        mutex_unlock(&pinst->lock);
+
+        return err;
+}
+EXPORT_SYMBOL(padata_start);
+
+/**
+ * padata_stop - stop the parallel processing
+ *
+ * @pinst: padata instance to stop
+ */
+void padata_stop(struct padata_instance *pinst)
+{
+        mutex_lock(&pinst->lock);
+        __padata_stop(pinst);
+        mutex_unlock(&pinst->lock);
+}
+EXPORT_SYMBOL(padata_stop);
+
+#ifdef CONFIG_HOTPLUG_CPU
+
 static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
 {
         struct parallel_data *pd;
@@ -694,42 +704,6 @@ static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
         return 0;
 }
 
-/**
- * padata_add_cpu - add a cpu to one or both(parallel and serial)
- *                  padata cpumasks.
- *
- * @pinst: padata instance
- * @cpu: cpu to add
- * @mask: bitmask of flags specifying to which cpumask @cpu shuld be added.
- *        The @mask may be any combination of the following flags:
- *          PADATA_CPU_SERIAL   - serial cpumask
- *          PADATA_CPU_PARALLEL - parallel cpumask
- */
-
-int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask)
-{
-        int err;
-
-        if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
-                return -EINVAL;
-
-        mutex_lock(&pinst->lock);
-
-        get_online_cpus();
-        if (mask & PADATA_CPU_SERIAL)
-                cpumask_set_cpu(cpu, pinst->cpumask.cbcpu);
-        if (mask & PADATA_CPU_PARALLEL)
-                cpumask_set_cpu(cpu, pinst->cpumask.pcpu);
-
-        err = __padata_add_cpu(pinst, cpu);
-        put_online_cpus();
-
-        mutex_unlock(&pinst->lock);
-
-        return err;
-}
-EXPORT_SYMBOL(padata_add_cpu);
-
 static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
 {
         struct parallel_data *pd = NULL;
@@ -789,43 +763,6 @@ int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
 }
 EXPORT_SYMBOL(padata_remove_cpu);
 
-/**
- * padata_start - start the parallel processing
- *
- * @pinst: padata instance to start
- */
-int padata_start(struct padata_instance *pinst)
-{
-        int err = 0;
-
-        mutex_lock(&pinst->lock);
-
-        if (pinst->flags & PADATA_INVALID)
-                err =-EINVAL;
-
-        __padata_start(pinst);
-
-        mutex_unlock(&pinst->lock);
-
-        return err;
-}
-EXPORT_SYMBOL(padata_start);
-
-/**
- * padata_stop - stop the parallel processing
- *
- * @pinst: padata instance to stop
- */
-void padata_stop(struct padata_instance *pinst)
-{
-        mutex_lock(&pinst->lock);
-        __padata_stop(pinst);
-        mutex_unlock(&pinst->lock);
-}
-EXPORT_SYMBOL(padata_stop);
-
-#ifdef CONFIG_HOTPLUG_CPU
-
 static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
 {
         return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
@@ -1091,7 +1028,6 @@ err_free_inst:
 err:
         return NULL;
 }
-EXPORT_SYMBOL(padata_alloc);
 
 /**
  * padata_free - free a padata instance
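
The padata hunks above only move padata_start()/padata_stop() and delete unused entry points; the client-side usage pattern is unchanged. An illustrative sketch, where pinst, padata and cb_cpu stand in for a client's instance, struct padata_priv work item and callback CPU:

    err = padata_start(pinst);      /* -EINVAL if the instance's cpumasks are invalid */
    if (err)
            goto out;

    err = padata_do_parallel(pinst, padata, cb_cpu);
    /* ... workers run; each object eventually passes through padata_do_serial() ... */

    padata_stop(pinst);
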
@@ -380,29 +380,9 @@ void destroy_rcu_head(struct rcu_head *head)
         debug_object_free(head, &rcuhead_debug_descr);
 }
 
-/*
- * fixup_activate is called when:
- * - an active object is activated
- * - an unknown object is activated (might be a statically initialized object)
- * Activation is performed internally by call_rcu().
- */
-static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
+static bool rcuhead_is_static_object(void *addr)
 {
-        struct rcu_head *head = addr;
-
-        switch (state) {
-
-        case ODEBUG_STATE_NOTAVAILABLE:
-                /*
-                 * This is not really a fixup. We just make sure that it is
-                 * tracked in the object tracker.
-                 */
-                debug_object_init(head, &rcuhead_debug_descr);
-                debug_object_activate(head, &rcuhead_debug_descr);
-                return 0;
-        default:
-                return 1;
-        }
+        return true;
 }
 
 /**
@@ -440,7 +420,7 @@ EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
 
 struct debug_obj_descr rcuhead_debug_descr = {
         .name = "rcu_head",
-        .fixup_activate = rcuhead_fixup_activate,
+        .is_static_object = rcuhead_is_static_object,
 };
 EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
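
The rcu_head hunks show the shape of the debugobjects interface change that the hrtimer, timer and workqueue hunks below repeat: a descriptor now supplies an is_static_object() predicate instead of abusing fixup_activate() to register statically initialized objects, and every fixup callback returns bool (true when a fixup was actually performed). A hedged sketch for a hypothetical object type; struct foo, FOO_STATIC_INIT and foo_cancel() are made up for illustration:

    static struct debug_obj_descr foo_debug_descr;

    static bool foo_is_static_object(void *addr)
    {
            struct foo *f = addr;

            return f->flags & FOO_STATIC_INIT;      /* however static init is detectable */
    }

    static bool foo_fixup_free(void *addr, enum debug_obj_state state)
    {
            switch (state) {
            case ODEBUG_STATE_ACTIVE:
                    foo_cancel(addr);                       /* quiesce the object first */
                    debug_object_free(addr, &foo_debug_descr);
                    return true;                            /* fixup performed */
            default:
                    return false;                           /* nothing to do */
            }
    }

    static struct debug_obj_descr foo_debug_descr = {
            .name             = "foo",
            .is_static_object = foo_is_static_object,
            .fixup_free       = foo_fixup_free,
    };
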
@@ -1521,6 +1521,13 @@ static struct ctl_table vm_table[] = {
                 .mode = 0644,
                 .proc_handler = proc_dointvec_jiffies,
         },
+        {
+                .procname = "stat_refresh",
+                .data = NULL,
+                .maxlen = 0,
+                .mode = 0600,
+                .proc_handler = vmstat_refresh,
+        },
 #endif
 #ifdef CONFIG_MMU
         {
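
The new table entry creates /proc/sys/vm/stat_refresh (root-only, no backing data): reading or writing it invokes vmstat_refresh(), which folds the per-cpu vmstat deltas into the global counters before they are reported. A hedged userspace illustration; in practice a plain `cat /proc/sys/vm/stat_refresh` before reading /proc/vmstat achieves the same:

    #include <fcntl.h>
    #include <unistd.h>

    /* Force a vmstat refresh before sampling /proc/vmstat (needs root and this patch). */
    static int refresh_vmstat(void)
    {
            char c;
            int fd = open("/proc/sys/vm/stat_refresh", O_RDONLY);

            if (fd < 0)
                    return -1;
            (void)read(fd, &c, 1);  /* the read itself triggers the refresh */
            close(fd);
            return 0;
    }
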
@@ -334,7 +334,7 @@ static void *hrtimer_debug_hint(void *addr)
  * fixup_init is called when:
  * - an active object is initialized
  */
-static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
+static bool hrtimer_fixup_init(void *addr, enum debug_obj_state state)
 {
         struct hrtimer *timer = addr;
 
@@ -342,30 +342,25 @@ static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
         case ODEBUG_STATE_ACTIVE:
                 hrtimer_cancel(timer);
                 debug_object_init(timer, &hrtimer_debug_descr);
-                return 1;
+                return true;
         default:
-                return 0;
+                return false;
         }
 }
 
 /*
  * fixup_activate is called when:
  * - an active object is activated
- * - an unknown object is activated (might be a statically initialized object)
+ * - an unknown non-static object is activated
  */
-static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
+static bool hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
 {
         switch (state) {
-
-        case ODEBUG_STATE_NOTAVAILABLE:
-                WARN_ON_ONCE(1);
-                return 0;
-
         case ODEBUG_STATE_ACTIVE:
                 WARN_ON(1);
 
         default:
-                return 0;
+                return false;
         }
 }
 
@@ -373,7 +368,7 @@ static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
  * fixup_free is called when:
  * - an active object is freed
  */
-static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
+static bool hrtimer_fixup_free(void *addr, enum debug_obj_state state)
 {
         struct hrtimer *timer = addr;
 
@@ -381,9 +376,9 @@ static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
         case ODEBUG_STATE_ACTIVE:
                 hrtimer_cancel(timer);
                 debug_object_free(timer, &hrtimer_debug_descr);
-                return 1;
+                return true;
         default:
-                return 0;
+                return false;
         }
 }
 
@@ -769,3 +769,24 @@ struct timespec timespec_add_safe(const struct timespec lhs,
 
         return res;
 }
+
+/*
+ * Add two timespec64 values and do a safety check for overflow.
+ * It's assumed that both values are valid (>= 0).
+ * And, each timespec64 is in normalized form.
+ */
+struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
+                                const struct timespec64 rhs)
+{
+        struct timespec64 res;
+
+        set_normalized_timespec64(&res, lhs.tv_sec + rhs.tv_sec,
+                        lhs.tv_nsec + rhs.tv_nsec);
+
+        if (unlikely(res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)) {
+                res.tv_sec = TIME64_MAX;
+                res.tv_nsec = 0;
+        }
+
+        return res;
+}
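
timespec64_add_safe() mirrors the existing timespec_add_safe(): instead of wrapping on overflow it saturates to TIME64_MAX. A small illustration of the intended behaviour (values picked only for the example):

    struct timespec64 a = { .tv_sec = TIME64_MAX - 5, .tv_nsec = 0 };
    struct timespec64 b = { .tv_sec = 100, .tv_nsec = 0 };
    struct timespec64 sum = timespec64_add_safe(a, b);

    /* Overflow is clamped: sum.tv_sec == TIME64_MAX, sum.tv_nsec == 0. */
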
@@ -489,11 +489,19 @@ static void *timer_debug_hint(void *addr)
         return ((struct timer_list *) addr)->function;
 }
 
+static bool timer_is_static_object(void *addr)
+{
+        struct timer_list *timer = addr;
+
+        return (timer->entry.pprev == NULL &&
+                timer->entry.next == TIMER_ENTRY_STATIC);
+}
+
 /*
  * fixup_init is called when:
  * - an active object is initialized
  */
-static int timer_fixup_init(void *addr, enum debug_obj_state state)
+static bool timer_fixup_init(void *addr, enum debug_obj_state state)
 {
         struct timer_list *timer = addr;
 
@@ -501,9 +509,9 @@ static int timer_fixup_init(void *addr, enum debug_obj_state state)
         case ODEBUG_STATE_ACTIVE:
                 del_timer_sync(timer);
                 debug_object_init(timer, &timer_debug_descr);
-                return 1;
+                return true;
         default:
-                return 0;
+                return false;
         }
 }
 
@@ -516,36 +524,22 @@ static void stub_timer(unsigned long data)
 /*
  * fixup_activate is called when:
  * - an active object is activated
- * - an unknown object is activated (might be a statically initialized object)
+ * - an unknown non-static object is activated
  */
-static int timer_fixup_activate(void *addr, enum debug_obj_state state)
+static bool timer_fixup_activate(void *addr, enum debug_obj_state state)
 {
         struct timer_list *timer = addr;
 
         switch (state) {
-
         case ODEBUG_STATE_NOTAVAILABLE:
-                /*
-                 * This is not really a fixup. The timer was
-                 * statically initialized. We just make sure that it
-                 * is tracked in the object tracker.
-                 */
-                if (timer->entry.pprev == NULL &&
-                    timer->entry.next == TIMER_ENTRY_STATIC) {
-                        debug_object_init(timer, &timer_debug_descr);
-                        debug_object_activate(timer, &timer_debug_descr);
-                        return 0;
-                } else {
-                        setup_timer(timer, stub_timer, 0);
-                        return 1;
-                }
-                return 0;
+                setup_timer(timer, stub_timer, 0);
+                return true;
 
         case ODEBUG_STATE_ACTIVE:
                 WARN_ON(1);
 
         default:
-                return 0;
+                return false;
         }
 }
 
@@ -553,7 +547,7 @@ static int timer_fixup_activate(void *addr, enum debug_obj_state state)
  * fixup_free is called when:
  * - an active object is freed
  */
-static int timer_fixup_free(void *addr, enum debug_obj_state state)
+static bool timer_fixup_free(void *addr, enum debug_obj_state state)
 {
         struct timer_list *timer = addr;
 
@@ -561,9 +555,9 @@ static int timer_fixup_free(void *addr, enum debug_obj_state state)
         case ODEBUG_STATE_ACTIVE:
                 del_timer_sync(timer);
                 debug_object_free(timer, &timer_debug_descr);
-                return 1;
+                return true;
         default:
-                return 0;
+                return false;
         }
 }
 
@@ -571,32 +565,23 @@ static int timer_fixup_free(void *addr, enum debug_obj_state state)
  * fixup_assert_init is called when:
  * - an untracked/uninit-ed object is found
  */
-static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
+static bool timer_fixup_assert_init(void *addr, enum debug_obj_state state)
 {
         struct timer_list *timer = addr;
 
         switch (state) {
         case ODEBUG_STATE_NOTAVAILABLE:
-                if (timer->entry.next == TIMER_ENTRY_STATIC) {
-                        /*
-                         * This is not really a fixup. The timer was
-                         * statically initialized. We just make sure that it
-                         * is tracked in the object tracker.
-                         */
-                        debug_object_init(timer, &timer_debug_descr);
-                        return 0;
-                } else {
-                        setup_timer(timer, stub_timer, 0);
-                        return 1;
-                }
+                setup_timer(timer, stub_timer, 0);
+                return true;
         default:
-                return 0;
+                return false;
         }
 }
 
 static struct debug_obj_descr timer_debug_descr = {
         .name = "timer_list",
         .debug_hint = timer_debug_hint,
+        .is_static_object = timer_is_static_object,
         .fixup_init = timer_fixup_init,
         .fixup_activate = timer_fixup_activate,
         .fixup_free = timer_fixup_free,
@@ -433,11 +433,18 @@ static void *work_debug_hint(void *addr)
         return ((struct work_struct *) addr)->func;
 }
 
+static bool work_is_static_object(void *addr)
+{
+        struct work_struct *work = addr;
+
+        return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
+}
+
 /*
  * fixup_init is called when:
  * - an active object is initialized
 */
-static int work_fixup_init(void *addr, enum debug_obj_state state)
+static bool work_fixup_init(void *addr, enum debug_obj_state state)
 {
         struct work_struct *work = addr;
 
@@ -445,42 +452,9 @@ static int work_fixup_init(void *addr, enum debug_obj_state state)
         case ODEBUG_STATE_ACTIVE:
                 cancel_work_sync(work);
                 debug_object_init(work, &work_debug_descr);
-                return 1;
+                return true;
         default:
-                return 0;
-        }
-}
-
-/*
- * fixup_activate is called when:
- * - an active object is activated
- * - an unknown object is activated (might be a statically initialized object)
- */
-static int work_fixup_activate(void *addr, enum debug_obj_state state)
-{
-        struct work_struct *work = addr;
-
-        switch (state) {
-
-        case ODEBUG_STATE_NOTAVAILABLE:
-                /*
-                 * This is not really a fixup. The work struct was
-                 * statically initialized. We just make sure that it
-                 * is tracked in the object tracker.
-                 */
-                if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
-                        debug_object_init(work, &work_debug_descr);
-                        debug_object_activate(work, &work_debug_descr);
-                        return 0;
-                }
-                WARN_ON_ONCE(1);
-                return 0;
-
-        case ODEBUG_STATE_ACTIVE:
-                WARN_ON(1);
-
-        default:
-                return 0;
+                return false;
         }
 }
 
@@ -488,7 +462,7 @@ static int work_fixup_activate(void *addr, enum debug_obj_state state)
  * fixup_free is called when:
  * - an active object is freed
  */
-static int work_fixup_free(void *addr, enum debug_obj_state state)
+static bool work_fixup_free(void *addr, enum debug_obj_state state)
 {
         struct work_struct *work = addr;
 
@@ -496,17 +470,17 @@ static int work_fixup_free(void *addr, enum debug_obj_state state)
         case ODEBUG_STATE_ACTIVE:
                 cancel_work_sync(work);
                 debug_object_free(work, &work_debug_descr);
-                return 1;
+                return true;
         default:
-                return 0;
+                return false;
         }
 }
 
 static struct debug_obj_descr work_debug_descr = {
         .name = "work_struct",
         .debug_hint = work_debug_hint,
+        .is_static_object = work_is_static_object,
         .fixup_init = work_fixup_init,
-        .fixup_activate = work_fixup_activate,
         .fixup_free = work_fixup_free,
 };
 