Merge branch 'akpm' (final batch from Andrew)

Merge third patch-bomb from Andrew Morton:
 "This wraps me up for -rc1.
   - Lots of misc stuff and things which were deferred/missed from
     patchbombings 1 & 2.
   - ocfs2 things
   - lib/scatterlist
   - hfsplus
   - fatfs
   - documentation
   - signals
   - procfs
   - lockdep
   - coredump
   - seqfile core
   - kexec
   - Tejun's large IDR tree reworkings
   - ipmi
   - partitions
   - nbd
   - random() things
   - kfifo
   - tools/testing/selftests updates
   - Sasha's large and pointless hlist cleanup"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (163 commits)
  hlist: drop the node parameter from iterators
  kcmp: make it depend on CHECKPOINT_RESTORE
  selftests: add a simple doc
  tools/testing/selftests/Makefile: rearrange targets
  selftests/efivarfs: add create-read test
  selftests/efivarfs: add empty file creation test
  selftests: add tests for efivarfs
  kfifo: fix kfifo_alloc() and kfifo_init()
  kfifo: move kfifo.c from kernel/ to lib/
  arch Kconfig: centralise CONFIG_ARCH_NO_VIRT_TO_BUS
  w1: add support for DS2413 Dual Channel Addressable Switch
  memstick: move the dereference below the NULL test
  drivers/pps/clients/pps-gpio.c: use devm_kzalloc
  Documentation/DMA-API-HOWTO.txt: fix typo
  include/linux/eventfd.h: fix incorrect filename is a comment
  mtd: mtd_stresstest: use prandom_bytes()
  mtd: mtd_subpagetest: convert to use prandom library
  mtd: mtd_speedtest: use prandom_bytes
  mtd: mtd_pagetest: convert to use prandom library
  mtd: mtd_oobtest: convert to use prandom library
  ...
Committed by Linus Torvalds on 2013-02-27 20:58:09 -08:00
429 changed files with 5230 additions and 3497 deletions


@@ -7,7 +7,7 @@ obj-y = fork.o exec_domain.o panic.o printk.o \
sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
notifier.o ksysfs.o cred.o \
async.o range.o groups.o lglock.o smpboot.o
@@ -25,9 +25,7 @@ endif
obj-y += sched/
obj-y += power/
ifeq ($(CONFIG_CHECKPOINT_RESTORE),y)
obj-$(CONFIG_X86) += kcmp.o
endif
obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
obj-$(CONFIG_FREEZER) += freezer.o
obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o


@@ -554,7 +554,6 @@ static struct css_set *find_existing_css_set(
{
int i;
struct cgroupfs_root *root = cgrp->root;
struct hlist_node *node;
struct css_set *cg;
unsigned long key;
@@ -577,7 +576,7 @@ static struct css_set *find_existing_css_set(
}
key = css_set_hash(template);
hash_for_each_possible(css_set_table, cg, node, hlist, key) {
hash_for_each_possible(css_set_table, cg, hlist, key) {
if (!compare_css_sets(cg, oldcg, cgrp, template))
continue;
@@ -1611,7 +1610,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
struct cgroupfs_root *existing_root;
const struct cred *cred;
int i;
struct hlist_node *node;
struct css_set *cg;
BUG_ON(sb->s_root != NULL);
@@ -1666,7 +1664,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
/* Link the top cgroup in this hierarchy into all
* the css_set objects */
write_lock(&css_set_lock);
hash_for_each(css_set_table, i, node, cg, hlist)
hash_for_each(css_set_table, i, cg, hlist)
link_css_set(&tmp_cg_links, cg, root_cgrp);
write_unlock(&css_set_lock);
@@ -4493,7 +4491,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
{
struct cgroup_subsys_state *css;
int i, ret;
struct hlist_node *node, *tmp;
struct hlist_node *tmp;
struct css_set *cg;
unsigned long key;
@@ -4561,7 +4559,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
* this is all done under the css_set_lock.
*/
write_lock(&css_set_lock);
hash_for_each_safe(css_set_table, i, node, tmp, cg, hlist) {
hash_for_each_safe(css_set_table, i, tmp, cg, hlist) {
/* skip entries that we already rehashed */
if (cg->subsys[ss->subsys_id])
continue;
@@ -4571,7 +4569,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
cg->subsys[ss->subsys_id] = css;
/* recompute hash and restore entry */
key = css_set_hash(cg->subsys);
hash_add(css_set_table, node, key);
hash_add(css_set_table, &cg->hlist, key);
}
write_unlock(&css_set_lock);
@@ -4618,10 +4616,8 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
offline_css(ss, dummytop);
ss->active = 0;
if (ss->use_id) {
idr_remove_all(&ss->idr);
if (ss->use_id)
idr_destroy(&ss->idr);
}
/* deassign the subsys_id */
subsys[ss->subsys_id] = NULL;
@@ -5322,7 +5318,7 @@ EXPORT_SYMBOL_GPL(free_css_id);
static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
{
struct css_id *newid;
int myid, error, size;
int ret, size;
BUG_ON(!ss->use_id);
@@ -5330,35 +5326,24 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
newid = kzalloc(size, GFP_KERNEL);
if (!newid)
return ERR_PTR(-ENOMEM);
/* get id */
if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) {
error = -ENOMEM;
goto err_out;
}
idr_preload(GFP_KERNEL);
spin_lock(&ss->id_lock);
/* Don't use 0. allocates an ID of 1-65535 */
error = idr_get_new_above(&ss->idr, newid, 1, &myid);
ret = idr_alloc(&ss->idr, newid, 1, CSS_ID_MAX + 1, GFP_NOWAIT);
spin_unlock(&ss->id_lock);
idr_preload_end();
/* Returns error when there are no free spaces for new ID.*/
if (error) {
error = -ENOSPC;
if (ret < 0)
goto err_out;
}
if (myid > CSS_ID_MAX)
goto remove_idr;
newid->id = myid;
newid->id = ret;
newid->depth = depth;
return newid;
remove_idr:
error = -ENOSPC;
spin_lock(&ss->id_lock);
idr_remove(&ss->idr, myid);
spin_unlock(&ss->id_lock);
err_out:
kfree(newid);
return ERR_PTR(error);
return ERR_PTR(ret);
}
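
The get_new_cssid() hunk above is one instance of Tejun's IDR rework mentioned in the merge message: the old idr_pre_get() + idr_get_new_above() two-step is collapsed into a single idr_alloc() call, with idr_preload()/idr_preload_end() providing the preallocation when the allocation itself must run under a spinlock. A minimal sketch of the new idiom, assuming the caller may sleep on entry (my_idr, my_lock and MY_ID_MAX are illustrative names, not identifiers from this patch):

        #include <linux/idr.h>
        #include <linux/spinlock.h>

        #define MY_ID_MAX 65535         /* illustrative upper bound */

        static int alloc_my_id(struct idr *my_idr, spinlock_t *my_lock, void *ptr)
        {
                int id;

                idr_preload(GFP_KERNEL);        /* preallocate while sleeping is allowed */
                spin_lock(my_lock);
                /* allocate an id in [1, MY_ID_MAX]; returns the id or a negative errno */
                id = idr_alloc(my_idr, ptr, 1, MY_ID_MAX + 1, GFP_NOWAIT);
                spin_unlock(my_lock);
                idr_preload_end();

                return id;      /* negative on failure, e.g. -ENOSPC when the range is full */
        }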


@@ -5126,7 +5126,6 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
{
struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
struct perf_event *event;
struct hlist_node *node;
struct hlist_head *head;
rcu_read_lock();
@@ -5134,7 +5133,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
if (!head)
goto end;
hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_swevent_match(event, type, event_id, data, regs))
perf_swevent_event(event, nr, data, regs);
}
@@ -5419,7 +5418,6 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
{
struct perf_sample_data data;
struct perf_event *event;
struct hlist_node *node;
struct perf_raw_record raw = {
.size = entry_size,
@@ -5429,7 +5427,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
perf_sample_data_init(&data, addr, 0);
data.raw = &raw;
hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
hlist_for_each_entry_rcu(event, head, hlist_entry) {
if (perf_tp_event_match(event, &data, regs))
perf_swevent_event(event, count, &data, regs);
}
@@ -5965,13 +5963,9 @@ int perf_pmu_register(struct pmu *pmu, char *name, int type)
pmu->name = name;
if (type < 0) {
int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
if (!err)
goto free_pdc;
err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
if (err) {
ret = err;
type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
if (type < 0) {
ret = type;
goto free_pdc;
}
}
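
The two hlist hunks above show the shape of the tree-wide iterator cleanup ("hlist: drop the node parameter from iterators"): the macros no longer take an external struct hlist_node * cursor, so callers delete one local variable and drop one argument. A schematic before/after, with my_obj, head and member as placeholder names rather than anything from this patch:

        struct my_obj {
                struct hlist_node member;
                /* ... payload ... */
        };

        /* old form (before this series):
         *      struct hlist_node *node;
         *      hlist_for_each_entry_rcu(obj, node, head, member) { ... }
         */

        /* new form: the cursor variable and macro argument are gone */
        struct my_obj *obj;

        hlist_for_each_entry_rcu(obj, head, member) {
                /* obj points at each entry in turn */
        }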


@@ -20,6 +20,7 @@
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
@@ -31,7 +32,6 @@
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/freezer.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
@@ -485,7 +485,7 @@ static void exit_mm(struct task_struct * tsk)
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
if (!self.task) /* see coredump_finish() */
break;
schedule();
freezable_schedule();
}
__set_task_state(tsk, TASK_RUNNING);
down_read(&mm->mmap_sem);
@@ -835,7 +835,7 @@ void do_exit(long code)
/*
* Make sure we are holding no locks:
*/
debug_check_no_locks_held(tsk);
debug_check_no_locks_held();
/*
* We can do this unlocked here. The futex code uses this flag
* just to verify whether the pi state cleanup has been done


@@ -1861,10 +1861,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
exit_sem(current);
}
if (new_nsproxy) {
if (new_nsproxy)
switch_task_namespaces(current, new_nsproxy);
new_nsproxy = NULL;
}
task_lock(current);
@@ -1894,9 +1892,6 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
}
}
if (new_nsproxy)
put_nsproxy(new_nsproxy);
bad_unshare_cleanup_cred:
if (new_cred)
put_cred(new_cred);


@@ -229,6 +229,8 @@ out:
}
static void kimage_free_page_list(struct list_head *list);
static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
unsigned long nr_segments,
struct kexec_segment __user *segments)
@@ -242,8 +244,6 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
if (result)
goto out;
*rimage = image;
/*
* Find a location for the control code buffer, and add it
* the vector of segments so that it's pages will also be
@@ -254,22 +254,22 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
get_order(KEXEC_CONTROL_PAGE_SIZE));
if (!image->control_code_page) {
printk(KERN_ERR "Could not allocate control_code_buffer\n");
goto out;
goto out_free;
}
image->swap_page = kimage_alloc_control_pages(image, 0);
if (!image->swap_page) {
printk(KERN_ERR "Could not allocate swap buffer\n");
goto out;
goto out_free;
}
result = 0;
out:
if (result == 0)
*rimage = image;
else
kfree(image);
*rimage = image;
return 0;
out_free:
kimage_free_page_list(&image->control_pages);
kfree(image);
out:
return result;
}
@@ -316,7 +316,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
mend = mstart + image->segment[i].memsz - 1;
/* Ensure we are within the crash kernel limits */
if ((mstart < crashk_res.start) || (mend > crashk_res.end))
goto out;
goto out_free;
}
/*
@@ -329,16 +329,15 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
get_order(KEXEC_CONTROL_PAGE_SIZE));
if (!image->control_code_page) {
printk(KERN_ERR "Could not allocate control_code_buffer\n");
goto out;
goto out_free;
}
result = 0;
out:
if (result == 0)
*rimage = image;
else
kfree(image);
*rimage = image;
return 0;
out_free:
kfree(image);
out:
return result;
}
@@ -503,8 +502,6 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
break;
if (hole_end > crashk_res.end)
break;
/* See if I overlap any of the segments */
for (i = 0; i < image->nr_segments; i++) {
unsigned long mstart, mend;
@@ -1514,6 +1511,8 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_OFFSET(page, _count);
VMCOREINFO_OFFSET(page, mapping);
VMCOREINFO_OFFSET(page, lru);
VMCOREINFO_OFFSET(page, _mapcount);
VMCOREINFO_OFFSET(page, private);
VMCOREINFO_OFFSET(pglist_data, node_zones);
VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -1536,6 +1535,11 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_NUMBER(PG_lru);
VMCOREINFO_NUMBER(PG_private);
VMCOREINFO_NUMBER(PG_swapcache);
VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
VMCOREINFO_NUMBER(PG_hwpoison);
#endif
VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
arch_crash_save_vmcoreinfo();
update_vmcoreinfo_note();
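
The kimage_normal_alloc()/kimage_crash_alloc() hunks above rework the error handling so that *rimage is published only on success and every failure path unwinds through an out_free label. A generic sketch of that cleanup-label pattern, not the exact kexec code (struct thing and the helpers are placeholders):

        struct thing {
                void *first;
                void *second;
        };

        static int setup_thing(struct thing **out)
        {
                struct thing *t;
                int result = -ENOMEM;

                t = kzalloc(sizeof(*t), GFP_KERNEL);
                if (!t)
                        return -ENOMEM;

                t->first = alloc_first_part();          /* placeholder helpers */
                if (!t->first)
                        goto out_free;

                t->second = alloc_second_part();
                if (!t->second)
                        goto out_free_first;

                *out = t;                               /* publish only on success */
                return 0;

        out_free_first:
                free_first_part(t->first);              /* undo partial setup */
        out_free:
                kfree(t);
                return result;
        }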


@@ -1,609 +0,0 @@
/*
* A generic kernel FIFO implementation
*
* Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/kfifo.h>
/*
* internal helper to calculate the unused elements in a fifo
*/
static inline unsigned int kfifo_unused(struct __kfifo *fifo)
{
return (fifo->mask + 1) - (fifo->in - fifo->out);
}
int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
size_t esize, gfp_t gfp_mask)
{
/*
* round down to the next power of 2, since our 'let the indices
* wrap' technique works only in this case.
*/
if (!is_power_of_2(size))
size = rounddown_pow_of_two(size);
fifo->in = 0;
fifo->out = 0;
fifo->esize = esize;
if (size < 2) {
fifo->data = NULL;
fifo->mask = 0;
return -EINVAL;
}
fifo->data = kmalloc(size * esize, gfp_mask);
if (!fifo->data) {
fifo->mask = 0;
return -ENOMEM;
}
fifo->mask = size - 1;
return 0;
}
EXPORT_SYMBOL(__kfifo_alloc);
void __kfifo_free(struct __kfifo *fifo)
{
kfree(fifo->data);
fifo->in = 0;
fifo->out = 0;
fifo->esize = 0;
fifo->data = NULL;
fifo->mask = 0;
}
EXPORT_SYMBOL(__kfifo_free);
int __kfifo_init(struct __kfifo *fifo, void *buffer,
unsigned int size, size_t esize)
{
size /= esize;
if (!is_power_of_2(size))
size = rounddown_pow_of_two(size);
fifo->in = 0;
fifo->out = 0;
fifo->esize = esize;
fifo->data = buffer;
if (size < 2) {
fifo->mask = 0;
return -EINVAL;
}
fifo->mask = size - 1;
return 0;
}
EXPORT_SYMBOL(__kfifo_init);
static void kfifo_copy_in(struct __kfifo *fifo, const void *src,
unsigned int len, unsigned int off)
{
unsigned int size = fifo->mask + 1;
unsigned int esize = fifo->esize;
unsigned int l;
off &= fifo->mask;
if (esize != 1) {
off *= esize;
size *= esize;
len *= esize;
}
l = min(len, size - off);
memcpy(fifo->data + off, src, l);
memcpy(fifo->data, src + l, len - l);
/*
* make sure that the data in the fifo is up to date before
* incrementing the fifo->in index counter
*/
smp_wmb();
}
unsigned int __kfifo_in(struct __kfifo *fifo,
const void *buf, unsigned int len)
{
unsigned int l;
l = kfifo_unused(fifo);
if (len > l)
len = l;
kfifo_copy_in(fifo, buf, len, fifo->in);
fifo->in += len;
return len;
}
EXPORT_SYMBOL(__kfifo_in);
static void kfifo_copy_out(struct __kfifo *fifo, void *dst,
unsigned int len, unsigned int off)
{
unsigned int size = fifo->mask + 1;
unsigned int esize = fifo->esize;
unsigned int l;
off &= fifo->mask;
if (esize != 1) {
off *= esize;
size *= esize;
len *= esize;
}
l = min(len, size - off);
memcpy(dst, fifo->data + off, l);
memcpy(dst + l, fifo->data, len - l);
/*
* make sure that the data is copied before
* incrementing the fifo->out index counter
*/
smp_wmb();
}
unsigned int __kfifo_out_peek(struct __kfifo *fifo,
void *buf, unsigned int len)
{
unsigned int l;
l = fifo->in - fifo->out;
if (len > l)
len = l;
kfifo_copy_out(fifo, buf, len, fifo->out);
return len;
}
EXPORT_SYMBOL(__kfifo_out_peek);
unsigned int __kfifo_out(struct __kfifo *fifo,
void *buf, unsigned int len)
{
len = __kfifo_out_peek(fifo, buf, len);
fifo->out += len;
return len;
}
EXPORT_SYMBOL(__kfifo_out);
static unsigned long kfifo_copy_from_user(struct __kfifo *fifo,
const void __user *from, unsigned int len, unsigned int off,
unsigned int *copied)
{
unsigned int size = fifo->mask + 1;
unsigned int esize = fifo->esize;
unsigned int l;
unsigned long ret;
off &= fifo->mask;
if (esize != 1) {
off *= esize;
size *= esize;
len *= esize;
}
l = min(len, size - off);
ret = copy_from_user(fifo->data + off, from, l);
if (unlikely(ret))
ret = DIV_ROUND_UP(ret + len - l, esize);
else {
ret = copy_from_user(fifo->data, from + l, len - l);
if (unlikely(ret))
ret = DIV_ROUND_UP(ret, esize);
}
/*
* make sure that the data in the fifo is up to date before
* incrementing the fifo->in index counter
*/
smp_wmb();
*copied = len - ret;
/* return the number of elements which are not copied */
return ret;
}
int __kfifo_from_user(struct __kfifo *fifo, const void __user *from,
unsigned long len, unsigned int *copied)
{
unsigned int l;
unsigned long ret;
unsigned int esize = fifo->esize;
int err;
if (esize != 1)
len /= esize;
l = kfifo_unused(fifo);
if (len > l)
len = l;
ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied);
if (unlikely(ret)) {
len -= ret;
err = -EFAULT;
} else
err = 0;
fifo->in += len;
return err;
}
EXPORT_SYMBOL(__kfifo_from_user);
static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to,
unsigned int len, unsigned int off, unsigned int *copied)
{
unsigned int l;
unsigned long ret;
unsigned int size = fifo->mask + 1;
unsigned int esize = fifo->esize;
off &= fifo->mask;
if (esize != 1) {
off *= esize;
size *= esize;
len *= esize;
}
l = min(len, size - off);
ret = copy_to_user(to, fifo->data + off, l);
if (unlikely(ret))
ret = DIV_ROUND_UP(ret + len - l, esize);
else {
ret = copy_to_user(to + l, fifo->data, len - l);
if (unlikely(ret))
ret = DIV_ROUND_UP(ret, esize);
}
/*
* make sure that the data is copied before
* incrementing the fifo->out index counter
*/
smp_wmb();
*copied = len - ret;
/* return the number of elements which are not copied */
return ret;
}
int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
unsigned long len, unsigned int *copied)
{
unsigned int l;
unsigned long ret;
unsigned int esize = fifo->esize;
int err;
if (esize != 1)
len /= esize;
l = fifo->in - fifo->out;
if (len > l)
len = l;
ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied);
if (unlikely(ret)) {
len -= ret;
err = -EFAULT;
} else
err = 0;
fifo->out += len;
return err;
}
EXPORT_SYMBOL(__kfifo_to_user);
static int setup_sgl_buf(struct scatterlist *sgl, void *buf,
int nents, unsigned int len)
{
int n;
unsigned int l;
unsigned int off;
struct page *page;
if (!nents)
return 0;
if (!len)
return 0;
n = 0;
page = virt_to_page(buf);
off = offset_in_page(buf);
l = 0;
while (len >= l + PAGE_SIZE - off) {
struct page *npage;
l += PAGE_SIZE;
buf += PAGE_SIZE;
npage = virt_to_page(buf);
if (page_to_phys(page) != page_to_phys(npage) - l) {
sg_set_page(sgl, page, l - off, off);
sgl = sg_next(sgl);
if (++n == nents || sgl == NULL)
return n;
page = npage;
len -= l - off;
l = off = 0;
}
}
sg_set_page(sgl, page, len, off);
return n + 1;
}
static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
int nents, unsigned int len, unsigned int off)
{
unsigned int size = fifo->mask + 1;
unsigned int esize = fifo->esize;
unsigned int l;
unsigned int n;
off &= fifo->mask;
if (esize != 1) {
off *= esize;
size *= esize;
len *= esize;
}
l = min(len, size - off);
n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
return n;
}
unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len)
{
unsigned int l;
l = kfifo_unused(fifo);
if (len > l)
len = l;
return setup_sgl(fifo, sgl, nents, len, fifo->in);
}
EXPORT_SYMBOL(__kfifo_dma_in_prepare);
unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len)
{
unsigned int l;
l = fifo->in - fifo->out;
if (len > l)
len = l;
return setup_sgl(fifo, sgl, nents, len, fifo->out);
}
EXPORT_SYMBOL(__kfifo_dma_out_prepare);
unsigned int __kfifo_max_r(unsigned int len, size_t recsize)
{
unsigned int max = (1 << (recsize << 3)) - 1;
if (len > max)
return max;
return len;
}
EXPORT_SYMBOL(__kfifo_max_r);
#define __KFIFO_PEEK(data, out, mask) \
((data)[(out) & (mask)])
/*
* __kfifo_peek_n internal helper function for determinate the length of
* the next record in the fifo
*/
static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
{
unsigned int l;
unsigned int mask = fifo->mask;
unsigned char *data = fifo->data;
l = __KFIFO_PEEK(data, fifo->out, mask);
if (--recsize)
l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8;
return l;
}
#define __KFIFO_POKE(data, in, mask, val) \
( \
(data)[(in) & (mask)] = (unsigned char)(val) \
)
/*
* __kfifo_poke_n internal helper function for storeing the length of
* the record into the fifo
*/
static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
{
unsigned int mask = fifo->mask;
unsigned char *data = fifo->data;
__KFIFO_POKE(data, fifo->in, mask, n);
if (recsize > 1)
__KFIFO_POKE(data, fifo->in + 1, mask, n >> 8);
}
unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize)
{
return __kfifo_peek_n(fifo, recsize);
}
EXPORT_SYMBOL(__kfifo_len_r);
unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf,
unsigned int len, size_t recsize)
{
if (len + recsize > kfifo_unused(fifo))
return 0;
__kfifo_poke_n(fifo, len, recsize);
kfifo_copy_in(fifo, buf, len, fifo->in + recsize);
fifo->in += len + recsize;
return len;
}
EXPORT_SYMBOL(__kfifo_in_r);
static unsigned int kfifo_out_copy_r(struct __kfifo *fifo,
void *buf, unsigned int len, size_t recsize, unsigned int *n)
{
*n = __kfifo_peek_n(fifo, recsize);
if (len > *n)
len = *n;
kfifo_copy_out(fifo, buf, len, fifo->out + recsize);
return len;
}
unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf,
unsigned int len, size_t recsize)
{
unsigned int n;
if (fifo->in == fifo->out)
return 0;
return kfifo_out_copy_r(fifo, buf, len, recsize, &n);
}
EXPORT_SYMBOL(__kfifo_out_peek_r);
unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
unsigned int len, size_t recsize)
{
unsigned int n;
if (fifo->in == fifo->out)
return 0;
len = kfifo_out_copy_r(fifo, buf, len, recsize, &n);
fifo->out += n + recsize;
return len;
}
EXPORT_SYMBOL(__kfifo_out_r);
void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
{
unsigned int n;
n = __kfifo_peek_n(fifo, recsize);
fifo->out += n + recsize;
}
EXPORT_SYMBOL(__kfifo_skip_r);
int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
unsigned long len, unsigned int *copied, size_t recsize)
{
unsigned long ret;
len = __kfifo_max_r(len, recsize);
if (len + recsize > kfifo_unused(fifo)) {
*copied = 0;
return 0;
}
__kfifo_poke_n(fifo, len, recsize);
ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied);
if (unlikely(ret)) {
*copied = 0;
return -EFAULT;
}
fifo->in += len + recsize;
return 0;
}
EXPORT_SYMBOL(__kfifo_from_user_r);
int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
unsigned long len, unsigned int *copied, size_t recsize)
{
unsigned long ret;
unsigned int n;
if (fifo->in == fifo->out) {
*copied = 0;
return 0;
}
n = __kfifo_peek_n(fifo, recsize);
if (len > n)
len = n;
ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied);
if (unlikely(ret)) {
*copied = 0;
return -EFAULT;
}
fifo->out += n + recsize;
return 0;
}
EXPORT_SYMBOL(__kfifo_to_user_r);
unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
{
if (!nents)
BUG();
len = __kfifo_max_r(len, recsize);
if (len + recsize > kfifo_unused(fifo))
return 0;
return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize);
}
EXPORT_SYMBOL(__kfifo_dma_in_prepare_r);
void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
unsigned int len, size_t recsize)
{
len = __kfifo_max_r(len, recsize);
__kfifo_poke_n(fifo, len, recsize);
fifo->in += len + recsize;
}
EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
{
if (!nents)
BUG();
len = __kfifo_max_r(len, recsize);
if (len + recsize > fifo->in - fifo->out)
return 0;
return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize);
}
EXPORT_SYMBOL(__kfifo_dma_out_prepare_r);
void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize)
{
unsigned int len;
len = __kfifo_peek_n(fifo, recsize);
fifo->out += len + recsize;
}
EXPORT_SYMBOL(__kfifo_dma_out_finish_r);
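
The file above is the generic FIFO implementation being removed from kernel/ as part of its move to lib/ ("kfifo: move kfifo.c from kernel/ to lib/"). For context, a minimal usage sketch of the public kfifo wrappers built on these __kfifo_* helpers; my_fifo is an illustrative name and the element count is a power of two as the API expects:

        #include <linux/kfifo.h>

        static DECLARE_KFIFO(my_fifo, unsigned char, 128);     /* 128 elements, power of two */

        static void kfifo_demo(void)
        {
                unsigned char in[4] = { 1, 2, 3, 4 };
                unsigned char out[4];
                unsigned int n;

                INIT_KFIFO(my_fifo);

                n = kfifo_in(&my_fifo, in, sizeof(in));         /* elements actually stored */
                n = kfifo_out(&my_fifo, out, sizeof(out));      /* elements actually read */
        }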


@@ -334,11 +334,10 @@ static inline void reset_kprobe_instance(void)
struct kprobe __kprobes *get_kprobe(void *addr)
{
struct hlist_head *head;
struct hlist_node *node;
struct kprobe *p;
head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
hlist_for_each_entry_rcu(p, node, head, hlist) {
hlist_for_each_entry_rcu(p, head, hlist) {
if (p->addr == addr)
return p;
}
@@ -799,7 +798,6 @@ out:
static void __kprobes optimize_all_kprobes(void)
{
struct hlist_head *head;
struct hlist_node *node;
struct kprobe *p;
unsigned int i;
@@ -810,7 +808,7 @@ static void __kprobes optimize_all_kprobes(void)
kprobes_allow_optimization = true;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
hlist_for_each_entry_rcu(p, head, hlist)
if (!kprobe_disabled(p))
optimize_kprobe(p);
}
@@ -821,7 +819,6 @@ static void __kprobes optimize_all_kprobes(void)
static void __kprobes unoptimize_all_kprobes(void)
{
struct hlist_head *head;
struct hlist_node *node;
struct kprobe *p;
unsigned int i;
@@ -832,7 +829,7 @@ static void __kprobes unoptimize_all_kprobes(void)
kprobes_allow_optimization = false;
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist) {
hlist_for_each_entry_rcu(p, head, hlist) {
if (!kprobe_disabled(p))
unoptimize_kprobe(p, false);
}
@@ -1148,7 +1145,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
{
struct kretprobe_instance *ri;
struct hlist_head *head, empty_rp;
struct hlist_node *node, *tmp;
struct hlist_node *tmp;
unsigned long hash, flags = 0;
if (unlikely(!kprobes_initialized))
@@ -1159,12 +1156,12 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
hash = hash_ptr(tk, KPROBE_HASH_BITS);
head = &kretprobe_inst_table[hash];
kretprobe_table_lock(hash, &flags);
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task == tk)
recycle_rp_inst(ri, &empty_rp);
}
kretprobe_table_unlock(hash, &flags);
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
@@ -1173,9 +1170,9 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
static inline void free_rp_inst(struct kretprobe *rp)
{
struct kretprobe_instance *ri;
struct hlist_node *pos, *next;
struct hlist_node *next;
hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
@@ -1185,14 +1182,14 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
unsigned long flags, hash;
struct kretprobe_instance *ri;
struct hlist_node *pos, *next;
struct hlist_node *next;
struct hlist_head *head;
/* No race here */
for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
kretprobe_table_lock(hash, &flags);
head = &kretprobe_inst_table[hash];
hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
hlist_for_each_entry_safe(ri, next, head, hlist) {
if (ri->rp == rp)
ri->rp = NULL;
}
@@ -2028,7 +2025,6 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
{
struct module *mod = data;
struct hlist_head *head;
struct hlist_node *node;
struct kprobe *p;
unsigned int i;
int checkcore = (val == MODULE_STATE_GOING);
@@ -2045,7 +2041,7 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
mutex_lock(&kprobe_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
hlist_for_each_entry_rcu(p, head, hlist)
if (within_module_init((unsigned long)p->addr, mod) ||
(checkcore &&
within_module_core((unsigned long)p->addr, mod))) {
@@ -2192,7 +2188,6 @@ static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
struct hlist_head *head;
struct hlist_node *node;
struct kprobe *p, *kp;
const char *sym = NULL;
unsigned int i = *(loff_t *) v;
@@ -2201,7 +2196,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
head = &kprobe_table[i];
preempt_disable();
hlist_for_each_entry_rcu(p, node, head, hlist) {
hlist_for_each_entry_rcu(p, head, hlist) {
sym = kallsyms_lookup((unsigned long)p->addr, NULL,
&offset, &modname, namebuf);
if (kprobe_aggrprobe(p)) {
@@ -2236,7 +2231,6 @@ static const struct file_operations debugfs_kprobes_operations = {
static void __kprobes arm_all_kprobes(void)
{
struct hlist_head *head;
struct hlist_node *node;
struct kprobe *p;
unsigned int i;
@@ -2249,7 +2243,7 @@ static void __kprobes arm_all_kprobes(void)
/* Arming kprobes doesn't optimize kprobe itself */
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
hlist_for_each_entry_rcu(p, head, hlist)
if (!kprobe_disabled(p))
arm_kprobe(p);
}
@@ -2265,7 +2259,6 @@ already_enabled:
static void __kprobes disarm_all_kprobes(void)
{
struct hlist_head *head;
struct hlist_node *node;
struct kprobe *p;
unsigned int i;
@@ -2282,7 +2275,7 @@ static void __kprobes disarm_all_kprobes(void)
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist) {
hlist_for_each_entry_rcu(p, head, hlist) {
if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
disarm_kprobe(p, false);
}


@@ -4088,7 +4088,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
static void print_held_locks_bug(struct task_struct *curr)
static void print_held_locks_bug(void)
{
if (!debug_locks_off())
return;
@@ -4097,22 +4097,21 @@ static void print_held_locks_bug(struct task_struct *curr)
printk("\n");
printk("=====================================\n");
printk("[ BUG: lock held at task exit time! ]\n");
printk("[ BUG: %s/%d still has locks held! ]\n",
current->comm, task_pid_nr(current));
print_kernel_ident();
printk("-------------------------------------\n");
printk("%s/%d is exiting with locks still held!\n",
curr->comm, task_pid_nr(curr));
lockdep_print_held_locks(curr);
lockdep_print_held_locks(current);
printk("\nstack backtrace:\n");
dump_stack();
}
void debug_check_no_locks_held(struct task_struct *task)
void debug_check_no_locks_held(void)
{
if (unlikely(task->lockdep_depth > 0))
print_held_locks_bug(task);
if (unlikely(current->lockdep_depth > 0))
print_held_locks_bug();
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
void debug_show_all_locks(void)
{


@@ -350,10 +350,9 @@ void disable_pid_allocation(struct pid_namespace *ns)
struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
struct hlist_node *elem;
struct upid *pnr;
hlist_for_each_entry_rcu(pnr, elem,
hlist_for_each_entry_rcu(pnr,
&pid_hash[pid_hashfn(nr, ns)], pid_chain)
if (pnr->nr == nr && pnr->ns == ns)
return container_of(pnr, struct pid,


@@ -552,24 +552,22 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
return -EAGAIN;
spin_lock_init(&new_timer->it_lock);
retry:
if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
error = -EAGAIN;
goto out;
}
idr_preload(GFP_KERNEL);
spin_lock_irq(&idr_lock);
error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
error = idr_alloc(&posix_timers_id, new_timer, 0, 0, GFP_NOWAIT);
spin_unlock_irq(&idr_lock);
if (error) {
if (error == -EAGAIN)
goto retry;
idr_preload_end();
if (error < 0) {
/*
* Weird looking, but we return EAGAIN if the IDR is
* full (proper POSIX return value for this)
*/
error = -EAGAIN;
if (error == -ENOSPC)
error = -EAGAIN;
goto out;
}
new_timer_id = error;
it_id_set = IT_ID_SET;
new_timer->it_id = (timer_t) new_timer_id;


@@ -1752,9 +1752,8 @@ EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
struct preempt_notifier *notifier;
struct hlist_node *node;
hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
notifier->ops->sched_in(notifier, raw_smp_processor_id());
}
@@ -1763,9 +1762,8 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
struct task_struct *next)
{
struct preempt_notifier *notifier;
struct hlist_node *node;
hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
notifier->ops->sched_out(notifier, next);
}


@@ -1157,11 +1157,11 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
static void print_fatal_signal(int signr)
{
struct pt_regs *regs = signal_pt_regs();
printk("%s/%d: potentially unexpected fatal signal %d.\n",
printk(KERN_INFO "%s/%d: potentially unexpected fatal signal %d.\n",
current->comm, task_pid_nr(current), signr);
#if defined(__i386__) && !defined(__arch_um__)
printk("code at %08lx: ", regs->ip);
printk(KERN_INFO "code at %08lx: ", regs->ip);
{
int i;
for (i = 0; i < 16; i++) {
@@ -1169,11 +1169,11 @@ static void print_fatal_signal(int signr)
if (get_user(insn, (unsigned char *)(regs->ip + i)))
break;
printk("%02x ", insn);
printk(KERN_CONT "%02x ", insn);
}
}
printk(KERN_CONT "\n");
#endif
printk("\n");
preempt_disable();
show_regs(regs);
preempt_enable();
@@ -2996,7 +2996,8 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
/* Not even root can pretend to send signals from the kernel.
* Nor can they impersonate a kill()/tgkill(), which adds source info.
*/
if (info->si_code >= 0 || info->si_code == SI_TKILL) {
if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
(task_pid_vnr(current) != pid)) {
/* We used to allow any < 0 si_code */
WARN_ON_ONCE(info->si_code < 0);
return -EPERM;
@@ -3045,7 +3046,8 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
/* Not even root can pretend to send signals from the kernel.
* Nor can they impersonate a kill()/tgkill(), which adds source info.
*/
if (info->si_code >= 0 || info->si_code == SI_TKILL) {
if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
(task_pid_vnr(current) != pid)) {
/* We used to allow any < 0 si_code */
WARN_ON_ONCE(info->si_code < 0);
return -EPERM;


@@ -131,7 +131,7 @@ static int smpboot_thread_fn(void *data)
continue;
}
BUG_ON(td->cpu != smp_processor_id());
//BUG_ON(td->cpu != smp_processor_id());
/* Check for state change setup */
switch (td->status) {


@@ -2185,11 +2185,6 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
static void argv_cleanup(struct subprocess_info *info)
{
argv_free(info->argv);
}
static int __orderly_poweroff(void)
{
int argc;
@@ -2209,9 +2204,8 @@ static int __orderly_poweroff(void)
}
ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC,
NULL, argv_cleanup, NULL);
if (ret == -ENOMEM)
argv_free(argv);
NULL, NULL, NULL);
argv_free(argv);
return ret;
}


@@ -2095,7 +2095,7 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
static void validate_coredump_safety(void)
{
#ifdef CONFIG_COREDUMP
if (suid_dumpable == SUID_DUMPABLE_SAFE &&
if (suid_dumpable == SUID_DUMP_ROOT &&
core_pattern[0] != '/' && core_pattern[0] != '|') {
printk(KERN_WARNING "Unsafe core_pattern used with "\
"suid_dumpable=2. Pipe handler or fully qualified "\


@@ -1171,9 +1171,10 @@ static ssize_t bin_dn_node_address(struct file *file,
/* Convert the decnet address to binary */
result = -EIO;
nodep = strchr(buf, '.') + 1;
nodep = strchr(buf, '.');
if (!nodep)
goto out;
++nodep;
area = simple_strtoul(buf, NULL, 10);
node = simple_strtoul(nodep, NULL, 10);


@@ -762,7 +762,6 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
struct ftrace_profile *rec;
struct hlist_head *hhd;
struct hlist_node *n;
unsigned long key;
key = hash_long(ip, ftrace_profile_bits);
@@ -771,7 +770,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
if (hlist_empty(hhd))
return NULL;
hlist_for_each_entry_rcu(rec, n, hhd, node) {
hlist_for_each_entry_rcu(rec, hhd, node) {
if (rec->ip == ip)
return rec;
}
@@ -1133,7 +1132,6 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
unsigned long key;
struct ftrace_func_entry *entry;
struct hlist_head *hhd;
struct hlist_node *n;
if (ftrace_hash_empty(hash))
return NULL;
@@ -1145,7 +1143,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
hhd = &hash->buckets[key];
hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
hlist_for_each_entry_rcu(entry, hhd, hlist) {
if (entry->ip == ip)
return entry;
}
@@ -1202,7 +1200,7 @@ remove_hash_entry(struct ftrace_hash *hash,
static void ftrace_hash_clear(struct ftrace_hash *hash)
{
struct hlist_head *hhd;
struct hlist_node *tp, *tn;
struct hlist_node *tn;
struct ftrace_func_entry *entry;
int size = 1 << hash->size_bits;
int i;
@@ -1212,7 +1210,7 @@ static void ftrace_hash_clear(struct ftrace_hash *hash)
for (i = 0; i < size; i++) {
hhd = &hash->buckets[i];
hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
hlist_for_each_entry_safe(entry, tn, hhd, hlist)
free_hash_entry(hash, entry);
}
FTRACE_WARN_ON(hash->count);
@@ -1275,7 +1273,6 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
struct ftrace_func_entry *entry;
struct ftrace_hash *new_hash;
struct hlist_node *tp;
int size;
int ret;
int i;
@@ -1290,7 +1287,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
size = 1 << hash->size_bits;
for (i = 0; i < size; i++) {
hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
ret = add_hash_entry(new_hash, entry->ip);
if (ret < 0)
goto free_hash;
@@ -1316,7 +1313,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
struct ftrace_hash **dst, struct ftrace_hash *src)
{
struct ftrace_func_entry *entry;
struct hlist_node *tp, *tn;
struct hlist_node *tn;
struct hlist_head *hhd;
struct ftrace_hash *old_hash;
struct ftrace_hash *new_hash;
@@ -1362,7 +1359,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
size = 1 << src->size_bits;
for (i = 0; i < size; i++) {
hhd = &src->buckets[i];
hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
if (bits > 0)
key = hash_long(entry->ip, bits);
else
@@ -2901,7 +2898,6 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
{
struct ftrace_func_probe *entry;
struct hlist_head *hhd;
struct hlist_node *n;
unsigned long key;
key = hash_long(ip, FTRACE_HASH_BITS);
@@ -2917,7 +2913,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
* on the hash. rcu_read_lock is too dangerous here.
*/
preempt_disable_notrace();
hlist_for_each_entry_rcu(entry, n, hhd, node) {
hlist_for_each_entry_rcu(entry, hhd, node) {
if (entry->ip == ip)
entry->ops->func(ip, parent_ip, &entry->data);
}
@@ -3068,7 +3064,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data, int flags)
{
struct ftrace_func_probe *entry;
struct hlist_node *n, *tmp;
struct hlist_node *tmp;
char str[KSYM_SYMBOL_LEN];
int type = MATCH_FULL;
int i, len = 0;
@@ -3091,7 +3087,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
struct hlist_head *hhd = &ftrace_func_hash[i];
hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
hlist_for_each_entry_safe(entry, tmp, hhd, node) {
/* break up if statements for readability */
if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)


@@ -739,12 +739,11 @@ static int task_state_char(unsigned long state)
struct trace_event *ftrace_find_event(int type)
{
struct trace_event *event;
struct hlist_node *n;
unsigned key;
key = type & (EVENT_HASHSIZE - 1);
hlist_for_each_entry(event, n, &event_hash[key], node) {
hlist_for_each_entry(event, &event_hash[key], node) {
if (event->type == type)
return event;
}


@@ -192,12 +192,11 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
static struct tracepoint_entry *get_tracepoint(const char *name)
{
struct hlist_head *head;
struct hlist_node *node;
struct tracepoint_entry *e;
u32 hash = jhash(name, strlen(name), 0);
head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
hlist_for_each_entry(e, node, head, hlist) {
hlist_for_each_entry(e, head, hlist) {
if (!strcmp(name, e->name))
return e;
}
@@ -211,13 +210,12 @@ static struct tracepoint_entry *get_tracepoint(const char *name)
static struct tracepoint_entry *add_tracepoint(const char *name)
{
struct hlist_head *head;
struct hlist_node *node;
struct tracepoint_entry *e;
size_t name_len = strlen(name) + 1;
u32 hash = jhash(name, name_len-1, 0);
head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
hlist_for_each_entry(e, node, head, hlist) {
hlist_for_each_entry(e, head, hlist) {
if (!strcmp(name, e->name)) {
printk(KERN_NOTICE
"tracepoint %s busy\n", name);


@@ -34,11 +34,11 @@ EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
void fire_user_return_notifiers(void)
{
struct user_return_notifier *urn;
struct hlist_node *tmp1, *tmp2;
struct hlist_node *tmp2;
struct hlist_head *head;
head = &get_cpu_var(return_notifier_list);
hlist_for_each_entry_safe(urn, tmp1, tmp2, head, link)
hlist_for_each_entry_safe(urn, tmp2, head, link)
urn->on_user_return(urn);
put_cpu_var(return_notifier_list);
}

View File

@@ -105,9 +105,8 @@ static void uid_hash_remove(struct user_struct *up)
static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
struct user_struct *user;
struct hlist_node *h;
hlist_for_each_entry(user, h, hashent, uidhash_node) {
hlist_for_each_entry(user, hashent, uidhash_node) {
if (uid_eq(user->uid, uid)) {
atomic_inc(&user->__count);
return user;


@@ -30,7 +30,7 @@ static struct uts_namespace *create_uts_ns(void)
/*
* Clone a new ns copying an original utsname, setting refcount to 1
* @old_ns: namespace to clone
* Return NULL on error (failure to kmalloc), new ns otherwise
* Return ERR_PTR(-ENOMEM) on error (failure to kmalloc), new ns otherwise
*/
static struct uts_namespace *clone_uts_ns(struct user_namespace *user_ns,
struct uts_namespace *old_ns)


@@ -15,6 +15,8 @@
#include <linux/sysctl.h>
#include <linux/wait.h>
#ifdef CONFIG_PROC_SYSCTL
static void *get_uts(ctl_table *table, int write)
{
char *which = table->data;
@@ -38,7 +40,6 @@ static void put_uts(ctl_table *table, int write, void *which)
up_write(&uts_sem);
}
#ifdef CONFIG_PROC_SYSCTL
/*
* Special case of dostring for the UTS structure. This has locks
* to observe. Should this be in kernel/sys.c ????


@@ -251,8 +251,8 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
for ((pool) = &std_worker_pools(cpu)[0]; \
(pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
#define for_each_busy_worker(worker, i, pos, pool) \
hash_for_each(pool->busy_hash, i, pos, worker, hentry)
#define for_each_busy_worker(worker, i, pool) \
hash_for_each(pool->busy_hash, i, worker, hentry)
static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
unsigned int sw)
@@ -909,9 +909,8 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
struct work_struct *work)
{
struct worker *worker;
struct hlist_node *tmp;
hash_for_each_possible(pool->busy_hash, worker, tmp, hentry,
hash_for_each_possible(pool->busy_hash, worker, hentry,
(unsigned long)work)
if (worker->current_work == work &&
worker->current_func == work->func)
@@ -1626,7 +1625,6 @@ static void busy_worker_rebind_fn(struct work_struct *work)
static void rebind_workers(struct worker_pool *pool)
{
struct worker *worker, *n;
struct hlist_node *pos;
int i;
lockdep_assert_held(&pool->assoc_mutex);
@@ -1648,7 +1646,7 @@ static void rebind_workers(struct worker_pool *pool)
}
/* rebind busy workers */
for_each_busy_worker(worker, i, pos, pool) {
for_each_busy_worker(worker, i, pool) {
struct work_struct *rebind_work = &worker->rebind_work;
struct workqueue_struct *wq;
@@ -3423,7 +3421,6 @@ static void wq_unbind_fn(struct work_struct *work)
int cpu = smp_processor_id();
struct worker_pool *pool;
struct worker *worker;
struct hlist_node *pos;
int i;
for_each_std_worker_pool(pool, cpu) {
@@ -3442,7 +3439,7 @@ static void wq_unbind_fn(struct work_struct *work)
list_for_each_entry(worker, &pool->idle_list, entry)
worker->flags |= WORKER_UNBOUND;
for_each_busy_worker(worker, i, pos, pool)
for_each_busy_worker(worker, i, pool)
worker->flags |= WORKER_UNBOUND;
pool->flags |= POOL_DISASSOCIATED;