Merge tag 'uninit-macro-v5.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
Pull uninitialized_var() macro removal from Kees Cook:
 "This is long overdue, and has hidden too many bugs over the years.
  The series has several "by hand" fixes, and then a trivial treewide
  replacement.

   - Clean up non-trivial uses of uninitialized_var()

   - Update documentation and checkpatch for uninitialized_var() removal

   - Treewide removal of uninitialized_var()"

* tag 'uninit-macro-v5.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  compiler: Remove uninitialized_var() macro
  treewide: Remove uninitialized_var() usage
  checkpatch: Remove awareness of uninitialized_var() macro
  mm/debug_vm_pgtable: Remove uninitialized_var() usage
  f2fs: Eliminate usage of uninitialized_var() macro
  media: sur40: Remove uninitialized_var() usage
  KVM: PPC: Book3S PR: Remove uninitialized_var() usage
  clk: spear: Remove uninitialized_var() usage
  clk: st: Remove uninitialized_var() usage
  spi: davinci: Remove uninitialized_var() usage
  ide: Remove uninitialized_var() usage
  rtlwifi: rtl8192cu: Remove uninitialized_var() usage
  b43: Remove uninitialized_var() usage
  drbd: Remove uninitialized_var() usage
  x86/mm/numa: Remove uninitialized_var() usage
  docs: deprecated.rst: Add uninitialized_var()
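For context on what is being removed: uninitialized_var() was a small wrapper in the compiler headers that silenced "may be used uninitialized" warnings by making the compiler believe the variable already held a value. The snippet below is a paraphrased sketch of the old definition plus a hypothetical use, not part of this pull; see include/linux/compiler*.h in pre-5.9 trees for the exact wording.

/*
 * Sketch of the pre-5.9 macro (paraphrased; the clang header used a
 * slightly different spelling). The self-assignment makes the variable
 * look initialized to the compiler, so -Wmaybe-uninitialized stays
 * quiet -- including in the cases where the warning would have caught
 * a genuine missed assignment.
 */
#define uninitialized_var(x) x = x

/*
 * Hypothetical usage mirroring the declarations in the hunks below:
 * with the macro, no warning is emitted even if no code path assigns
 * 'serial' before it is read; with the plain declaration, the compiler
 * can flag that bug again.
 */
unsigned int uninitialized_var(serial);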
@@ -111,7 +111,7 @@ static void async_run_entry_fn(struct work_struct *work)
         struct async_entry *entry =
                 container_of(work, struct async_entry, work);
         unsigned long flags;
-        ktime_t uninitialized_var(calltime), delta, rettime;
+        ktime_t calltime, delta, rettime;
 
         /* 1) run (and print duration) */
         if (initcall_debug && system_state < SYSTEM_RUNNING) {
@@ -287,7 +287,7 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
  */
 void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain)
 {
-        ktime_t uninitialized_var(starttime), delta, endtime;
+        ktime_t starttime, delta, endtime;
 
         if (initcall_debug && system_state < SYSTEM_RUNNING) {
                 pr_debug("async_waiting @ %i\n", task_pid_nr(current));
@@ -1800,7 +1800,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 {
         struct audit_buffer *ab;
         struct timespec64 t;
-        unsigned int uninitialized_var(serial);
+        unsigned int serial;
 
         if (audit_initialized != AUDIT_INITIALIZED)
                 return NULL;
@@ -591,7 +591,7 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
         int this_cpu, old_cpu;
         char *cp, *cp2, *cphold = NULL, replaced_byte = ' ';
         char *moreprompt = "more> ";
-        unsigned long uninitialized_var(flags);
+        unsigned long flags;
 
         /* Serialize kdb_printf if multiple cpus try to write at once.
          * But if any cpu goes recursive in kdb, just print the output,
@@ -882,7 +882,7 @@ static int device_dma_allocations(struct device *dev, struct dma_debug_entry **o
 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
 {
         struct device *dev = data;
-        struct dma_debug_entry *uninitialized_var(entry);
+        struct dma_debug_entry *entry;
         int count;
 
         if (dma_debug_disabled())
@@ -11585,7 +11585,7 @@ SYSCALL_DEFINE5(perf_event_open,
         struct perf_event *group_leader = NULL, *output_event = NULL;
         struct perf_event *event, *sibling;
         struct perf_event_attr attr;
-        struct perf_event_context *ctx, *uninitialized_var(gctx);
+        struct perf_event_context *ctx, *gctx;
         struct file *event_file = NULL;
         struct fd group = {NULL, 0};
         struct task_struct *task = NULL;
@@ -2189,7 +2189,7 @@ static void handle_swbp(struct pt_regs *regs)
 {
         struct uprobe *uprobe;
         unsigned long bp_vaddr;
-        int uninitialized_var(is_swbp);
+        int is_swbp;
 
         bp_vaddr = uprobe_get_swbp_addr(regs);
         if (bp_vaddr == get_trampoline_vaddr())
@@ -93,7 +93,7 @@ static void __exit_signal(struct task_struct *tsk)
         struct signal_struct *sig = tsk->signal;
         bool group_dead = thread_group_leader(tsk);
         struct sighand_struct *sighand;
-        struct tty_struct *uninitialized_var(tty);
+        struct tty_struct *tty;
         u64 utime, stime;
 
         sighand = rcu_dereference_check(tsk->sighand,
@@ -1305,7 +1305,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
 static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
 {
         int err;
-        u32 uninitialized_var(curval);
+        u32 curval;
 
         if (unlikely(should_fail_futex(true)))
                 return -EFAULT;
@@ -1475,7 +1475,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
  */
 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
 {
-        u32 uninitialized_var(curval), newval;
+        u32 curval, newval;
         struct task_struct *new_owner;
         bool postunlock = false;
         DEFINE_WAKE_Q(wake_q);
@@ -2325,7 +2325,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
                                 struct task_struct *argowner)
 {
         struct futex_pi_state *pi_state = q->pi_state;
-        u32 uval, uninitialized_var(curval), newval;
+        u32 uval, curval, newval;
         struct task_struct *oldowner, *newowner;
         u32 newtid;
         int ret, err = 0;
@@ -2942,7 +2942,7 @@ uaddr_faulted:
  */
 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
 {
-        u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
+        u32 curval, uval, vpid = task_pid_vnr(current);
         union futex_key key = FUTEX_KEY_INIT;
         struct futex_hash_bucket *hb;
         struct futex_q *top_waiter;
@@ -3417,7 +3417,7 @@ err_unlock:
 static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
                               bool pi, bool pending_op)
 {
-        u32 uval, uninitialized_var(nval), mval;
+        u32 uval, nval, mval;
         int err;
 
         /* Futex address must be 32bit aligned */
@@ -3547,7 +3547,7 @@ static void exit_robust_list(struct task_struct *curr)
         struct robust_list_head __user *head = curr->robust_list;
         struct robust_list __user *entry, *next_entry, *pending;
         unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
-        unsigned int uninitialized_var(next_pi);
+        unsigned int next_pi;
         unsigned long futex_offset;
         int rc;
 
@@ -3847,7 +3847,7 @@ static void compat_exit_robust_list(struct task_struct *curr)
         struct compat_robust_list_head __user *head = curr->compat_robust_list;
         struct robust_list __user *entry, *next_entry, *pending;
         unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
-        unsigned int uninitialized_var(next_pi);
+        unsigned int next_pi;
         compat_uptr_t uentry, next_uentry, upending;
         compat_long_t futex_offset;
         int rc;
@@ -1723,7 +1723,7 @@ static int noop_count(struct lock_list *entry, void *data)
 static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
 {
         unsigned long count = 0;
-        struct lock_list *uninitialized_var(target_entry);
+        struct lock_list *target_entry;
 
         __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
 
@@ -1749,7 +1749,7 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
 static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
 {
         unsigned long count = 0;
-        struct lock_list *uninitialized_var(target_entry);
+        struct lock_list *target_entry;
 
         __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
 
@@ -1804,7 +1804,7 @@ check_noncircular(struct held_lock *src, struct held_lock *target,
                   struct lock_trace **const trace)
 {
         int ret;
-        struct lock_list *uninitialized_var(target_entry);
+        struct lock_list *target_entry;
         struct lock_list src_entry = {
                 .class = hlock_class(src),
                 .parent = NULL,
@@ -1842,7 +1842,7 @@ static noinline int
 check_redundant(struct held_lock *src, struct held_lock *target)
 {
         int ret;
-        struct lock_list *uninitialized_var(target_entry);
+        struct lock_list *target_entry;
         struct lock_list src_entry = {
                 .class = hlock_class(src),
                 .parent = NULL,
@@ -2244,8 +2244,8 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
 {
         unsigned long usage_mask = 0, forward_mask, backward_mask;
         enum lock_usage_bit forward_bit = 0, backward_bit = 0;
-        struct lock_list *uninitialized_var(target_entry1);
-        struct lock_list *uninitialized_var(target_entry);
+        struct lock_list *target_entry1;
+        struct lock_list *target_entry;
         struct lock_list this, that;
         int ret;
 
@@ -3438,7 +3438,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 {
         int ret;
         struct lock_list root;
-        struct lock_list *uninitialized_var(target_entry);
+        struct lock_list *target_entry;
 
         root.parent = NULL;
         root.class = hlock_class(this);
@@ -3465,7 +3465,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 {
         int ret;
         struct lock_list root;
-        struct lock_list *uninitialized_var(target_entry);
+        struct lock_list *target_entry;
 
         root.parent = NULL;
         root.class = hlock_class(this);
@@ -577,7 +577,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
  */
 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
 {
-        struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
+        struct ring_buffer_per_cpu *cpu_buffer;
         DEFINE_WAIT(wait);
         struct rb_irq_work *work;
         int ret = 0;