Merge 5.10.141 into android12-5.10-lts

Changes in 5.10.141
	mm: Force TLB flush for PFNMAP mappings before unlink_file_vma()
	x86/nospec: Unwreck the RSB stuffing
	x86/nospec: Fix i386 RSB stuffing
	crypto: lib - remove unneeded selection of XOR_BLOCKS
	s390/mm: do not trigger write fault when vma does not allow VM_WRITE
	kbuild: Fix include path in scripts/Makefile.modpost
	Bluetooth: L2CAP: Fix build errors in some archs
	Revert "PCI/portdrv: Don't disable AER reporting in get_port_device_capability()"
	HID: steam: Prevent NULL pointer dereference in steam_{recv,send}_report
	udmabuf: Set the DMA mask for the udmabuf device (v2)
	media: pvrusb2: fix memory leak in pvr_probe
	HID: hidraw: fix memory leak in hidraw_release()
	net: fix refcount bug in sk_psock_get (2)
	fbdev: fb_pm2fb: Avoid potential divide by zero error
	ftrace: Fix NULL pointer dereference in is_ftrace_trampoline when ftrace is dead
	bpf: Don't redirect packets with invalid pkt_len
	mm/rmap: Fix anon_vma->degree ambiguity leading to double-reuse
	mmc: mtk-sd: Clear interrupts when cqe off/disable
	drm/amd/display: Avoid MPC infinite loop
	drm/amd/display: For stereo keep "FLIP_ANY_FRAME"
	drm/amd/display: clear optc underflow before turn off odm clock
	neigh: fix possible DoS due to net iface start/stop loop
	s390/hypfs: avoid error message under KVM
	drm/amd/pm: add missing ->fini_microcode interface for Sienna Cichlid
	drm/amd/display: Fix pixel clock programming
	drm/amdgpu: Increase tlb flush timeout for sriov
	netfilter: conntrack: NF_CONNTRACK_PROCFS should no longer default to y
	lib/vdso: Mark do_hres_timens() and do_coarse_timens() __always_inline()
	kprobes: don't call disarm_kprobe() for disabled kprobes
	io_uring: disable polling pollfree files
	xfs: remove infinite loop when reserving free block pool
	xfs: always succeed at setting the reserve pool size
	xfs: fix overfilling of reserve pool
	xfs: fix soft lockup via spinning in filestream ag selection loop
	xfs: revert "xfs: actually bump warning counts when we send warnings"
	net/af_packet: check len when min_header_len equals to 0
	net: neigh: don't call kfree_skb() under spin_lock_irqsave()
	Linux 5.10.141

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I8b6a1e0bd31df051b90433857f126c183771d367
Author: Greg Kroah-Hartman
Date:   2022-09-07 09:44:58 +02:00

47 files changed, 324 insertions(+), 152 deletions(-)

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 140
+SUBLEVEL = 141
 EXTRAVERSION =
 NAME = Dare mighty things

@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
 	int rc;
 
 	if (diag204_probe()) {
-		pr_err("The hardware system does not support hypfs\n");
+		pr_info("The hardware system does not support hypfs\n");
 		return -ENODATA;
 	}

@@ -496,9 +496,9 @@ fail_hypfs_sprp_exit:
 	hypfs_vm_exit();
 fail_hypfs_diag_exit:
 	hypfs_diag_exit();
+	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 fail_dbfs_exit:
 	hypfs_dbfs_exit();
-	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 	return rc;
 }
 device_initcall(hypfs_init)

@@ -429,7 +429,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 	flags = FAULT_FLAG_DEFAULT;
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
-	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+	if ((trans_exc_code & store_indication) == 0x400)
+		access = VM_WRITE;
+	if (access == VM_WRITE)
 		flags |= FAULT_FLAG_WRITE;
 	mmap_read_lock(mm);

@@ -35,33 +35,56 @@
 #define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
 
 /*
+ * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
+ */
+#define __FILL_RETURN_SLOT			\
+	ANNOTATE_INTRA_FUNCTION_CALL;		\
+	call	772f;				\
+	int3;					\
+772:
+
+/*
+ * Stuff the entire RSB.
+ *
  * Google experimented with loop-unrolling and this turned out to be
  * the optimal version - two calls, each with their own speculation
  * trap should their return address end up getting used, in a loop.
  */
-#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
-	mov	$(nr/2), reg;			\
-771:						\
-	ANNOTATE_INTRA_FUNCTION_CALL;		\
-	call	772f;				\
-773:	/* speculation trap */			\
-	UNWIND_HINT_EMPTY;			\
-	pause;					\
-	lfence;					\
-	jmp	773b;				\
-772:						\
-	ANNOTATE_INTRA_FUNCTION_CALL;		\
-	call	774f;				\
-775:	/* speculation trap */			\
-	UNWIND_HINT_EMPTY;			\
-	pause;					\
-	lfence;					\
-	jmp	775b;				\
-774:						\
-	add	$(BITS_PER_LONG/8) * 2, sp;	\
-	dec	reg;				\
-	jnz	771b;				\
-	/* barrier for jnz misprediction */	\
+#ifdef CONFIG_X86_64
+#define __FILL_RETURN_BUFFER(reg, nr)			\
+	mov	$(nr/2), reg;				\
+771:							\
+	__FILL_RETURN_SLOT				\
+	__FILL_RETURN_SLOT				\
+	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
+	dec	reg;					\
+	jnz	771b;					\
+	/* barrier for jnz misprediction */		\
+	lfence;
+#else
+/*
+ * i386 doesn't unconditionally have LFENCE, as such it can't
+ * do a loop.
+ */
+#define __FILL_RETURN_BUFFER(reg, nr)			\
+	.rept nr;					\
+	__FILL_RETURN_SLOT;				\
+	.endr;						\
+	add	$(BITS_PER_LONG/8) * nr, %_ASM_SP;
+#endif
+
+/*
+ * Stuff a single RSB slot.
+ *
+ * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
+ * forced to retire before letting a RET instruction execute.
+ *
+ * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
+ * before this point.
+ */
+#define __FILL_ONE_RETURN				\
+	__FILL_RETURN_SLOT				\
+	add	$(BITS_PER_LONG/8), %_ASM_SP;		\
 	lfence;
 
 #ifdef __ASSEMBLY__
@@ -120,28 +143,15 @@
 #endif
 .endm
 
-.macro ISSUE_UNBALANCED_RET_GUARD
-	ANNOTATE_INTRA_FUNCTION_CALL
-	call .Lunbalanced_ret_guard_\@
-	int3
-.Lunbalanced_ret_guard_\@:
-	add $(BITS_PER_LONG/8), %_ASM_SP
-	lfence
-.endm
-
 /*
  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
  * monstrosity above, manually.
  */
-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
-.ifb \ftr2
-	ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
-.else
-	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
-.endif
-	__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
-.Lunbalanced_\@:
-	ISSUE_UNBALANCED_RET_GUARD
+.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
+	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
+		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
+		__stringify(__FILL_ONE_RETURN), \ftr2
 .Lskip_rsb_\@:
 .endm

@@ -6197,6 +6197,7 @@ const struct file_operations binder_fops = {
 	.open = binder_open,
 	.flush = binder_flush,
 	.release = binder_release,
+	.may_pollfree = true,
 };
 
 DEFINE_SHOW_ATTRIBUTE(state);

@@ -327,7 +327,23 @@ static struct miscdevice udmabuf_misc = {
 
 static int __init udmabuf_dev_init(void)
 {
-	return misc_register(&udmabuf_misc);
+	int ret;
+
+	ret = misc_register(&udmabuf_misc);
+	if (ret < 0) {
+		pr_err("Could not initialize udmabuf device\n");
+		return ret;
+	}
+
+	ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
+					   DMA_BIT_MASK(64));
+	if (ret < 0) {
+		pr_err("Could not setup DMA mask for udmabuf device\n");
+		misc_deregister(&udmabuf_misc);
+		return ret;
+	}
+
+	return 0;
 }
 
 static void __exit udmabuf_dev_exit(void)

@@ -283,7 +283,7 @@ enum amdgpu_kiq_irq {
 	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
 	AMDGPU_CP_KIQ_IRQ_LAST
 };
+#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
 #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
 #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
 #define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */

@@ -371,6 +371,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	uint32_t seq;
 	uint16_t queried_pasid;
 	bool ret;
+	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
@@ -389,7 +390,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 		amdgpu_ring_commit(ring);
 		spin_unlock(&adev->gfx.kiq.ring_lock);
-		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+		r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
 		if (r < 1) {
 			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
 			return -ETIME;

@@ -839,6 +839,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	uint32_t seq;
 	uint16_t queried_pasid;
 	bool ret;
+	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
 
@@ -878,7 +879,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 		amdgpu_ring_commit(ring);
 		spin_unlock(&adev->gfx.kiq.ring_lock);
-		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
+		r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
 		if (r < 1) {
 			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
 			up_read(&adev->reset_sem);

@@ -546,9 +546,11 @@ static void dce112_get_pix_clk_dividers_helper (
 	switch (pix_clk_params->color_depth) {
 	case COLOR_DEPTH_101010:
 		actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
+		actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
 		break;
 	case COLOR_DEPTH_121212:
 		actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
+		actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
 		break;
 	case COLOR_DEPTH_161616:
 		actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;

@@ -125,6 +125,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
 	while (tmp_mpcc != NULL) {
 		if (tmp_mpcc->dpp_id == dpp_id)
 			return tmp_mpcc;
+
+		/* avoid circular linked list */
+		ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+		if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+			break;
+
 		tmp_mpcc = tmp_mpcc->mpcc_bot;
 	}
 	return NULL;

@@ -464,6 +464,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
 				OTG_CLOCK_ON, 1,
 				1, 1000);
 	} else {
+
+		//last chance to clear underflow, otherwise, it will always there due to clock is off.
+		if (optc->funcs->is_optc_underflow_occurred(optc) == true)
+			optc->funcs->clear_optc_underflow(optc);
+
 		REG_UPDATE_2(OTG_CLOCK_CONTROL,
 				OTG_CLOCK_GATE_DIS, 0,
 				OTG_CLOCK_EN, 0);

@@ -533,6 +533,12 @@ struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
 	while (tmp_mpcc != NULL) {
 		if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
 			return tmp_mpcc;
+
+		/* avoid circular linked list */
+		ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
+		if (tmp_mpcc == tmp_mpcc->mpcc_bot)
+			break;
+
 		tmp_mpcc = tmp_mpcc->mpcc_bot;
 	}
 	return NULL;

@@ -86,7 +86,7 @@ bool hubp3_program_surface_flip_and_addr(
 			VMID, address->vmid);
 
 	if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) {
-		REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1);
+		REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0);
 		REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1);
 
 	} else {

@@ -2759,6 +2759,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
 	.dump_pptable = sienna_cichlid_dump_pptable,
 	.init_microcode = smu_v11_0_init_microcode,
 	.load_microcode = smu_v11_0_load_microcode,
+	.fini_microcode = smu_v11_0_fini_microcode,
 	.init_smc_tables = sienna_cichlid_init_smc_tables,
 	.fini_smc_tables = smu_v11_0_fini_smc_tables,
 	.init_power = smu_v11_0_init_power,

@@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam,
 	int ret;
 
 	r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+	if (!r) {
+		hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
+		return -EINVAL;
+	}
+
 	if (hid_report_len(r) < 64)
 		return -EINVAL;
 
@@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam,
 	int ret;
 
 	r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+	if (!r) {
+		hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
+		return -EINVAL;
+	}
+
 	if (hid_report_len(r) < 64)
 		return -EINVAL;

@@ -346,10 +346,13 @@ static int hidraw_release(struct inode * inode, struct file * file)
 	unsigned int minor = iminor(inode);
 	struct hidraw_list *list = file->private_data;
 	unsigned long flags;
+	int i;
 
 	mutex_lock(&minors_lock);
 
 	spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
+	for (i = list->tail; i < list->head; i++)
+		kfree(list->buffer[i].value);
 	list_del(&list->node);
 	spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
 	kfree(list);

@@ -2610,6 +2610,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
 	del_timer_sync(&hdw->encoder_run_timer);
 	del_timer_sync(&hdw->encoder_wait_timer);
 	flush_work(&hdw->workpoll);
+	v4l2_device_unregister(&hdw->v4l2_dev);
 	usb_free_urb(hdw->ctl_read_urb);
 	usb_free_urb(hdw->ctl_write_urb);
 	kfree(hdw->ctl_read_buffer);

@@ -2293,6 +2293,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
 	/* disable busy check */
 	sdr_clr_bits(host->base + MSDC_PATCH_BIT1, MSDC_PB1_BUSY_CHECK_SEL);
 
+	val = readl(host->base + MSDC_INT);
+	writel(val, host->base + MSDC_INT);
+
 	if (recovery) {
 		sdr_set_field(host->base + MSDC_DMA_CTRL,
 			      MSDC_DMA_CTRL_STOP, 1);
@@ -2693,11 +2696,14 @@ static int __maybe_unused msdc_suspend(struct device *dev)
 {
 	struct mmc_host *mmc = dev_get_drvdata(dev);
 	int ret;
+	u32 val;
 
 	if (mmc->caps2 & MMC_CAP2_CQE) {
 		ret = cqhci_suspend(mmc);
 		if (ret)
 			return ret;
+		val = readl(((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT);
+		writel(val, ((struct msdc_host *)mmc_priv(mmc))->base + MSDC_INT);
 	}
 
 	return pm_runtime_force_suspend(dev);

@@ -222,8 +222,15 @@ static int get_port_device_capability(struct pci_dev *dev)
 #ifdef CONFIG_PCIEAER
 	if (dev->aer_cap && pci_aer_available() &&
-	    (pcie_ports_native || host->native_aer))
+	    (pcie_ports_native || host->native_aer)) {
 		services |= PCIE_PORT_SERVICE_AER;
+
+		/*
+		 * Disable AER on this port in case it's been enabled by the
+		 * BIOS (the AER service driver will enable it when necessary).
+		 */
+		pci_disable_pcie_error_reporting(dev);
+	}
 #endif
 
 	/*

@@ -616,6 +616,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 		return -EINVAL;
 	}
 
+	if (!var->pixclock) {
+		DPRINTK("pixclock is zero\n");
+		return -EINVAL;
+	}
+
 	if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
 		DPRINTK("pixclock too high (%ldKHz)\n",
 			PICOS2KHZ(var->pixclock));

@@ -5198,6 +5198,11 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
 	struct io_ring_ctx *ctx = req->ctx;
 	bool cancel = false;
 
+	if (req->file->f_op->may_pollfree) {
+		spin_lock_irq(&ctx->completion_lock);
+		return -EOPNOTSUPP;
+	}
+
 	INIT_HLIST_NODE(&req->hash_node);
 	io_init_poll_iocb(poll, mask, wake_func);
 	poll->file = req->file;

@@ -248,6 +248,7 @@ static const struct file_operations signalfd_fops = {
 	.poll = signalfd_poll,
 	.read = signalfd_read,
 	.llseek = noop_llseek,
+	.may_pollfree = true,
 };
 
 static int do_signalfd4(int ufd, sigset_t *mask, int flags)

@@ -128,11 +128,12 @@ xfs_filestream_pick_ag(
 		if (!pag->pagf_init) {
 			err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
 			if (err) {
-				xfs_perag_put(pag);
-				if (err != -EAGAIN)
+				if (err != -EAGAIN) {
+					xfs_perag_put(pag);
 					return err;
+				}
 
 				/* Couldn't lock the AGF, skip this AG. */
-				continue;
+				goto next_ag;
 			}
 		}

@@ -376,46 +376,36 @@ xfs_reserve_blocks(
 	 * If the request is larger than the current reservation, reserve the
 	 * blocks before we update the reserve counters. Sample m_fdblocks and
	 * perform a partial reservation if the request exceeds free space.
+	 *
+	 * The code below estimates how many blocks it can request from
+	 * fdblocks to stash in the reserve pool. This is a classic TOCTOU
+	 * race since fdblocks updates are not always coordinated via
+	 * m_sb_lock. Set the reserve size even if there's not enough free
+	 * space to fill it because mod_fdblocks will refill an undersized
+	 * reserve when it can.
 	 */
-	error = -ENOSPC;
-	do {
-		free = percpu_counter_sum(&mp->m_fdblocks) -
-						mp->m_alloc_set_aside;
-		if (free <= 0)
-			break;
-
-		delta = request - mp->m_resblks;
-		lcounter = free - delta;
-		if (lcounter < 0)
-			/* We can't satisfy the request, just get what we can */
-			fdblks_delta = free;
-		else
-			fdblks_delta = delta;
-
+	free = percpu_counter_sum(&mp->m_fdblocks) -
+						xfs_fdblocks_unavailable(mp);
+	delta = request - mp->m_resblks;
+	mp->m_resblks = request;
+	if (delta > 0 && free > 0) {
 		/*
 		 * We'll either succeed in getting space from the free block
-		 * count or we'll get an ENOSPC. If we get a ENOSPC, it means
-		 * things changed while we were calculating fdblks_delta and so
-		 * we should try again to see if there is anything left to
-		 * reserve.
+		 * count or we'll get an ENOSPC. Don't set the reserved flag
+		 * here - we don't want to reserve the extra reserve blocks
+		 * from the reserve.
 		 *
-		 * Don't set the reserved flag here - we don't want to reserve
-		 * the extra reserve blocks from the reserve.....
+		 * The desired reserve size can change after we drop the lock.
+		 * Use mod_fdblocks to put the space into the reserve or into
+		 * fdblocks as appropriate.
 		 */
+		fdblks_delta = min(free, delta);
 		spin_unlock(&mp->m_sb_lock);
 		error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
+		if (!error)
+			xfs_mod_fdblocks(mp, fdblks_delta, 0);
 		spin_lock(&mp->m_sb_lock);
-	} while (error == -ENOSPC);
-
-	/*
-	 * Update the reserve counters if blocks have been successfully
-	 * allocated.
-	 */
-	if (!error && fdblks_delta) {
-		mp->m_resblks += fdblks_delta;
-		mp->m_resblks_avail += fdblks_delta;
 	}
 out:
 	if (outval) {
 		outval->resblks = mp->m_resblks;

@@ -406,6 +406,14 @@ extern int xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
 				xfs_agnumber_t *maxagi);
 extern void xfs_unmountfs(xfs_mount_t *);
 
+/* Accessor added for 5.10.y backport */
+static inline uint64_t
+xfs_fdblocks_unavailable(
+	struct xfs_mount *mp)
+{
+	return mp->m_alloc_set_aside;
+}
+
 extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
 				 bool reserved);
 extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);

@@ -615,7 +615,6 @@ xfs_dqresv_check(
 			return QUOTA_NL_ISOFTLONGWARN;
 		}
 
-		res->warnings++;
 		return QUOTA_NL_ISOFTWARN;
 	}

@@ -1893,6 +1893,7 @@ struct file_operations {
 			struct file *file_out, loff_t pos_out,
 			loff_t len, unsigned int remap_flags);
 	int (*fadvise)(struct file *, loff_t, loff_t, int);
+	bool may_pollfree;
 
 	ANDROID_KABI_RESERVE(1);
 	ANDROID_KABI_RESERVE(2);

@@ -43,12 +43,15 @@ struct anon_vma {
 	atomic_t refcount;
 
 	/*
-	 * Count of child anon_vmas and VMAs which points to this anon_vma.
+	 * Count of child anon_vmas. Equals to the count of all anon_vmas that
+	 * have ->parent pointing to this one, including itself.
 	 *
 	 * This counter is used for making decision about reusing anon_vma
 	 * instead of forking new one. See comments in function anon_vma_clone.
 	 */
-	unsigned degree;
+	unsigned long num_children;
+	/* Count of VMAs whose ->anon_vma pointer points to this object. */
+	unsigned long num_active_vmas;
 
 	struct anon_vma *parent;	/* Parent of this anon_vma */

@@ -2245,6 +2245,14 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
 
 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
 
+static inline void skb_assert_len(struct sk_buff *skb)
+{
+#ifdef CONFIG_DEBUG_NET
+	if (WARN_ONCE(!skb->len, "%s\n", __func__))
+		DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+#endif /* CONFIG_DEBUG_NET */
+}
+
 /*
  *	Add data to an sk_buff
  */

@@ -281,7 +281,8 @@ static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start)
 
 static inline struct sk_psock *sk_psock(const struct sock *sk)
 {
-	return rcu_dereference_sk_user_data(sk);
+	return __rcu_dereference_sk_user_data_with_flags(sk,
+							 SK_USER_DATA_PSOCK);
 }
 
 static inline void sk_psock_queue_msg(struct sk_psock *psock,

@@ -548,14 +548,26 @@ enum sk_pacing {
 	SK_PACING_FQ = 2,
 };
 
-/* Pointer stored in sk_user_data might not be suitable for copying
- * when cloning the socket. For instance, it can point to a reference
- * counted object. sk_user_data bottom bit is set if pointer must not
- * be copied.
+/* flag bits in sk_user_data
+ *
+ * - SK_USER_DATA_NOCOPY: Pointer stored in sk_user_data might
+ *   not be suitable for copying when cloning the socket. For instance,
+ *   it can point to a reference counted object. sk_user_data bottom
+ *   bit is set if pointer must not be copied.
+ *
+ * - SK_USER_DATA_BPF: Mark whether sk_user_data field is
+ *   managed/owned by a BPF reuseport array. This bit should be set
+ *   when sk_user_data's sk is added to the bpf's reuseport_array.
+ *
+ * - SK_USER_DATA_PSOCK: Mark whether pointer stored in
+ *   sk_user_data points to psock type. This bit should be set
+ *   when sk_user_data is assigned to a psock object.
  */
 #define SK_USER_DATA_NOCOPY	1UL
-#define SK_USER_DATA_BPF	2UL	/* Managed by BPF */
-#define SK_USER_DATA_PTRMASK	~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)
+#define SK_USER_DATA_BPF	2UL
+#define SK_USER_DATA_PSOCK	4UL
+#define SK_USER_DATA_PTRMASK	~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\
+				  SK_USER_DATA_PSOCK)
 
 /**
  * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
@@ -568,24 +580,40 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk)
 
 #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
 
+/**
+ * __rcu_dereference_sk_user_data_with_flags - return the pointer
+ * only if argument flags all has been set in sk_user_data. Otherwise
+ * return NULL
+ *
+ * @sk: socket
+ * @flags: flag bits
+ */
+static inline void *
+__rcu_dereference_sk_user_data_with_flags(const struct sock *sk,
+					  uintptr_t flags)
+{
+	uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk));
+
+	WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK);
+
+	if ((sk_user_data & flags) == flags)
+		return (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
+	return NULL;
+}
+
 #define rcu_dereference_sk_user_data(sk)			\
+	__rcu_dereference_sk_user_data_with_flags(sk, 0)
+#define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags)	\
 ({								\
-	void *__tmp = rcu_dereference(__sk_user_data((sk)));	\
-	(void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK);	\
+	uintptr_t __tmp1 = (uintptr_t)(ptr),			\
+		  __tmp2 = (uintptr_t)(flags);			\
+	WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK);		\
+	WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK);		\
+	rcu_assign_pointer(__sk_user_data((sk)),		\
+			   __tmp1 | __tmp2);			\
 })
 #define rcu_assign_sk_user_data(sk, ptr)			\
-({								\
-	uintptr_t __tmp = (uintptr_t)(ptr);			\
-	WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK);		\
-	rcu_assign_pointer(__sk_user_data((sk)), __tmp);	\
-})
-#define rcu_assign_sk_user_data_nocopy(sk, ptr)			\
-({								\
-	uintptr_t __tmp = (uintptr_t)(ptr);			\
-	WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK);		\
-	rcu_assign_pointer(__sk_user_data((sk)),		\
-			   __tmp | SK_USER_DATA_NOCOPY);	\
-})
+	__rcu_assign_sk_user_data_with_flags(sk, ptr, 0)
 
 /*
  * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK

@@ -1786,11 +1786,12 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
 	/* Try to disarm and disable this/parent probe */
 	if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
 		/*
-		 * If kprobes_all_disarmed is set, orig_p
-		 * should have already been disarmed, so
-		 * skip unneed disarming process.
+		 * Don't be lazy here. Even if 'kprobes_all_disarmed'
+		 * is false, 'orig_p' might not have been armed yet.
+		 * Note arm_all_kprobes() __tries__ to arm all kprobes
+		 * on the best effort basis.
		 */
-		if (!kprobes_all_disarmed) {
+		if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
 			ret = disarm_kprobe(orig_p, true);
 			if (ret) {
 				p->flags &= ~KPROBE_FLAG_DISABLED;

@@ -2899,6 +2899,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
 
 	ftrace_startup_enable(command);
 
+	/*
+	 * If ftrace is in an undefined state, we just remove ops from list
+	 * to prevent the NULL pointer, instead of totally rolling it back and
+	 * free trampoline, because those actions could cause further damage.
+	 */
+	if (unlikely(ftrace_disabled)) {
+		__unregister_ftrace_function(ops);
+		return -ENODEV;
+	}
+
 	ops->flags &= ~FTRACE_OPS_FL_ADDING;
 
 	return 0;

@@ -33,7 +33,6 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
 
 config CRYPTO_LIB_CHACHA_GENERIC
 	tristate
-	select XOR_BLOCKS
 	help
 	  This symbol can be depended upon by arch implementations of the
 	  ChaCha library interface that require the generic code as a

@@ -46,8 +46,8 @@ static inline bool vdso_cycles_ok(u64 cycles)
 #endif
 
 #ifdef CONFIG_TIME_NS
-static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
-			  struct __kernel_timespec *ts)
+static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+					  struct __kernel_timespec *ts)
 {
 	const struct vdso_data *vd = __arch_get_timens_vdso_data();
 	const struct timens_offset *offs = &vdns->offset[clk];
@@ -97,8 +97,8 @@ static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
 	return NULL;
 }
 
-static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
-			  struct __kernel_timespec *ts)
+static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+					  struct __kernel_timespec *ts)
 {
 	return -EINVAL;
 }
@@ -159,8 +159,8 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
 }
 
 #ifdef CONFIG_TIME_NS
-static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
-			    struct __kernel_timespec *ts)
+static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+					    struct __kernel_timespec *ts)
 {
 	const struct vdso_data *vd = __arch_get_timens_vdso_data();
 	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
@@ -188,8 +188,8 @@ static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
 	return 0;
 }
 #else
-static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
-			    struct __kernel_timespec *ts)
+static __always_inline int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
+					    struct __kernel_timespec *ts)
 {
 	return -1;
 }

@@ -2773,6 +2773,18 @@ static void unmap_region(struct mm_struct *mm,
 	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end);
+
+	/*
+	 * Ensure we have no stale TLB entries by the time this mapping is
+	 * removed from the rmap.
+	 * Note that we don't have to worry about nested flushes here because
+	 * we're holding the mm semaphore for removing the mapping - so any
+	 * concurrent flush in this region has to be coming through the rmap,
+	 * and we synchronize against that using the rmap lock.
+	 */
+	if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0)
+		tlb_flush_mmu(&tlb);
+
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 				 next ? next->vm_start : USER_PGTABLES_CEILING);
 	tlb_finish_mmu(&tlb, start, end);

@@ -91,7 +91,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
 	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
 	if (anon_vma) {
 		atomic_set(&anon_vma->refcount, 1);
-		anon_vma->degree = 1;	/* Reference for first vma */
+		anon_vma->num_children = 0;
+		anon_vma->num_active_vmas = 0;
 		anon_vma->parent = anon_vma;
 		/*
 		 * Initialise the anon_vma root to point to itself. If called
@@ -199,6 +200,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
 		anon_vma = anon_vma_alloc();
 		if (unlikely(!anon_vma))
 			goto out_enomem_free_avc;
+		anon_vma->num_children++; /* self-parent link for new root */
 		allocated = anon_vma;
 	}
@@ -208,8 +210,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
 	if (likely(!vma->anon_vma)) {
 		vma->anon_vma = anon_vma;
 		anon_vma_chain_link(vma, avc, anon_vma);
-		/* vma reference or self-parent link for new root */
-		anon_vma->degree++;
+		anon_vma->num_active_vmas++;
 		allocated = NULL;
 		avc = NULL;
 	}
@@ -294,19 +295,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 		anon_vma_chain_link(dst, avc, anon_vma);
 
 		/*
-		 * Reuse existing anon_vma if its degree lower than two,
-		 * that means it has no vma and only one anon_vma child.
+		 * Reuse existing anon_vma if it has no vma and only one
+		 * anon_vma child.
 		 *
-		 * Do not chose parent anon_vma, otherwise first child
-		 * will always reuse it. Root anon_vma is never reused:
+		 * Root anon_vma is never reused:
 		 * it has self-parent reference and at least one child.
 		 */
 		if (!dst->anon_vma && src->anon_vma &&
-		    anon_vma != src->anon_vma && anon_vma->degree < 2)
+		    anon_vma->num_children < 2 &&
+		    anon_vma->num_active_vmas == 0)
 			dst->anon_vma = anon_vma;
 	}
 	if (dst->anon_vma)
-		dst->anon_vma->degree++;
+		dst->anon_vma->num_active_vmas++;
 	unlock_anon_vma_root(root);
 	return 0;
@@ -356,6 +357,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 	anon_vma = anon_vma_alloc();
 	if (!anon_vma)
 		goto out_error;
+	anon_vma->num_active_vmas++;
 	avc = anon_vma_chain_alloc(GFP_KERNEL);
 	if (!avc)
 		goto out_error_free_anon_vma;
@@ -376,7 +378,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 	vma->anon_vma = anon_vma;
 	anon_vma_lock_write(anon_vma);
 	anon_vma_chain_link(vma, avc, anon_vma);
-	anon_vma->parent->degree++;
+	anon_vma->parent->num_children++;
 	anon_vma_unlock_write(anon_vma);
 
 	return 0;
@@ -408,7 +410,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
 		 * to free them outside the lock.
 		 */
 		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
-			anon_vma->parent->degree--;
+			anon_vma->parent->num_children--;
 			continue;
 		}
@@ -416,7 +418,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
 		anon_vma_chain_free(avc);
 	}
 	if (vma->anon_vma)
-		vma->anon_vma->degree--;
+		vma->anon_vma->num_active_vmas--;
 	unlock_anon_vma_root(root);
@@ -427,7 +429,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
 	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
 		struct anon_vma *anon_vma = avc->anon_vma;
 
-		VM_WARN_ON(anon_vma->degree);
+		VM_WARN_ON(anon_vma->num_children);
+		VM_WARN_ON(anon_vma->num_active_vmas);
 		put_anon_vma(anon_vma);
 
 		list_del(&avc->same_vma);

@@ -1988,11 +1988,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
 		src_match = !bacmp(&c->src, src);
 		dst_match = !bacmp(&c->dst, dst);
 		if (src_match && dst_match) {
-			c = l2cap_chan_hold_unless_zero(c);
-			if (c) {
-				read_unlock(&chan_list_lock);
-				return c;
-			}
+			if (!l2cap_chan_hold_unless_zero(c))
+				continue;
+
+			read_unlock(&chan_list_lock);
+			return c;
 		}
 
 		/* Closest match */

@@ -441,6 +441,9 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
 {
 	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
 
+	if (!skb->len)
+		return -EINVAL;
+
 	if (!__skb)
 		return 0;

@@ -4102,6 +4102,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
 	bool again = false;
 
 	skb_reset_mac_header(skb);
+	skb_assert_len(skb);
 
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
 		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

@@ -280,11 +280,26 @@ static int neigh_del_timer(struct neighbour *n)
 	return 0;
 }
 
-static void pneigh_queue_purge(struct sk_buff_head *list)
+static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
 {
+	struct sk_buff_head tmp;
+	unsigned long flags;
 	struct sk_buff *skb;
 
-	while ((skb = skb_dequeue(list)) != NULL) {
+	skb_queue_head_init(&tmp);
+	spin_lock_irqsave(&list->lock, flags);
+	skb = skb_peek(list);
+	while (skb != NULL) {
+		struct sk_buff *skb_next = skb_peek_next(skb, list);
+
+		if (net == NULL || net_eq(dev_net(skb->dev), net)) {
+			__skb_unlink(skb, list);
+			__skb_queue_tail(&tmp, skb);
+		}
+		skb = skb_next;
+	}
+	spin_unlock_irqrestore(&list->lock, flags);
+
+	while ((skb = __skb_dequeue(&tmp))) {
 		dev_put(skb->dev);
 		kfree_skb(skb);
 	}
@@ -358,9 +373,9 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
 	write_lock_bh(&tbl->lock);
 	neigh_flush_dev(tbl, dev, skip_perm);
 	pneigh_ifdown_and_unlock(tbl, dev);
-
-	del_timer_sync(&tbl->proxy_timer);
-	pneigh_queue_purge(&tbl->proxy_queue);
+	pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
+	if (skb_queue_empty_lockless(&tbl->proxy_queue))
+		del_timer_sync(&tbl->proxy_timer);
 	return 0;
 }
@@ -1743,7 +1758,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
 	/* It is not clean... Fix it to unload IPv6 module safely */
 	cancel_delayed_work_sync(&tbl->gc_work);
 	del_timer_sync(&tbl->proxy_timer);
-	pneigh_queue_purge(&tbl->proxy_queue);
+	pneigh_queue_purge(&tbl->proxy_queue, NULL);
 	neigh_ifdown(tbl, NULL);
 	if (atomic_read(&tbl->entries))
 		pr_crit("neighbour leakage\n");

@@ -612,7 +612,9 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
 	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
 	refcount_set(&psock->refcnt, 1);
 
-	rcu_assign_sk_user_data_nocopy(sk, psock);
+	__rcu_assign_sk_user_data_with_flags(sk, psock,
+					     SK_USER_DATA_NOCOPY |
+					     SK_USER_DATA_PSOCK);
 	sock_hold(sk);
 
 out:

@@ -118,7 +118,6 @@ config NF_CONNTRACK_ZONES
 
 config NF_CONNTRACK_PROCFS
 	bool "Supply CT list in procfs (OBSOLETE)"
-	default y
 	depends on PROC_FS
 	help
 	  This option enables for the list of known conntrack entries

@@ -2986,8 +2986,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 	if (err)
 		goto out_free;
 
-	if (sock->type == SOCK_RAW &&
-	    !dev_validate_header(dev, skb->data, len)) {
+	if ((sock->type == SOCK_RAW &&
+	     !dev_validate_header(dev, skb->data, len)) || !skb->len) {
 		err = -EINVAL;
 		goto out_free;
 	}

@@ -93,8 +93,7 @@ obj := $(KBUILD_EXTMOD)
 src := $(obj)
 
 # Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
-include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
-             $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
+include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile)
 
 # modpost option for external modules
 MODPOST += -e