Merge 5.10.74 into android12-5.10-lts
Changes in 5.10.74
    ext4: check and update i_disksize properly
    ext4: correct the error path of ext4_write_inline_data_end()
    ASoC: Intel: sof_sdw: tag SoundWire BEs as non-atomic
    HID: apple: Fix logical maximum and usage maximum of Magic Keyboard JIS
    netfilter: ip6_tables: zero-initialize fragment offset
    HID: wacom: Add new Intuos BT (CTL-4100WL/CTL-6100WL) device IDs
    ASoC: SOF: loader: release_firmware() on load failure to avoid batching
    netfilter: nf_nat_masquerade: make async masq_inet6_event handling generic
    netfilter: nf_nat_masquerade: defer conntrack walk to work queue
    mac80211: Drop frames from invalid MAC address in ad-hoc mode
    m68k: Handle arrivals of multiple signals correctly
    hwmon: (ltc2947) Properly handle errors when looking for the external clock
    net: prevent user from passing illegal stab size
    mac80211: check return value of rhashtable_init
    vboxfs: fix broken legacy mount signature checking
    net: sun: SUNVNET_COMMON should depend on INET
    drm/amdgpu: fix gart.bo pin_count leak
    scsi: ses: Fix unsigned comparison with less than zero
    scsi: virtio_scsi: Fix spelling mistake "Unsupport" -> "Unsupported"
    perf/core: fix userpage->time_enabled of inactive events
    sched: Always inline is_percpu_thread()
    hwmon: (pmbus/ibm-cffps) max_power_out swap changes
    Linux 5.10.74

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ic5235aa8641c64ee2849355a7f10613ae204c74e
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 73
+SUBLEVEL = 74
 EXTRAVERSION =
 NAME = Dare mighty things
 
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -447,7 +447,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
 
         if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
                 fpu_version = sc->sc_fpstate[0];
-                if (CPU_IS_020_OR_030 &&
+                if (CPU_IS_020_OR_030 && !regs->stkadj &&
                     regs->vector >= (VEC_FPBRUC * 4) &&
                     regs->vector <= (VEC_FPNAN * 4)) {
                         /* Clear pending exception in 68882 idle frame */
@@ -510,7 +510,7 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
         if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
                 context_size = fpstate[1];
         fpu_version = fpstate[0];
-        if (CPU_IS_020_OR_030 &&
+        if (CPU_IS_020_OR_030 && !regs->stkadj &&
             regs->vector >= (VEC_FPBRUC * 4) &&
             regs->vector <= (VEC_FPNAN * 4)) {
                 /* Clear pending exception in 68882 idle frame */
@@ -828,18 +828,24 @@ badframe:
         return 0;
 }
 
+static inline struct pt_regs *rte_regs(struct pt_regs *regs)
+{
+        return (void *)regs + regs->stkadj;
+}
+
 static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
                              unsigned long mask)
 {
+        struct pt_regs *tregs = rte_regs(regs);
         sc->sc_mask = mask;
         sc->sc_usp = rdusp();
         sc->sc_d0 = regs->d0;
         sc->sc_d1 = regs->d1;
         sc->sc_a0 = regs->a0;
         sc->sc_a1 = regs->a1;
-        sc->sc_sr = regs->sr;
-        sc->sc_pc = regs->pc;
-        sc->sc_formatvec = regs->format << 12 | regs->vector;
+        sc->sc_sr = tregs->sr;
+        sc->sc_pc = tregs->pc;
+        sc->sc_formatvec = tregs->format << 12 | tregs->vector;
         save_a5_state(sc, regs);
         save_fpu_state(sc, regs);
 }
@@ -847,6 +853,7 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
 static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
 {
         struct switch_stack *sw = (struct switch_stack *)regs - 1;
+        struct pt_regs *tregs = rte_regs(regs);
         greg_t __user *gregs = uc->uc_mcontext.gregs;
         int err = 0;
 
@@ -867,9 +874,9 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
         err |= __put_user(sw->a5, &gregs[13]);
         err |= __put_user(sw->a6, &gregs[14]);
         err |= __put_user(rdusp(), &gregs[15]);
-        err |= __put_user(regs->pc, &gregs[16]);
-        err |= __put_user(regs->sr, &gregs[17]);
-        err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
+        err |= __put_user(tregs->pc, &gregs[16]);
+        err |= __put_user(tregs->sr, &gregs[17]);
+        err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
         err |= rt_save_fpu_state(uc, regs);
         return err;
 }
@@ -886,13 +893,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
                        struct pt_regs *regs)
 {
         struct sigframe __user *frame;
-        int fsize = frame_extra_sizes(regs->format);
+        struct pt_regs *tregs = rte_regs(regs);
+        int fsize = frame_extra_sizes(tregs->format);
         struct sigcontext context;
         int err = 0, sig = ksig->sig;
 
         if (fsize < 0) {
                 pr_debug("setup_frame: Unknown frame format %#x\n",
-                         regs->format);
+                         tregs->format);
                 return -EFAULT;
         }
 
@@ -903,7 +911,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
 
         err |= __put_user(sig, &frame->sig);
 
-        err |= __put_user(regs->vector, &frame->code);
+        err |= __put_user(tregs->vector, &frame->code);
         err |= __put_user(&frame->sc, &frame->psc);
 
         if (_NSIG_WORDS > 1)
@@ -929,34 +937,28 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
 
         push_cache ((unsigned long) &frame->retcode);
 
-        /*
-         * Set up registers for signal handler. All the state we are about
-         * to destroy is successfully copied to sigframe.
-         */
-        wrusp ((unsigned long) frame);
-        regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
-        adjustformat(regs);
-
         /*
          * This is subtle; if we build more than one sigframe, all but the
          * first one will see frame format 0 and have fsize == 0, so we won't
          * screw stkadj.
          */
-        if (fsize)
+        if (fsize) {
                 regs->stkadj = fsize;
-        /* Prepare to skip over the extra stuff in the exception frame. */
-        if (regs->stkadj) {
-                struct pt_regs *tregs =
-                        (struct pt_regs *)((ulong)regs + regs->stkadj);
+                tregs = rte_regs(regs);
                 pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
-                /* This must be copied with decreasing addresses to
-                   handle overlaps. */
                 tregs->vector = 0;
                 tregs->format = 0;
-                tregs->pc = regs->pc;
                 tregs->sr = regs->sr;
         }
 
+        /*
+         * Set up registers for signal handler. All the state we are about
+         * to destroy is successfully copied to sigframe.
+         */
+        wrusp ((unsigned long) frame);
+        tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+        adjustformat(regs);
+
         return 0;
 }
 
@@ -964,7 +966,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
                           struct pt_regs *regs)
 {
         struct rt_sigframe __user *frame;
-        int fsize = frame_extra_sizes(regs->format);
+        struct pt_regs *tregs = rte_regs(regs);
+        int fsize = frame_extra_sizes(tregs->format);
         int err = 0, sig = ksig->sig;
 
         if (fsize < 0) {
@@ -1014,34 +1017,27 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 
         push_cache ((unsigned long) &frame->retcode);
 
-        /*
-         * Set up registers for signal handler. All the state we are about
-         * to destroy is successfully copied to sigframe.
-         */
-        wrusp ((unsigned long) frame);
-        regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
-        adjustformat(regs);
-
         /*
          * This is subtle; if we build more than one sigframe, all but the
         * first one will see frame format 0 and have fsize == 0, so we won't
          * screw stkadj.
          */
-        if (fsize)
+        if (fsize) {
                 regs->stkadj = fsize;
-        /* Prepare to skip over the extra stuff in the exception frame. */
-        if (regs->stkadj) {
-                struct pt_regs *tregs =
-                        (struct pt_regs *)((ulong)regs + regs->stkadj);
+                tregs = rte_regs(regs);
                 pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
-                /* This must be copied with decreasing addresses to
-                   handle overlaps. */
                 tregs->vector = 0;
                 tregs->format = 0;
-                tregs->pc = regs->pc;
                 tregs->sr = regs->sr;
         }
 
+        /*
+         * Set up registers for signal handler. All the state we are about
+         * to destroy is successfully copied to sigframe.
+         */
+        wrusp ((unsigned long) frame);
+        tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+        adjustformat(regs);
         return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1018,6 +1018,8 @@ static int gmc_v10_0_hw_fini(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+        gmc_v10_0_gart_disable(adev);
+
         if (amdgpu_sriov_vf(adev)) {
                 /* full access mode, so don't touch any GMC register */
                 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
@@ -1026,7 +1028,6 @@ static int gmc_v10_0_hw_fini(void *handle)
 
         amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
         amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
-        gmc_v10_0_gart_disable(adev);
 
         return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1677,6 +1677,8 @@ static int gmc_v9_0_hw_fini(void *handle)
 {
         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+        gmc_v9_0_gart_disable(adev);
+
         if (amdgpu_sriov_vf(adev)) {
                 /* full access mode, so don't touch any GMC register */
                 DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
@@ -1685,7 +1687,6 @@ static int gmc_v9_0_hw_fini(void *handle)
 
         amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
         amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
-        gmc_v9_0_gart_disable(adev);
 
         return 0;
 }
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -322,12 +322,19 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
 
 /*
  * MacBook JIS keyboard has wrong logical maximum
+ * Magic Keyboard JIS has wrong logical maximum
  */
 static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                 unsigned int *rsize)
 {
         struct apple_sc *asc = hid_get_drvdata(hdev);
 
+        if(*rsize >=71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {
+                hid_info(hdev,
+                         "fixing up Magic Keyboard JIS report descriptor\n");
+                rdesc[64] = rdesc[70] = 0xe7;
+        }
+
         if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
                         rdesc[53] == 0x65 && rdesc[59] == 0x65) {
                 hid_info(hdev,
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -4715,6 +4715,12 @@ static const struct wacom_features wacom_features_0x393 =
         { "Wacom Intuos Pro S", 31920, 19950, 8191, 63,
           INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7,
           .touch_max = 10 };
+static const struct wacom_features wacom_features_0x3c6 =
+        { "Wacom Intuos BT S", 15200, 9500, 4095, 63,
+          INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+static const struct wacom_features wacom_features_0x3c8 =
+        { "Wacom Intuos BT M", 21600, 13500, 4095, 63,
+          INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
 
 static const struct wacom_features wacom_features_HID_ANY_ID =
         { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -4888,6 +4894,8 @@ const struct hid_device_id wacom_ids[] = {
         { USB_DEVICE_WACOM(0x37A) },
         { USB_DEVICE_WACOM(0x37B) },
         { BT_DEVICE_WACOM(0x393) },
+        { BT_DEVICE_WACOM(0x3c6) },
+        { BT_DEVICE_WACOM(0x3c8) },
         { USB_DEVICE_WACOM(0x4001) },
         { USB_DEVICE_WACOM(0x4004) },
         { USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
--- a/drivers/hwmon/ltc2947-core.c
+++ b/drivers/hwmon/ltc2947-core.c
@@ -989,8 +989,12 @@ static int ltc2947_setup(struct ltc2947_data *st)
                 return ret;
 
         /* check external clock presence */
-        extclk = devm_clk_get(st->dev, NULL);
-        if (!IS_ERR(extclk)) {
+        extclk = devm_clk_get_optional(st->dev, NULL);
+        if (IS_ERR(extclk))
+                return dev_err_probe(st->dev, PTR_ERR(extclk),
+                                     "Failed to get external clock\n");
+
+        if (extclk) {
                 unsigned long rate_hz;
                 u8 pre = 0, div, tbctl;
                 u64 aux;
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -171,8 +171,14 @@ static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
                 cmd = CFFPS_SN_CMD;
                 break;
         case CFFPS_DEBUGFS_MAX_POWER_OUT:
-                rc = i2c_smbus_read_word_swapped(psu->client,
-                                                 CFFPS_MAX_POWER_OUT_CMD);
+                if (psu->version == cffps1) {
+                        rc = i2c_smbus_read_word_swapped(psu->client,
+                                        CFFPS_MAX_POWER_OUT_CMD);
+                } else {
+                        rc = i2c_smbus_read_word_data(psu->client,
+                                        CFFPS_MAX_POWER_OUT_CMD);
+                }
+
                 if (rc < 0)
                         return rc;
 
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
--- a/drivers/net/ethernet/sun/Kconfig
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -73,6 +73,7 @@ config CASSINI
 config SUNVNET_COMMON
         tristate "Common routines to support Sun Virtual Networking"
         depends on SUN_LDOMS
+        depends on INET
         default m
 
 config SUNVNET
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -118,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
 static int ses_send_diag(struct scsi_device *sdev, int page_code,
                          void *buf, int bufflen)
 {
-        u32 result;
+        int result;
 
         unsigned char cmd[] = {
                 SEND_DIAGNOSTIC,
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -302,7 +302,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
                 }
                 break;
         default:
-                pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
+                pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
         }
 }
 
@@ -394,7 +394,7 @@ static void virtscsi_handle_event(struct work_struct *work)
                 virtscsi_handle_param_change(vscsi, event);
                 break;
         default:
-                pr_err("Unsupport virtio scsi event %x\n", event->event);
+                pr_err("Unsupported virtio scsi event %x\n", event->event);
         }
         virtscsi_kick_event(vscsi, event_node);
 }
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -747,18 +747,13 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
         void *kaddr;
         struct ext4_iloc iloc;
 
-        if (unlikely(copied < len)) {
-                if (!PageUptodate(page)) {
-                        copied = 0;
-                        goto out;
-                }
-        }
+        if (unlikely(copied < len) && !PageUptodate(page))
+                return 0;
 
         ret = ext4_get_inode_loc(inode, &iloc);
         if (ret) {
                 ext4_std_error(inode->i_sb, ret);
-                copied = 0;
-                goto out;
+                return ret;
         }
 
         ext4_write_lock_xattr(inode, &no_expand);
@@ -771,7 +766,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
         (void) ext4_find_inline_data_nolock(inode);
 
         kaddr = kmap_atomic(page);
-        ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
+        ext4_write_inline_data(inode, &iloc, kaddr, pos, copied);
         kunmap_atomic(kaddr);
         SetPageUptodate(page);
         /* clear page dirty so that writepages wouldn't work for us. */
@@ -780,7 +775,6 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
         ext4_write_unlock_xattr(inode, &no_expand);
         brelse(iloc.bh);
         mark_inode_dirty(inode);
-out:
         return copied;
 }
 
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1308,6 +1308,7 @@ static int ext4_write_end(struct file *file,
                         goto errout;
                 }
                 copied = ret;
+                ret = 0;
         } else
                 copied = block_write_end(file, mapping, pos,
                                 len, copied, page, fsdata);
@@ -1334,13 +1335,14 @@ static int ext4_write_end(struct file *file,
         if (i_size_changed || inline_data)
                 ret = ext4_mark_inode_dirty(handle, inode);
 
+errout:
         if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
                 /* if we have allocated more blocks and copied
                  * less. We will have blocks allocated outside
                  * inode->i_size. So truncate them
                  */
                 ext4_orphan_add(handle, inode);
-errout:
+
         ret2 = ext4_journal_stop(handle);
         if (!ret)
                 ret = ret2;
@@ -1424,6 +1426,7 @@ static int ext4_journalled_write_end(struct file *file,
                         goto errout;
                 }
                 copied = ret;
+                ret = 0;
         } else if (unlikely(copied < len) && !PageUptodate(page)) {
                 copied = 0;
                 ext4_journalled_zero_new_buffers(handle, page, from, to);
@@ -1453,6 +1456,7 @@ static int ext4_journalled_write_end(struct file *file,
                 ret = ret2;
         }
 
+errout:
         if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
                 /* if we have allocated more blocks and copied
                  * less. We will have blocks allocated outside
@@ -1460,7 +1464,6 @@ static int ext4_journalled_write_end(struct file *file,
                  */
                 ext4_orphan_add(handle, inode);
 
-errout:
         ret2 = ext4_journal_stop(handle);
         if (!ret)
                 ret = ret2;
@@ -3114,35 +3117,37 @@ static int ext4_da_write_end(struct file *file,
         end = start + copied - 1;
 
         /*
-         * generic_write_end() will run mark_inode_dirty() if i_size
-         * changes. So let's piggyback the i_disksize mark_inode_dirty
-         * into that.
+         * Since we are holding inode lock, we are sure i_disksize <=
+         * i_size. We also know that if i_disksize < i_size, there are
+         * delalloc writes pending in the range upto i_size. If the end of
+         * the current write is <= i_size, there's no need to touch
+         * i_disksize since writeback will push i_disksize upto i_size
+         * eventually. If the end of the current write is > i_size and
+         * inside an allocated block (ext4_da_should_update_i_disksize()
+         * check), we need to update i_disksize here as neither
+         * ext4_writepage() nor certain ext4_writepages() paths not
+         * allocating blocks update i_disksize.
+         *
+         * Note that we defer inode dirtying to generic_write_end() /
+         * ext4_da_write_inline_data_end().
          */
         new_i_size = pos + copied;
-        if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
+        if (copied && new_i_size > inode->i_size) {
                 if (ext4_has_inline_data(inode) ||
-                    ext4_da_should_update_i_disksize(page, end)) {
+                    ext4_da_should_update_i_disksize(page, end))
                         ext4_update_i_disksize(inode, new_i_size);
-                        /* We need to mark inode dirty even if
-                         * new_i_size is less that inode->i_size
-                         * bu greater than i_disksize.(hint delalloc)
-                         */
-                        ret = ext4_mark_inode_dirty(handle, inode);
-                }
         }
 
         if (write_mode != CONVERT_INLINE_DATA &&
             ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
             ext4_has_inline_data(inode))
-                ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
+                ret = ext4_da_write_inline_data_end(inode, pos, len, copied,
                                                     page);
         else
-                ret2 = generic_write_end(file, mapping, pos, len, copied,
+                ret = generic_write_end(file, mapping, pos, len, copied,
                                         page, fsdata);
 
-        copied = ret2;
-        if (ret2 < 0)
-                ret = ret2;
+        copied = ret;
         ret2 = ext4_journal_stop(handle);
         if (unlikely(ret2 && !ret))
                 ret = ret2;
diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
--- a/fs/vboxsf/super.c
+++ b/fs/vboxsf/super.c
@@ -21,10 +21,7 @@
 
 #define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
 
-#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
-#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
-#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
-#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')
+static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";
 
 static int follow_symlinks;
 module_param(follow_symlinks, int, 0444);
@@ -386,12 +383,7 @@ fail_nomem:
 
 static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
 {
-        unsigned char *options = data;
-
-        if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
-            options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
-            options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
-            options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
+        if (data && !memcmp(data, VBSF_MOUNT_SIGNATURE, 4)) {
                 vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
                 return -EINVAL;
         }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -682,7 +682,9 @@ struct perf_event {
         /*
          * timestamp shadows the actual context timing but it can
          * be safely used in NMI interrupt context. It reflects the
-         * context time as it was when the event was last scheduled in.
+         * context time as it was when the event was last scheduled in,
+         * or when ctx_sched_in failed to schedule the event because we
+         * run out of PMC.
          *
          * ctx_time already accounts for ctx->timestamp. Therefore to
          * compute ctx_time for a sample, simply add perf_clock().
diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1621,7 +1621,7 @@ extern struct pid *cad_pid;
 #define tsk_used_math(p)        ((p)->flags & PF_USED_MATH)
 #define used_math()             tsk_used_math(current)
 
-static inline bool is_percpu_thread(void)
+static __always_inline bool is_percpu_thread(void)
 {
 #ifdef CONFIG_SMP
         return (current->flags & PF_NO_SETAFFINITY) &&
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -11,6 +11,7 @@
 #include <uapi/linux/pkt_sched.h>
 
 #define DEFAULT_TX_QUEUE_LEN    1000
+#define STAB_SIZE_LOG_MAX       30
 
 struct qdisc_walker {
         int     stop;
diff --git a/kernel/events/core.c b/kernel/events/core.c
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3695,6 +3695,29 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
         return 0;
 }
 
+static inline bool event_update_userpage(struct perf_event *event)
+{
+        if (likely(!atomic_read(&event->mmap_count)))
+                return false;
+
+        perf_event_update_time(event);
+        perf_set_shadow_time(event, event->ctx);
+        perf_event_update_userpage(event);
+
+        return true;
+}
+
+static inline void group_update_userpage(struct perf_event *group_event)
+{
+        struct perf_event *event;
+
+        if (!event_update_userpage(group_event))
+                return;
+
+        for_each_sibling_event(event, group_event)
+                event_update_userpage(event);
+}
+
 static int merge_sched_in(struct perf_event *event, void *data)
 {
         struct perf_event_context *ctx = event->ctx;
@@ -3713,14 +3736,15 @@ static int merge_sched_in(struct perf_event *event, void *data)
         }
 
         if (event->state == PERF_EVENT_STATE_INACTIVE) {
+                *can_add_hw = 0;
                 if (event->attr.pinned) {
                         perf_cgroup_event_disable(event, ctx);
                         perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
-                }
-
-                *can_add_hw = 0;
-                ctx->rotate_necessary = 1;
-                perf_mux_hrtimer_restart(cpuctx);
+                } else {
+                        ctx->rotate_necessary = 1;
+                        perf_mux_hrtimer_restart(cpuctx);
+                        group_update_userpage(event);
+                }
         }
 
         return 0;
@@ -6240,6 +6264,8 @@ accounting:
 
                 ring_buffer_attach(event, rb);
 
+                perf_event_update_time(event);
+                perf_set_shadow_time(event, event->ctx);
                 perf_event_init_userpage(event);
                 perf_event_update_userpage(event);
         } else {
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -273,6 +273,7 @@ ip6t_do_table(struct sk_buff *skb,
          * things we don't know, ie. tcp syn flag or ports). If the
          * rule is also a fragment-specific rule, non-fragments won't
          * match it. */
+        acpar.fragoff = 0;
         acpar.hotdrop = false;
         acpar.state = state;
 
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -60,7 +60,10 @@ static struct mesh_table *mesh_table_alloc(void)
         atomic_set(&newtbl->entries, 0);
         spin_lock_init(&newtbl->gates_lock);
         spin_lock_init(&newtbl->walk_lock);
-        rhashtable_init(&newtbl->rhead, &mesh_rht_params);
+        if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
+                kfree(newtbl);
+                return NULL;
+        }
 
         return newtbl;
 }
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -4064,7 +4064,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
                 if (!bssid)
                         return false;
                 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
-                    ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
+                    ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
+                    !is_valid_ether_addr(hdr->addr2))
                         return false;
                 if (ieee80211_is_beacon(hdr->frame_control))
                         return true;
diff --git a/net/netfilter/nf_nat_masquerade.c b/net/netfilter/nf_nat_masquerade.c
--- a/net/netfilter/nf_nat_masquerade.c
+++ b/net/netfilter/nf_nat_masquerade.c
@@ -9,8 +9,19 @@
 
 #include <net/netfilter/nf_nat_masquerade.h>
 
+struct masq_dev_work {
+        struct work_struct work;
+        struct net *net;
+        union nf_inet_addr addr;
+        int ifindex;
+        int (*iter)(struct nf_conn *i, void *data);
+};
+
+#define MAX_MASQ_WORKER_COUNT   16
+
 static DEFINE_MUTEX(masq_mutex);
 static unsigned int masq_refcnt __read_mostly;
+static atomic_t masq_worker_count __read_mostly;
 
 unsigned int
 nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
@@ -63,13 +74,71 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
 
-static int device_cmp(struct nf_conn *i, void *ifindex)
+static void iterate_cleanup_work(struct work_struct *work)
+{
+        struct masq_dev_work *w;
+
+        w = container_of(work, struct masq_dev_work, work);
+
+        nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0);
+
+        put_net(w->net);
+        kfree(w);
+        atomic_dec(&masq_worker_count);
+        module_put(THIS_MODULE);
+}
+
+/* Iterate conntrack table in the background and remove conntrack entries
+ * that use the device/address being removed.
+ *
+ * In case too many work items have been queued already or memory allocation
+ * fails iteration is skipped, conntrack entries will time out eventually.
+ */
+static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,
+                                 int ifindex,
+                                 int (*iter)(struct nf_conn *i, void *data),
+                                 gfp_t gfp_flags)
+{
+        struct masq_dev_work *w;
+
+        if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT)
+                return;
+
+        net = maybe_get_net(net);
+        if (!net)
+                return;
+
+        if (!try_module_get(THIS_MODULE))
+                goto err_module;
+
+        w = kzalloc(sizeof(*w), gfp_flags);
+        if (w) {
+                /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */
+                atomic_inc(&masq_worker_count);
+
+                INIT_WORK(&w->work, iterate_cleanup_work);
+                w->ifindex = ifindex;
+                w->net = net;
+                w->iter = iter;
+                if (addr)
+                        w->addr = *addr;
+                schedule_work(&w->work);
+                return;
+        }
+
+        module_put(THIS_MODULE);
+err_module:
+        put_net(net);
+}
+
+static int device_cmp(struct nf_conn *i, void *arg)
 {
         const struct nf_conn_nat *nat = nfct_nat(i);
+        const struct masq_dev_work *w = arg;
 
         if (!nat)
                 return 0;
-        return nat->masq_index == (int)(long)ifindex;
+        return nat->masq_index == w->ifindex;
 }
 
 static int masq_device_event(struct notifier_block *this,
@@ -85,8 +154,8 @@ static int masq_device_event(struct notifier_block *this,
                  * and forget them.
                  */
 
-                nf_ct_iterate_cleanup_net(net, device_cmp,
-                                          (void *)(long)dev->ifindex, 0, 0);
+                nf_nat_masq_schedule(net, NULL, dev->ifindex,
+                                     device_cmp, GFP_KERNEL);
         }
 
         return NOTIFY_DONE;
@@ -94,35 +163,45 @@ static int masq_device_event(struct notifier_block *this,
 
 static int inet_cmp(struct nf_conn *ct, void *ptr)
 {
-        struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
-        struct net_device *dev = ifa->ifa_dev->dev;
         struct nf_conntrack_tuple *tuple;
+        struct masq_dev_work *w = ptr;
 
-        if (!device_cmp(ct, (void *)(long)dev->ifindex))
+        if (!device_cmp(ct, ptr))
                 return 0;
 
         tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
 
-        return ifa->ifa_address == tuple->dst.u3.ip;
+        return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);
 }
 
 static int masq_inet_event(struct notifier_block *this,
                            unsigned long event,
                            void *ptr)
 {
-        struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
-        struct net *net = dev_net(idev->dev);
+        const struct in_ifaddr *ifa = ptr;
+        const struct in_device *idev;
+        const struct net_device *dev;
+        union nf_inet_addr addr;
+
+        if (event != NETDEV_DOWN)
+                return NOTIFY_DONE;
 
         /* The masq_dev_notifier will catch the case of the device going
          * down. So if the inetdev is dead and being destroyed we have
          * no work to do. Otherwise this is an individual address removal
          * and we have to perform the flush.
          */
+        idev = ifa->ifa_dev;
         if (idev->dead)
                 return NOTIFY_DONE;
 
-        if (event == NETDEV_DOWN)
-                nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);
+        memset(&addr, 0, sizeof(addr));
+
+        addr.ip = ifa->ifa_address;
+
+        dev = idev->dev;
+        nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex,
+                             inet_cmp, GFP_KERNEL);
 
         return NOTIFY_DONE;
 }
@@ -136,8 +215,6 @@ static struct notifier_block masq_inet_notifier = {
 };
 
 #if IS_ENABLED(CONFIG_IPV6)
-static atomic_t v6_worker_count __read_mostly;
-
 static int
 nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
                        const struct in6_addr *daddr, unsigned int srcprefs,
@@ -187,40 +264,6 @@ nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
 
-struct masq_dev_work {
-        struct work_struct work;
-        struct net *net;
-        struct in6_addr addr;
-        int ifindex;
-};
-
-static int inet6_cmp(struct nf_conn *ct, void *work)
-{
-        struct masq_dev_work *w = (struct masq_dev_work *)work;
-        struct nf_conntrack_tuple *tuple;
-
-        if (!device_cmp(ct, (void *)(long)w->ifindex))
-                return 0;
-
-        tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-
-        return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);
-}
-
-static void iterate_cleanup_work(struct work_struct *work)
-{
-        struct masq_dev_work *w;
-
-        w = container_of(work, struct masq_dev_work, work);
-
-        nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0);
-
-        put_net(w->net);
-        kfree(w);
-        atomic_dec(&v6_worker_count);
-        module_put(THIS_MODULE);
-}
-
 /* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
  *
  * Defer it to the system workqueue.
@@ -233,36 +276,19 @@ static int masq_inet6_event(struct notifier_block *this,
 {
         struct inet6_ifaddr *ifa = ptr;
         const struct net_device *dev;
-        struct masq_dev_work *w;
-        struct net *net;
+        union nf_inet_addr addr;
 
-        if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16)
+        if (event != NETDEV_DOWN)
                 return NOTIFY_DONE;
 
         dev = ifa->idev->dev;
-        net = maybe_get_net(dev_net(dev));
-        if (!net)
-                return NOTIFY_DONE;
 
-        if (!try_module_get(THIS_MODULE))
-                goto err_module;
+        memset(&addr, 0, sizeof(addr));
 
-        w = kmalloc(sizeof(*w), GFP_ATOMIC);
-        if (w) {
-                atomic_inc(&v6_worker_count);
+        addr.in6 = ifa->addr;
 
-                INIT_WORK(&w->work, iterate_cleanup_work);
-                w->ifindex = dev->ifindex;
-                w->net = net;
-                w->addr = ifa->addr;
-                schedule_work(&w->work);
-
-                return NOTIFY_DONE;
-        }
-
-        module_put(THIS_MODULE);
-err_module:
-        put_net(net);
+        nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,
+                             GFP_ATOMIC);
 
         return NOTIFY_DONE;
 }
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -513,6 +513,12 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
                         return stab;
         }
 
+        if (s->size_log > STAB_SIZE_LOG_MAX ||
+            s->cell_log > STAB_SIZE_LOG_MAX) {
+                NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
+                return ERR_PTR(-EINVAL);
+        }
+
         stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
         if (!stab)
                 return ERR_PTR(-ENOMEM);
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -847,6 +847,11 @@ static int create_sdw_dailink(struct device *dev, int *be_index,
                               cpus + *cpu_id, cpu_dai_num,
                               codecs, codec_num,
                               NULL, &sdw_ops);
+        /*
+         * SoundWire DAILINKs use 'stream' functions and Bank Switch operations
+         * based on wait_for_completion(), tag them as 'nonatomic'.
+         */
+        dai_links[*be_index].nonatomic = true;
 
         ret = set_codec_init_func(link, dai_links + (*be_index)++,
                                   playback, group_id);
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -354,7 +354,6 @@ int snd_sof_device_remove(struct device *dev)
                         dev_warn(dev, "error: %d failed to prepare DSP for device removal",
                                  ret);
 
-                snd_sof_fw_unload(sdev);
                 snd_sof_ipc_free(sdev);
                 snd_sof_free_debug(sdev);
                 snd_sof_free_trace(sdev);
@@ -377,8 +376,7 @@ int snd_sof_device_remove(struct device *dev)
         snd_sof_remove(sdev);
 
         /* release firmware */
-        release_firmware(pdata->fw);
-        pdata->fw = NULL;
+        snd_sof_fw_unload(sdev);
 
         return 0;
 }
diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
--- a/sound/soc/sof/loader.c
+++ b/sound/soc/sof/loader.c
@@ -830,5 +830,7 @@ EXPORT_SYMBOL(snd_sof_run_firmware);
 void snd_sof_fw_unload(struct snd_sof_dev *sdev)
 {
         /* TODO: support module unloading at runtime */
+        release_firmware(sdev->pdata->fw);
+        sdev->pdata->fw = NULL;
 }
 EXPORT_SYMBOL(snd_sof_fw_unload);