Merge 5.10.61 into android12-5.10-lts

Changes in 5.10.61
	ath: Use safer key clearing with key cache entries
	ath9k: Clear key cache explicitly on disabling hardware
	ath: Export ath_hw_keysetmac()
	ath: Modify ath_key_delete() to not need full key entry
	ath9k: Postpone key cache entry deletion for TXQ frames referencing it
	mtd: cfi_cmdset_0002: fix crash when erasing/writing AMD cards
	media: zr364xx: propagate errors from zr364xx_start_readpipe()
	media: zr364xx: fix memory leaks in probe()
	media: drivers/media/usb: fix memory leak in zr364xx_probe
	KVM: x86: Factor out x86 instruction emulation with decoding
	KVM: X86: Fix warning caused by stale emulation context
	USB: core: Avoid WARNings for 0-length descriptor requests
	USB: core: Fix incorrect pipe calculation in do_proc_control()
	dmaengine: xilinx_dma: Fix read-after-free bug when terminating transfers
	dmaengine: usb-dmac: Fix PM reference leak in usb_dmac_probe()
	spi: spi-mux: Add module info needed for autoloading
	net: xfrm: Fix end of loop tests for list_for_each_entry
	ARM: dts: am43x-epos-evm: Reduce i2c0 bus speed for tps65218
	dmaengine: of-dma: router_xlate to return -EPROBE_DEFER if controller is not yet available
	scsi: pm80xx: Fix TMF task completion race condition
	scsi: megaraid_mm: Fix end of loop tests for list_for_each_entry()
	scsi: scsi_dh_rdac: Avoid crash during rdac_bus_attach()
	scsi: core: Avoid printing an error if target_alloc() returns -ENXIO
	scsi: core: Fix capacity set to zero after offlining device
	drm/amdgpu: fix missing doorbell when in CGPG for renoir
	qede: fix crash in rmmod qede during automatic debug collection
	ARM: dts: nomadik: Fix up interrupt controller node names
	net: usb: pegasus: Check the return value of get_registers() and friends
	net: usb: lan78xx: don't modify phy_device state concurrently
	drm/amd/display: Fix Dynamic bpp issue with 8K30 with Navi 1X
	drm/amd/display: workaround for hard hang on HPD on native DP
	Bluetooth: hidp: use correct wait queue when removing ctrl_wait
	arm64: dts: qcom: c630: fix correct powerdown pin for WSA881x
	arm64: dts: qcom: msm8992-bullhead: Remove PSCI
	iommu: Check if group is NULL before remove device
	cpufreq: armada-37xx: forbid cpufreq for 1.2 GHz variant
	dccp: add do-while-0 stubs for dccp_pr_debug macros
	virtio: Protect vqs list access
	vhost-vdpa: Fix integer overflow in vhost_vdpa_process_iotlb_update()
	bus: ti-sysc: Fix error handling for sysc_check_active_timer()
	vhost: Fix the calculation in vhost_overflow()
	vdpa/mlx5: Avoid destroying MR on empty iotlb
	soc / drm: mediatek: Move DDP component defines into mtk-mmsys.h
	drm/mediatek: Fix aal size config
	drm/mediatek: Add AAL output size configuration
	bpf: Clear zext_dst of dead insns
	bnxt: don't lock the tx queue from napi poll
	bnxt: disable napi before canceling DIM
	bnxt: make sure xmit_more + errors does not miss doorbells
	bnxt: count Tx drops
	net: 6pack: fix slab-out-of-bounds in decode_data
	ptp_pch: Restore dependency on PCI
	bnxt_en: Disable aRFS if running on 212 firmware
	bnxt_en: Add missing DMA memory barriers
	vrf: Reset skb conntrack connection on VRF rcv
	virtio-net: support XDP even when there are not enough queues
	virtio-net: use NETIF_F_GRO_HW instead of NETIF_F_LRO
	net: qlcnic: add missed unlock in qlcnic_83xx_flash_read32
	ixgbe, xsk: clean up the resources in ixgbe_xsk_pool_enable error path
	sch_cake: fix srchost/dsthost hashing mode
	net: mdio-mux: Don't ignore memory allocation errors
	net: mdio-mux: Handle -EPROBE_DEFER correctly
	ovs: clear skb->tstamp in forwarding path
	iommu/vt-d: Consolidate duplicate cache invalidation code
	iommu/vt-d: Fix incomplete cache flush in intel_pasid_tear_down_entry()
	r8152: fix writing USB_BP2_EN
	i40e: Fix ATR queue selection
	iavf: Fix ping is lost after untrusted VF had tried to change MAC
	Revert "flow_offload: action should not be NULL when it is referenced"
	mmc: dw_mmc: Fix hang on data CRC error
	mmc: mmci: stm32: Check when the voltage switch procedure should be done
	mmc: sdhci-msm: Update the software timeout value for sdhc
	clk: imx6q: fix uart earlycon not working
	clk: qcom: gdsc: Ensure regulator init state matches GDSC state
	ALSA: hda - fix the 'Capture Switch' value change notifications
	tracing / histogram: Fix NULL pointer dereference on strcmp() on NULL event name
	slimbus: messaging: start transaction ids from 1 instead of zero
	slimbus: messaging: check for valid transaction id
	slimbus: ngd: reset dma setup during runtime pm
	ipack: tpci200: fix many double free issues in tpci200_pci_probe
	ipack: tpci200: fix memory leak in the tpci200_register
	ALSA: hda/realtek: Enable 4-speaker output for Dell XPS 15 9510 laptop
	mmc: sdhci-iproc: Cap min clock frequency on BCM2711
	mmc: sdhci-iproc: Set SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN on BCM2711
	btrfs: prevent rename2 from exchanging a subvol with a directory from different parents
	ALSA: hda/via: Apply runtime PM workaround for ASUS B23E
	s390/pci: fix use after free of zpci_dev
	PCI: Increase D3 delay for AMD Renoir/Cezanne XHCI
	ALSA: hda/realtek: Limit mic boost on HP ProBook 445 G8
	ASoC: intel: atom: Fix breakage for PCM buffer address setup
	mm: memcontrol: fix occasional OOMs due to proportional memory.low reclaim
	fs: warn about impending deprecation of mandatory locks
	io_uring: fix xa_alloc_cycle() error return value check
	io_uring: only assign io_uring_enter() SQPOLL error in actual error case
	Linux 5.10.61

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I5b6e2a66b03d1cb01c8310b83dcc2a119c1bd6b3
Committed by Greg Kroah-Hartman on 2021-08-27 20:51:37 +02:00
92 changed files with 965 additions and 448 deletions


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 60
SUBLEVEL = 61
EXTRAVERSION =
NAME = Dare mighty things


@@ -582,7 +582,7 @@
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
clock-frequency = <400000>;
clock-frequency = <100000>;
tps65218: tps65218@24 {
reg = <0x24>;


@@ -755,14 +755,14 @@
status = "disabled";
};
vica: intc@10140000 {
vica: interrupt-controller@10140000 {
compatible = "arm,versatile-vic";
interrupt-controller;
#interrupt-cells = <1>;
reg = <0x10140000 0x20>;
};
vicb: intc@10140020 {
vicb: interrupt-controller@10140020 {
compatible = "arm,versatile-vic";
interrupt-controller;
#interrupt-cells = <1>;


@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, LGE Inc. All rights reserved.
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
* Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
*/
/dts-v1/;
@@ -15,6 +16,9 @@
qcom,board-id = <0xb64 0>;
qcom,pmic-id = <0x10009 0x1000A 0x0 0x0>;
/* Bullhead firmware doesn't support PSCI */
/delete-node/ psci;
aliases {
serial0 = &blsp1_uart2;
};


@@ -564,7 +564,7 @@
left_spkr: wsa8810-left{
compatible = "sdw10217211000";
reg = <0 3>;
powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
#thermal-sensor-cells = <0>;
sound-name-prefix = "SpkrLeft";
#sound-dai-cells = <0>;
@@ -572,7 +572,7 @@
right_spkr: wsa8810-right{
compatible = "sdw10217211000";
powerdown-gpios = <&wcdgpio 3 GPIO_ACTIVE_HIGH>;
powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
reg = <0 4>;
#thermal-sensor-cells = <0>;
sound-name-prefix = "SpkrRight";


@@ -558,9 +558,12 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
int pcibios_add_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
struct resource *res;
int i;
/* The pdev has a reference to the zdev via its bus */
zpci_zdev_get(zdev);
if (pdev->is_physfn)
pdev->no_vf_scan = 1;
@@ -580,7 +583,10 @@ int pcibios_add_device(struct pci_dev *pdev)
void pcibios_release_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
zpci_unmap_resources(pdev);
zpci_zdev_put(zdev);
}
int pcibios_enable_device(struct pci_dev *pdev, int mask)


@@ -16,6 +16,11 @@ static inline void zpci_zdev_put(struct zpci_dev *zdev)
kref_put(&zdev->kref, zpci_release_device);
}
static inline void zpci_zdev_get(struct zpci_dev *zdev)
{
kref_get(&zdev->kref);
}
int zpci_alloc_domain(int domain);
void zpci_free_domain(int domain);
int zpci_setup_bus_resources(struct zpci_dev *zdev,
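
The zpci fix above is an instance of the kernel's kref pattern: the pci_dev takes a counted reference on the zpci_dev when it is added and drops it when it is released, so the zpci_dev cannot be freed while a pdev still points at it. Below is a minimal user-space sketch of that lifetime rule, with a plain integer standing in for struct kref; all names are illustrative, not the driver's.

#include <stdio.h>
#include <stdlib.h>

struct zdev {
	int refcount;	/* stands in for struct kref */
	int id;
};

static void zdev_release(struct zdev *z)
{
	printf("releasing zdev %d\n", z->id);
	free(z);
}

static void zdev_get(struct zdev *z)
{
	z->refcount++;
}

static void zdev_put(struct zdev *z)
{
	if (--z->refcount == 0)	/* last reference gone: now safe to free */
		zdev_release(z);
}

int main(void)
{
	struct zdev *z = calloc(1, sizeof(*z));

	if (!z)
		return 1;
	z->refcount = 1;	/* creator's reference */
	z->id = 42;

	zdev_get(z);	/* "pcibios_add_device": the pdev now holds a reference */
	zdev_put(z);	/* creator drops its ref; zdev survives for the pdev */
	zdev_put(z);	/* "pcibios_release_device": last ref, object freed */
	return 0;
}

Without the get/put pair, the release path would be left with a stale pointer, which is exactly the use-after-free the patch closes.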


@@ -7023,6 +7023,11 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->exception.vector = -1;
ctxt->perm_ok = false;
init_decode_cache(ctxt);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}
@@ -7338,6 +7343,37 @@ static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
return false;
}
/*
* Decode to be emulated instruction. Return EMULATION_OK if success.
*/
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
void *insn, int insn_len)
{
int r = EMULATION_OK;
struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
init_emulate_ctxt(vcpu);
/*
* We will reenter on the same instruction since we do not set
* complete_userspace_io. This does not handle watchpoints yet,
* those would be handled in the emulate_ops.
*/
if (!(emulation_type & EMULTYPE_SKIP) &&
kvm_vcpu_check_breakpoint(vcpu, &r))
return r;
ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
r = x86_decode_insn(ctxt, insn, insn_len);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
return r;
}
EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len)
{
@@ -7357,32 +7393,12 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
*/
write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
vcpu->arch.write_fault_to_shadow_pgtable = false;
kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
init_emulate_ctxt(vcpu);
kvm_clear_exception_queue(vcpu);
/*
* We will reenter on the same instruction since
* we do not set complete_userspace_io. This does not
* handle watchpoints yet, those would be handled in
* the emulate_ops.
*/
if (!(emulation_type & EMULTYPE_SKIP) &&
kvm_vcpu_check_breakpoint(vcpu, &r))
return r;
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->exception.vector = -1;
ctxt->perm_ok = false;
ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
r = x86_decode_insn(ctxt, insn, insn_len);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
r = x86_decode_emulated_instruction(vcpu, emulation_type,
insn, insn_len);
if (r != EMULATION_OK) {
if ((emulation_type & EMULTYPE_TRAP_UD) ||
(emulation_type & EMULTYPE_TRAP_UD_FORCED)) {


@@ -272,6 +272,8 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);


@@ -3061,8 +3061,10 @@ static int sysc_probe(struct platform_device *pdev)
return error;
error = sysc_check_active_timer(ddata);
if (error == -EBUSY)
if (error == -ENXIO)
ddata->reserved = true;
else if (error)
return error;
error = sysc_get_clocks(ddata);
if (error)


@@ -974,6 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
}
imx_register_uart_clocks(1);
imx_register_uart_clocks(2);
}
CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);


@@ -357,27 +357,43 @@ static int gdsc_init(struct gdsc *sc)
if (on < 0)
return on;
if (on) {
/* The regulator must be on, sync the kernel state */
if (sc->rsupply) {
ret = regulator_enable(sc->rsupply);
if (ret < 0)
return ret;
}
/*
* Votable GDSCs can be ON due to Vote from other masters.
* If a Votable GDSC is ON, make sure we have a Vote.
*/
if ((sc->flags & VOTABLE) && on)
gdsc_enable(&sc->pd);
if (sc->flags & VOTABLE) {
ret = regmap_update_bits(sc->regmap, sc->gdscr,
SW_COLLAPSE_MASK, val);
if (ret)
return ret;
}
/* Turn on HW trigger mode if supported */
if (sc->flags & HW_CTRL) {
ret = gdsc_hwctrl(sc, true);
if (ret < 0)
return ret;
}
/*
* Make sure the retain bit is set if the GDSC is already on, otherwise
* we end up turning off the GDSC and destroying all the register
* contents that we thought we were saving.
* Make sure the retain bit is set if the GDSC is already on,
* otherwise we end up turning off the GDSC and destroying all
* the register contents that we thought we were saving.
*/
if ((sc->flags & RETAIN_FF_ENABLE) && on)
if (sc->flags & RETAIN_FF_ENABLE)
gdsc_retain_ff_on(sc);
} else if (sc->flags & ALWAYS_ON) {
/* If ALWAYS_ON GDSCs are not ON, turn them ON */
if (sc->flags & ALWAYS_ON) {
if (!on)
gdsc_enable(&sc->pd);
on = true;
sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
}
if (on || (sc->pwrsts & PWRSTS_RET))
@@ -385,6 +401,8 @@ static int gdsc_init(struct gdsc *sc)
else
gdsc_clear_mem_on(sc);
if (sc->flags & ALWAYS_ON)
sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
if (!sc->pd.power_off)
sc->pd.power_off = gdsc_disable;
if (!sc->pd.power_on)


@@ -102,7 +102,11 @@ struct armada_37xx_dvfs {
};
static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
{.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
/*
* The cpufreq scaling for 1.2 GHz variant of the SOC is currently
* unstable because we do not know how to configure it properly.
*/
/* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
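
For context, each dvfs entry derives the lower load levels by dividing cpu_freq_max, so the remaining 1 GHz entry with dividers {1, 2, 4, 5} exposes 1000, 500, 250 and 200 MHz operating points. A quick stand-alone check of that arithmetic (values copied from the table above):

#include <stdio.h>

int main(void)
{
	const unsigned long max_hz = 1000UL * 1000 * 1000;	/* 1 GHz entry */
	const int divider[] = { 1, 2, 4, 5 };			/* from the table */

	for (int i = 0; i < 4; i++)
		printf("load level %d: %lu MHz\n", i,
		       max_hz / divider[i] / 1000000);
	return 0;
}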


@@ -67,8 +67,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
return NULL;
ofdma_target = of_dma_find_controller(&dma_spec_target);
if (!ofdma_target)
return NULL;
if (!ofdma_target) {
ofdma->dma_router->route_free(ofdma->dma_router->dev,
route_data);
chan = ERR_PTR(-EPROBE_DEFER);
goto err;
}
chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
if (IS_ERR_OR_NULL(chan)) {
@@ -79,6 +83,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
chan->route_data = route_data;
}
err:
/*
* Need to put the node back since the ofdma->of_dma_route_allocate
* has taken it for generating the new, translated dma_spec


@@ -855,8 +855,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
error:
of_dma_controller_free(pdev->dev.of_node);
pm_runtime_put(&pdev->dev);
error_pm:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
}


@@ -394,6 +394,7 @@ struct xilinx_dma_tx_descriptor {
* @genlock: Support genlock mode
* @err: Channel has errors
* @idle: Check for channel idle
* @terminating: Check for channel being synchronized by user
* @tasklet: Cleanup work after irq
* @config: Device configuration info
* @flush_on_fsync: Flush on Frame sync
@@ -431,6 +432,7 @@ struct xilinx_dma_chan {
bool genlock;
bool err;
bool idle;
bool terminating;
struct tasklet_struct tasklet;
struct xilinx_vdma_config config;
bool flush_on_fsync;
@@ -1049,6 +1051,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
/* Run any dependencies, then free the descriptor */
dma_run_dependencies(&desc->async_tx);
xilinx_dma_free_tx_descriptor(chan, desc);
/*
* While we ran a callback the user called a terminate function,
* which takes care of cleaning up any remaining descriptors
*/
if (chan->terminating)
break;
}
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1965,6 +1974,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
if (desc->cyclic)
chan->cyclic = true;
chan->terminating = false;
spin_unlock_irqrestore(&chan->lock, flags);
return cookie;
@@ -2436,6 +2447,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
xilinx_dma_chan_reset(chan);
/* Remove and free all of the descriptors in the lists */
chan->terminating = true;
xilinx_dma_free_descriptors(chan);
chan->idle = true;
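
The race closed here: the cleanup loop drops the channel lock around descriptor callbacks, and a terminate issued from that window tears down the very list the loop is walking. The terminating flag lets the loop notice and bail out. A minimal pthread model of the same idea (flag names and timings are illustrative, not the driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool terminating;
static int pending = 5;	/* stand-in for the done-descriptor list */

static void *cleanup_thread(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		if (terminating || pending == 0) {
			/* terminate_all owns the rest of the cleanup */
			pthread_mutex_unlock(&lock);
			break;
		}
		pending--;
		pthread_mutex_unlock(&lock);
		usleep(1000);	/* the callback runs without the lock held */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, cleanup_thread, NULL);
	usleep(2000);

	pthread_mutex_lock(&lock);
	terminating = true;	/* as in xilinx_dma_terminate_all() */
	pending = 0;		/* free whatever descriptors remain */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("terminated cleanly\n");
	return 0;
}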


@@ -1271,6 +1271,16 @@ static bool is_raven_kicker(struct amdgpu_device *adev)
return false;
}
static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
{
if ((adev->asic_type == CHIP_RENOIR) &&
(adev->gfx.me_fw_version >= 0x000000a5) &&
(adev->gfx.me_feature_version >= 52))
return true;
else
return false;
}
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
@@ -3619,6 +3629,15 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(adev->doorbell_index.kiq * 2) << 2);
/* If GC has entered CGPG, ringing doorbell > first page
* doesn't wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to
* workaround this issue. And this change has to align with firmware
* update.
*/
if (check_if_enlarge_doorbell_range(adev))
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
(adev->doorbell.size - 4));
else
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
(adev->doorbell_index.userqueue_end * 2) << 2);
}


@@ -66,9 +66,11 @@ int rn_get_active_display_cnt_wa(
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
/* Extend the WA to DP for Linux*/
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK ||
stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
tmds_present = true;
}


@@ -357,7 +357,7 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
MASTER_UPDATE_LOCK_DB_X,
h_blank_start - 200 - 1,
(h_blank_start - 200 - 1) / optc1->opp_count,
MASTER_UPDATE_LOCK_DB_Y,
v_blank_start - 1);
}


@@ -34,6 +34,7 @@
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
#define DISP_AAL_OUTPUT_SIZE 0x04d8
#define DISP_CCORR_EN 0x0000
#define CCORR_EN BIT(0)
@@ -180,7 +181,8 @@ static void mtk_aal_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
{
mtk_ddp_write(cmdq_pkt, h << 16 | w, comp, DISP_AAL_SIZE);
mtk_ddp_write(cmdq_pkt, w << 16 | h, comp, DISP_AAL_SIZE);
mtk_ddp_write(cmdq_pkt, w << 16 | h, comp, DISP_AAL_OUTPUT_SIZE);
}
static void mtk_aal_start(struct mtk_ddp_comp *comp)
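
The one-line change matters because the SIZE register packs width in the high half-word and height in the low one; with the operands swapped, a 1920x1080 mode gets programmed as 1080x1920. A quick worked example of the packing implied by the fix:

#include <stdio.h>

int main(void)
{
	unsigned int w = 1920, h = 1080;

	printf("wrong:   0x%08x  (h << 16 | w)\n", h << 16 | w);	/* 0x04380780 */
	printf("correct: 0x%08x  (w << 16 | h)\n", w << 16 | h);	/* 0x07800438 */
	return 0;
}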


@@ -7,6 +7,7 @@
#define MTK_DRM_DDP_COMP_H
#include <linux/io.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
struct device;
struct device_node;
@@ -35,39 +36,6 @@ enum mtk_ddp_comp_type {
MTK_DDP_COMP_TYPE_MAX,
};
enum mtk_ddp_comp_id {
DDP_COMPONENT_AAL0,
DDP_COMPONENT_AAL1,
DDP_COMPONENT_BLS,
DDP_COMPONENT_CCORR,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
DDP_COMPONENT_DITHER,
DDP_COMPONENT_DPI0,
DDP_COMPONENT_DPI1,
DDP_COMPONENT_DSI0,
DDP_COMPONENT_DSI1,
DDP_COMPONENT_DSI2,
DDP_COMPONENT_DSI3,
DDP_COMPONENT_GAMMA,
DDP_COMPONENT_OD0,
DDP_COMPONENT_OD1,
DDP_COMPONENT_OVL0,
DDP_COMPONENT_OVL_2L0,
DDP_COMPONENT_OVL_2L1,
DDP_COMPONENT_OVL1,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
DDP_COMPONENT_PWM2,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_RDMA2,
DDP_COMPONENT_UFOE,
DDP_COMPONENT_WDMA0,
DDP_COMPONENT_WDMA1,
DDP_COMPONENT_ID_MAX,
};
struct mtk_ddp_comp;
struct cmdq_pkt;
struct mtk_ddp_comp_funcs {


@@ -466,20 +466,6 @@ pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
qi_submit_sync(iommu, &desc, 1, 0);
}
static void
iotlb_invalidation_with_pasid(struct intel_iommu *iommu, u16 did, u32 pasid)
{
struct qi_desc desc;
desc.qw0 = QI_EIOTLB_PASID(pasid) | QI_EIOTLB_DID(did) |
QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
desc.qw1 = 0;
desc.qw2 = 0;
desc.qw3 = 0;
qi_submit_sync(iommu, &desc, 1, 0);
}
static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
struct device *dev, u32 pasid)
@@ -511,20 +497,26 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
u32 pasid, bool fault_ignore)
{
struct pasid_entry *pte;
u16 did;
u16 did, pgtt;
pte = intel_pasid_get_entry(dev, pasid);
if (WARN_ON(!pte))
return;
did = pasid_get_domain_id(pte);
pgtt = pasid_pte_get_pgtt(pte);
intel_pasid_clear_entry(dev, pasid, fault_ignore);
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(pte, sizeof(*pte));
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
iotlb_invalidation_with_pasid(iommu, did, pasid);
if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
else
iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
/* Device IOTLB doesn't need to be flushed in caching mode. */
if (!cap_caching_mode(iommu->cap))
@@ -540,7 +532,7 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
if (cap_caching_mode(iommu->cap)) {
pasid_cache_invalidation_with_pasid(iommu, did, pasid);
iotlb_invalidation_with_pasid(iommu, did, pasid);
qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
} else {
iommu_flush_write_buffer(iommu);
}


@@ -99,6 +99,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}
/* Get PGTT field of a PASID table entry */
static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
{
return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
}
extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp);
void intel_pasid_free_id(u32 pasid);


@@ -123,53 +123,16 @@ static void __flush_svm_range_dev(struct intel_svm *svm,
unsigned long address,
unsigned long pages, int ih)
{
struct qi_desc desc;
struct device_domain_info *info = get_domain_info(sdev->dev);
if (pages == -1) {
desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
QI_EIOTLB_DID(sdev->did) |
QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
QI_EIOTLB_TYPE;
desc.qw1 = 0;
} else {
int mask = ilog2(__roundup_pow_of_two(pages));
if (WARN_ON(!pages))
return;
desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
QI_EIOTLB_DID(sdev->did) |
QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
QI_EIOTLB_TYPE;
desc.qw1 = QI_EIOTLB_ADDR(address) |
QI_EIOTLB_IH(ih) |
QI_EIOTLB_AM(mask);
}
desc.qw2 = 0;
desc.qw3 = 0;
qi_submit_sync(sdev->iommu, &desc, 1, 0);
if (sdev->dev_iotlb) {
desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
QI_DEV_EIOTLB_SID(sdev->sid) |
QI_DEV_EIOTLB_QDEP(sdev->qdep) |
QI_DEIOTLB_TYPE;
if (pages == -1) {
desc.qw1 = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) |
QI_DEV_EIOTLB_SIZE;
} else if (pages > 1) {
/* The least significant zero bit indicates the size. So,
* for example, an "address" value of 0x12345f000 will
* flush from 0x123440000 to 0x12347ffff (256KiB). */
unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
unsigned long mask = __rounddown_pow_of_two(address ^ last);
desc.qw1 = QI_DEV_EIOTLB_ADDR((address & ~mask) |
(mask - 1)) | QI_DEV_EIOTLB_SIZE;
} else {
desc.qw1 = QI_DEV_EIOTLB_ADDR(address);
}
desc.qw2 = 0;
desc.qw3 = 0;
qi_submit_sync(sdev->iommu, &desc, 1, 0);
}
qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
if (info->ats_enabled)
qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
svm->pasid, sdev->qdep, address,
order_base_2(pages));
}
static void intel_flush_svm_range_dev(struct intel_svm *svm,


@@ -880,6 +880,9 @@ void iommu_group_remove_device(struct device *dev)
struct iommu_group *group = dev->iommu_group;
struct group_device *tmp_device, *device = NULL;
if (!group)
return;
dev_info(dev, "Removing from iommu group %d\n", group->id);
/* Pre-notify listeners that a device is being removed. */


@@ -91,16 +91,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200)
free_irq(tpci200->info->pdev->irq, (void *) tpci200);
pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
pci_disable_device(tpci200->info->pdev);
pci_dev_put(tpci200->info->pdev);
}
static void tpci200_enable_irq(struct tpci200_board *tpci200,
@@ -259,7 +256,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
goto out_disable_pci;
goto err_disable_device;
}
/* Request IO ID INT space (Bar 3) */
@@ -271,7 +268,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
goto out_release_ip_space;
goto err_ip_interface_bar;
}
/* Request MEM8 space (Bar 5) */
@@ -282,7 +279,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
goto out_release_ioid_int_space;
goto err_io_id_int_spaces_bar;
}
/* Request MEM16 space (Bar 4) */
@@ -293,7 +290,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
goto out_release_mem8_space;
goto err_mem8_space_bar;
}
/* Map internal tpci200 driver user space */
@@ -307,7 +304,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
res = -ENOMEM;
goto out_release_mem8_space;
goto err_mem16_space_bar;
}
/* Initialize lock that protects interface_regs */
@@ -346,18 +343,22 @@ static int tpci200_register(struct tpci200_board *tpci200)
"(bn 0x%X, sn 0x%X) unable to register IRQ !",
tpci200->info->pdev->bus->number,
tpci200->info->pdev->devfn);
goto out_release_ioid_int_space;
goto err_interface_regs;
}
return 0;
out_release_mem8_space:
err_interface_regs:
pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
err_mem16_space_bar:
pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
err_mem8_space_bar:
pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
out_release_ioid_int_space:
err_io_id_int_spaces_bar:
pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
out_release_ip_space:
err_ip_interface_bar:
pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
out_disable_pci:
err_disable_device:
pci_disable_device(tpci200->info->pdev);
return res;
}
@@ -529,7 +530,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL);
if (!tpci200->info) {
ret = -ENOMEM;
goto out_err_info;
goto err_tpci200;
}
pci_dev_get(pdev);
@@ -540,7 +541,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
if (ret) {
dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory");
ret = -EBUSY;
goto out_err_pci_request;
goto err_tpci200_info;
}
tpci200->info->cfg_regs = ioremap(
pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
@@ -548,7 +549,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
if (!tpci200->info->cfg_regs) {
dev_err(&pdev->dev, "Failed to map PCI Configuration Memory");
ret = -EFAULT;
goto out_err_ioremap;
goto err_request_region;
}
/* Disable byte swapping for 16 bit IP module access. This will ensure
@@ -571,7 +572,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
if (ret) {
dev_err(&pdev->dev, "error during tpci200 install\n");
ret = -ENODEV;
goto out_err_install;
goto err_cfg_regs;
}
/* Register the carrier in the industry pack bus driver */
@@ -583,7 +584,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
dev_err(&pdev->dev,
"error registering the carrier on ipack driver\n");
ret = -EFAULT;
goto out_err_bus_register;
goto err_tpci200_install;
}
/* save the bus number given by ipack to logging purpose */
@@ -594,19 +595,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
tpci200_create_device(tpci200, i);
return 0;
out_err_bus_register:
err_tpci200_install:
tpci200_uninstall(tpci200);
/* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
tpci200->info->cfg_regs = NULL;
out_err_install:
if (tpci200->info->cfg_regs)
iounmap(tpci200->info->cfg_regs);
out_err_ioremap:
err_cfg_regs:
pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
err_request_region:
pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
out_err_pci_request:
pci_dev_put(pdev);
err_tpci200_info:
kfree(tpci200->info);
out_err_info:
pci_dev_put(pdev);
err_tpci200:
kfree(tpci200);
return ret;
}
@@ -616,6 +614,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200)
ipack_bus_unregister(tpci200->info->ipack_bus);
tpci200_uninstall(tpci200);
pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
pci_dev_put(tpci200->info->pdev);
kfree(tpci200->info);
kfree(tpci200);
}
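
The tpci200 rework above is the canonical goto-unwind shape: acquire resources in order, and on failure jump to a label that releases only what was already acquired, in reverse order, so nothing leaks and nothing is freed twice. A compact stand-alone sketch of that shape (resources simulated with malloc; names illustrative):

#include <stdio.h>
#include <stdlib.h>

static int probe(int fail_late)
{
	char *bar_a, *bar_b;
	int ret;

	bar_a = malloc(16);		/* first resource */
	if (!bar_a) {
		ret = -1;
		goto err_out;
	}

	bar_b = malloc(16);		/* second resource */
	if (!bar_b) {
		ret = -1;
		goto err_free_a;	/* undo only what we already hold */
	}

	if (fail_late) {		/* e.g. a registration step fails */
		ret = -1;
		goto err_free_b;
	}

	free(bar_b);			/* demo only: a real probe keeps these */
	free(bar_a);
	return 0;

err_free_b:
	free(bar_b);			/* reverse order: last acquired, first freed */
err_free_a:
	free(bar_a);
err_out:
	return ret;
}

int main(void)
{
	printf("probe(fail_late=1) = %d\n", probe(1));
	return 0;
}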


@@ -1184,15 +1184,11 @@ out:
return err;
}
static void zr364xx_release(struct v4l2_device *v4l2_dev)
static void zr364xx_board_uninit(struct zr364xx_camera *cam)
{
struct zr364xx_camera *cam =
container_of(v4l2_dev, struct zr364xx_camera, v4l2_dev);
unsigned long i;
v4l2_device_unregister(&cam->v4l2_dev);
videobuf_mmap_free(&cam->vb_vidq);
zr364xx_stop_readpipe(cam);
/* release sys buffers */
for (i = 0; i < FRAMES; i++) {
@@ -1203,9 +1199,19 @@ static void zr364xx_release(struct v4l2_device *v4l2_dev)
cam->buffer.frame[i].lpvbits = NULL;
}
v4l2_ctrl_handler_free(&cam->ctrl_handler);
/* release transfer buffer */
kfree(cam->pipe->transfer_buffer);
}
static void zr364xx_release(struct v4l2_device *v4l2_dev)
{
struct zr364xx_camera *cam =
container_of(v4l2_dev, struct zr364xx_camera, v4l2_dev);
videobuf_mmap_free(&cam->vb_vidq);
v4l2_ctrl_handler_free(&cam->ctrl_handler);
zr364xx_board_uninit(cam);
v4l2_device_unregister(&cam->v4l2_dev);
kfree(cam);
}
@@ -1328,6 +1334,7 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
{
struct zr364xx_pipeinfo *pipe = cam->pipe;
unsigned long i;
int err;
DBG("board init: %p\n", cam);
memset(pipe, 0, sizeof(*pipe));
@@ -1360,9 +1367,8 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
if (i == 0) {
printk(KERN_INFO KBUILD_MODNAME ": out of memory. Aborting\n");
kfree(cam->pipe->transfer_buffer);
cam->pipe->transfer_buffer = NULL;
return -ENOMEM;
err = -ENOMEM;
goto err_free;
} else
cam->buffer.dwFrames = i;
@@ -1377,9 +1383,20 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
/*** end create system buffers ***/
/* start read pipe */
zr364xx_start_readpipe(cam);
err = zr364xx_start_readpipe(cam);
if (err)
goto err_free_frames;
DBG(": board initialized\n");
return 0;
err_free_frames:
for (i = 0; i < FRAMES; i++)
vfree(cam->buffer.frame[i].lpvbits);
err_free:
kfree(cam->pipe->transfer_buffer);
cam->pipe->transfer_buffer = NULL;
return err;
}
static int zr364xx_probe(struct usb_interface *intf,
@@ -1404,12 +1421,10 @@ static int zr364xx_probe(struct usb_interface *intf,
if (!cam)
return -ENOMEM;
cam->v4l2_dev.release = zr364xx_release;
err = v4l2_device_register(&intf->dev, &cam->v4l2_dev);
if (err < 0) {
dev_err(&udev->dev, "couldn't register v4l2_device\n");
kfree(cam);
return err;
goto free_cam;
}
hdl = &cam->ctrl_handler;
v4l2_ctrl_handler_init(hdl, 1);
@@ -1418,7 +1433,7 @@ static int zr364xx_probe(struct usb_interface *intf,
if (hdl->error) {
err = hdl->error;
dev_err(&udev->dev, "couldn't register control\n");
goto fail;
goto free_hdlr_and_unreg_dev;
}
/* save the init method used by this camera */
cam->method = id->driver_info;
@@ -1491,7 +1506,7 @@ static int zr364xx_probe(struct usb_interface *intf,
if (!cam->read_endpoint) {
err = -ENOMEM;
dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
goto fail;
goto free_hdlr_and_unreg_dev;
}
/* v4l */
@@ -1502,10 +1517,11 @@ static int zr364xx_probe(struct usb_interface *intf,
/* load zr364xx board specific */
err = zr364xx_board_init(cam);
if (!err)
if (err)
goto free_hdlr_and_unreg_dev;
err = v4l2_ctrl_handler_setup(hdl);
if (err)
goto fail;
goto board_uninit;
spin_lock_init(&cam->slock);
@@ -1520,16 +1536,20 @@ static int zr364xx_probe(struct usb_interface *intf,
err = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
if (err) {
dev_err(&udev->dev, "video_register_device failed\n");
goto fail;
goto board_uninit;
}
cam->v4l2_dev.release = zr364xx_release;
dev_info(&udev->dev, DRIVER_DESC " controlling device %s\n",
video_device_node_name(&cam->vdev));
return 0;
fail:
board_uninit:
zr364xx_board_uninit(cam);
free_hdlr_and_unreg_dev:
v4l2_ctrl_handler_free(hdl);
v4l2_device_unregister(&cam->v4l2_dev);
free_cam:
kfree(cam);
return err;
}
@@ -1576,10 +1596,19 @@ static int zr364xx_resume(struct usb_interface *intf)
if (!cam->was_streaming)
return 0;
zr364xx_start_readpipe(cam);
res = zr364xx_start_readpipe(cam);
if (res)
return res;
res = zr364xx_prepare(cam);
if (!res)
if (res)
goto err_prepare;
zr364xx_start_acquire(cam);
return 0;
err_prepare:
zr364xx_stop_readpipe(cam);
return res;
}
#endif


@@ -2018,8 +2018,8 @@ static void dw_mci_tasklet_func(unsigned long priv)
continue;
}
dw_mci_stop_dma(host);
send_stop_abort(host, data);
dw_mci_stop_dma(host);
state = STATE_SENDING_STOP;
break;
}
@@ -2043,10 +2043,10 @@ static void dw_mci_tasklet_func(unsigned long priv)
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
dw_mci_stop_dma(host);
state = STATE_DATA_ERROR;
break;
}
@@ -2079,10 +2079,10 @@ static void dw_mci_tasklet_func(unsigned long priv)
*/
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
if (!(host->data_status & (SDMMC_INT_DRTO |
SDMMC_INT_EBE)))
send_stop_abort(host, data);
dw_mci_stop_dma(host);
state = STATE_DATA_ERROR;
break;
}


@@ -479,8 +479,9 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
u32 status;
int ret = 0;
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
spin_lock_irqsave(&host->lock, flags);
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 &&
host->pwr_reg & MCI_STM32_VSWITCHEN) {
mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
spin_unlock_irqrestore(&host->lock, flags);
@@ -492,9 +493,11 @@ static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
host->base + MMCICLEAR);
spin_lock_irqsave(&host->lock, flags);
mmci_write_pwrreg(host, host->pwr_reg &
~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
}
spin_unlock_irqrestore(&host->lock, flags);
return ret;
}


@@ -173,6 +173,23 @@ static unsigned int sdhci_iproc_get_max_clock(struct sdhci_host *host)
return pltfm_host->clock;
}
/*
* There is a known bug on BCM2711's SDHCI core integration where the
* controller will hang when the difference between the core clock and the bus
* clock is too great. Specifically this can be reproduced under the following
* conditions:
*
* - No SD card plugged in, polling thread is running, probing cards at
* 100 kHz.
* - BCM2711's core clock configured at 500MHz or more
*
* So we set 200kHz as the minimum clock frequency available for that SoC.
*/
static unsigned int sdhci_iproc_bcm2711_get_min_clock(struct sdhci_host *host)
{
return 200000;
}
static const struct sdhci_ops sdhci_iproc_ops = {
.set_clock = sdhci_set_clock,
.get_max_clock = sdhci_iproc_get_max_clock,
@@ -271,13 +288,15 @@ static const struct sdhci_ops sdhci_iproc_bcm2711_ops = {
.set_clock = sdhci_set_clock,
.set_power = sdhci_set_power_and_bus_voltage,
.get_max_clock = sdhci_iproc_get_max_clock,
.get_min_clock = sdhci_iproc_bcm2711_get_min_clock,
.set_bus_width = sdhci_set_bus_width,
.reset = sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 |
SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
.ops = &sdhci_iproc_bcm2711_ops,
};


@@ -2087,6 +2087,23 @@ static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
sdhci_cqe_disable(mmc, recovery);
}
static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
u32 count, start = 15;
__sdhci_set_timeout(host, cmd);
count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
/*
* Update software timeout value if its value is less than hardware data
* timeout value. Qcom SoC hardware data timeout value was calculated
* using 4 * MCLK * 2^(count + 13). where MCLK = 1 / host->clock.
*/
if (cmd && cmd->data && host->clock > 400000 &&
host->clock <= 50000000 &&
((1 << (count + start)) > (10 * host->clock)))
host->data_timeout = 22LL * NSEC_PER_SEC;
}
static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
.enable = sdhci_msm_cqe_enable,
.disable = sdhci_msm_cqe_disable,
@@ -2444,6 +2461,7 @@ static const struct sdhci_ops sdhci_msm_ops = {
.irq = sdhci_msm_cqe_irq,
.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
.set_power = sdhci_set_power_noreg,
.set_timeout = sdhci_msm_set_timeout,
};
static const struct sdhci_pltfm_data sdhci_msm_pdata = {
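
To see why 22 seconds was chosen: by the formula quoted in the comment, the hardware data timeout is 4 * 2^(count + 13) / host->clock seconds, which equals 2^(count + 15) / host->clock, so the guard (1 << (count + 15)) > 10 * host->clock fires exactly when the hardware would allow more than 10 s. At count = 14 on a 50 MHz clock the hardware timeout is about 10.7 s, so the software timeout is raised to 22 s to stay above it. A small check of that arithmetic (the values are plug-ins for illustration, not taken from real hardware):

#include <stdio.h>

int main(void)
{
	unsigned int count = 14, start = 15;
	double clock_hz = 50e6;	/* host->clock */
	double hw_timeout = 4.0 * (1u << (count + 13)) / clock_hz;
	int guard_fires = (1u << (count + start)) > 10 * clock_hz;

	printf("hw timeout = %.1f s, guard fires = %d\n",
	       hw_timeout, guard_fires);
	return 0;
}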


@@ -119,7 +119,7 @@ static int cfi_use_status_reg(struct cfi_private *cfi)
struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
return extp->MinorVersion >= '5' &&
return extp && extp->MinorVersion >= '5' &&
(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}


@@ -69,7 +69,8 @@
#include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW)
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
NETIF_MSG_TX_ERR)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
@@ -360,6 +361,33 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
return md_dst->u.port_info.port_id;
}
static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
u16 prod)
{
bnxt_db_write(bp, &txr->tx_db, prod);
txr->kick_pending = 0;
}
static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
struct netdev_queue *txq)
{
netif_tx_stop_queue(txq);
/* netif_tx_stop_queue() must be done before checking
* tx index in bnxt_tx_avail() below, because in
* bnxt_tx_int(), we update tx index before checking for
* netif_tx_queue_stopped().
*/
smp_mb();
if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
netif_tx_wake_queue(txq);
return false;
}
return true;
}
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -378,6 +406,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
dev_kfree_skb_any(skb);
atomic_long_inc(&dev->tx_dropped);
return NETDEV_TX_OK;
}
@@ -387,7 +416,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
free_size = bnxt_tx_avail(bp, txr);
if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
netif_tx_stop_queue(txq);
/* We must have raced with NAPI cleanup */
if (net_ratelimit() && txr->kick_pending)
netif_warn(bp, tx_err, dev,
"bnxt: ring busy w/ flush pending!\n");
if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
return NETDEV_TX_BUSY;
}
@@ -490,21 +523,16 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
normal_tx:
if (length < BNXT_MIN_PKT_SIZE) {
pad = BNXT_MIN_PKT_SIZE - length;
if (skb_pad(skb, pad)) {
if (skb_pad(skb, pad))
/* SKB already freed. */
tx_buf->skb = NULL;
return NETDEV_TX_OK;
}
goto tx_kick_pending;
length = BNXT_MIN_PKT_SIZE;
}
mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
dev_kfree_skb_any(skb);
tx_buf->skb = NULL;
return NETDEV_TX_OK;
}
if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
goto tx_free;
dma_unmap_addr_set(tx_buf, mapping, mapping);
flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
@@ -589,24 +617,17 @@ normal_tx:
txr->tx_prod = prod;
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
bnxt_db_write(bp, &txr->tx_db, prod);
bnxt_txr_db_kick(bp, txr, prod);
else
txr->kick_pending = 1;
tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
if (netdev_xmit_more() && !tx_buf->is_push)
bnxt_db_write(bp, &txr->tx_db, prod);
bnxt_txr_db_kick(bp, txr, prod);
netif_tx_stop_queue(txq);
/* netif_tx_stop_queue() must be done before checking
* tx index in bnxt_tx_avail() below, because in
* bnxt_tx_int(), we update tx index before checking for
* netif_tx_queue_stopped().
*/
smp_mb();
if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
netif_tx_wake_queue(txq);
bnxt_txr_netif_try_stop_queue(bp, txr, txq);
}
return NETDEV_TX_OK;
@@ -616,7 +637,6 @@ tx_dma_error:
/* start back at beginning and unmap skb */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
tx_buf->skb = NULL;
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
skb_headlen(skb), PCI_DMA_TODEVICE);
prod = NEXT_TX(prod);
@@ -630,7 +650,13 @@ tx_dma_error:
PCI_DMA_TODEVICE);
}
tx_free:
dev_kfree_skb_any(skb);
tx_kick_pending:
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
atomic_long_inc(&dev->tx_dropped);
return NETDEV_TX_OK;
}
@@ -690,14 +716,9 @@ next_tx_int:
smp_mb();
if (unlikely(netif_tx_queue_stopped(txq)) &&
(bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
__netif_tx_lock(txq, smp_processor_id());
if (netif_tx_queue_stopped(txq) &&
bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
txr->dev_state != BNXT_DEV_STATE_CLOSING)
READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
netif_tx_wake_queue(txq);
__netif_tx_unlock(txq);
}
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -1726,6 +1747,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
/* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();
prod = rxr->rx_prod;
if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
@@ -1929,6 +1954,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
return -EBUSY;
/* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();
cmp_type = RX_CMP_TYPE(rxcmp);
if (cmp_type == CMP_TYPE_RX_L2_CMP) {
rxcmp1->rx_cmp_cfa_code_errors_v2 |=
@@ -2373,6 +2402,10 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
if (!TX_CMP_VALID(txcmp, raw_cons))
break;
/* The valid test of the entry must be done first before
* reading any further.
*/
dma_rmb();
if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
cp_cons = RING_CMP(tmp_raw_cons);
@@ -8849,10 +8882,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
napi_disable(&bp->bnapi[i]->napi);
if (bp->bnapi[i]->rx_ring)
cancel_work_sync(&cpr->dim.work);
napi_disable(&bp->bnapi[i]->napi);
}
}
@@ -8885,9 +8917,11 @@ void bnxt_tx_disable(struct bnxt *bp)
if (bp->tx_ring) {
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
txr->dev_state = BNXT_DEV_STATE_CLOSING;
WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
}
}
/* Make sure napi polls see @dev_state change */
synchronize_net();
/* Drop carrier first to prevent TX timeout */
netif_carrier_off(bp->dev);
/* Stop all TX queues */
@@ -8901,8 +8935,10 @@ void bnxt_tx_enable(struct bnxt *bp)
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
txr->dev_state = 0;
WRITE_ONCE(txr->dev_state, 0);
}
/* Make sure napi polls see @dev_state change */
synchronize_net();
netif_tx_wake_all_queues(bp->dev);
if (bp->link_info.link_up)
netif_carrier_on(bp->dev);
@@ -10372,6 +10408,9 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
return true;
return false;
}
/* 212 firmware is broken for aRFS */
if (BNXT_FW_MAJ(bp) == 212)
return false;
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
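
The barrier comment repeated in the hunks above describes a classic lost-wakeup race: the producer stops the queue when the ring looks full, the consumer frees slots and wakes stopped queues, and without a full memory barrier between "mark stopped" and "re-check free slots" the producer can miss a concurrent completion and stall forever. A user-space model of the ordering with C11 atomics (the seq_cst fences stand in for smp_mb(); this is a sketch of the pattern, not the driver):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int free_slots;		/* ring slots available */
static atomic_int queue_stopped;

/* Producer, as in bnxt_txr_netif_try_stop_queue(). */
static int try_stop_queue(void)
{
	atomic_store(&queue_stopped, 1);		/* netif_tx_stop_queue() */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	/* Re-check after the barrier: a completion may have freed slots
	 * between our earlier full check and the store above. */
	if (atomic_load(&free_slots) > 0) {
		atomic_store(&queue_stopped, 0);	/* netif_tx_wake_queue() */
		return 0;
	}
	return 1;	/* stay stopped; the consumer will wake us */
}

/* Consumer, as in bnxt_tx_int(): free a slot, then check stopped. */
static void tx_complete(void)
{
	atomic_fetch_add(&free_slots, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the above */
	if (atomic_load(&queue_stopped))
		atomic_store(&queue_stopped, 0);	/* wake the queue */
}

int main(void)
{
	printf("stopped = %d\n", try_stop_queue());	/* 1: ring is full */
	tx_complete();
	printf("stopped after completion = %d\n",
	       atomic_load(&queue_stopped));		/* 0: woken */
	return 0;
}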


@@ -770,6 +770,7 @@ struct bnxt_tx_ring_info {
u16 tx_prod;
u16 tx_cons;
u16 txq_index;
u8 kick_pending;
struct bnxt_db_info tx_db;
struct tx_bd *tx_desc_ring[MAX_TX_PAGES];


@@ -3556,8 +3556,7 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
/* is DCB enabled at all? */
if (vsi->tc_config.numtc == 1)
return i40e_swdcb_skb_tx_hash(netdev, skb,
netdev->real_num_tx_queues);
return netdev_pick_tx(netdev, skb, sb_dev);
prio = skb->priority;
hw = &vsi->back->hw;


@@ -134,6 +134,7 @@ struct iavf_q_vector {
struct iavf_mac_filter {
struct list_head list;
u8 macaddr[ETH_ALEN];
bool is_new_mac; /* filter is new, wait for PF decision */
bool remove; /* filter needs to be removed */
bool add; /* filter needs to be added */
};


@@ -751,6 +751,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
f->is_new_mac = true;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else {
f->remove = false;


@@ -537,6 +537,47 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
kfree(veal);
}
/**
* iavf_mac_add_ok
* @adapter: adapter structure
*
* Submit list of filters based on PF response.
**/
static void iavf_mac_add_ok(struct iavf_adapter *adapter)
{
struct iavf_mac_filter *f, *ftmp;
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
f->is_new_mac = false;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
* iavf_mac_add_reject
* @adapter: adapter structure
*
* Remove filters from list based on PF response.
**/
static void iavf_mac_add_reject(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct iavf_mac_filter *f, *ftmp;
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
f->remove = false;
if (f->is_new_mac) {
list_del(&f->list);
kfree(f);
}
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
* iavf_add_vlans
* @adapter: adapter structure
@@ -1295,6 +1336,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
case VIRTCHNL_OP_ADD_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
iavf_stat_str(&adapter->hw, v_retval));
iavf_mac_add_reject(adapter);
/* restore administratively set MAC address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
break;
@@ -1364,10 +1406,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
switch (v_opcode) {
case VIRTCHNL_OP_ADD_ETH_ADDR: {
case VIRTCHNL_OP_ADD_ETH_ADDR:
if (!v_retval)
iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
}
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =

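The two helpers above implement a pending/commit protocol: each locally added filter is tagged is_new_mac until the PF answers; an ACK clears the tag on everything, while a NAK removes exactly the still-tagged entries, so the local list converges on what the PF actually accepted. A small stand-alone model of that bookkeeping (an array instead of the kernel's locked list; names illustrative):

#include <stdbool.h>
#include <stdio.h>

#define NFILTERS 4

struct filter {
	bool in_use;
	bool is_new;	/* waiting for the PF's verdict */
};

static struct filter table[NFILTERS];

static void add_filter(int i)
{
	table[i].in_use = true;
	table[i].is_new = true;
}

static void pf_ack(void)	/* like iavf_mac_add_ok() */
{
	for (int i = 0; i < NFILTERS; i++)
		table[i].is_new = false;
}

static void pf_nak(void)	/* like iavf_mac_add_reject() */
{
	for (int i = 0; i < NFILTERS; i++)
		if (table[i].is_new)
			table[i].in_use = false;	/* drop only pending ones */
}

int main(void)
{
	add_filter(0);
	pf_ack();	/* filter 0 committed */
	add_filter(1);
	pf_nak();	/* filter 1 rejected; filter 0 must survive */
	printf("f0=%d f1=%d\n", table[0].in_use, table[1].in_use);
	return 0;
}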

@@ -52,9 +52,12 @@ static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
/* Kick start the NAPI context so that receiving will start */
err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
if (err)
if (err) {
clear_bit(qid, adapter->af_xdp_zc_qps);
xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
return err;
}
}
return 0;
}


@@ -494,6 +494,7 @@ struct qede_fastpath {
#define QEDE_SP_HW_ERR 4
#define QEDE_SP_ARFS_CONFIG 5
#define QEDE_SP_AER 7
#define QEDE_SP_DISABLE 8
#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,


@@ -1006,6 +1006,13 @@ static void qede_sp_task(struct work_struct *work)
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
/* Disable execution of this deferred work once
* qede removal is in progress, this stop any future
* scheduling of sp_task.
*/
if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
return;
/* The locking scheme depends on the specific flag:
* In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
* ensure that ongoing flows are ended and new ones are not started.
@@ -1297,6 +1304,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
if (mode != QEDE_REMOVE_RECOVERY) {
set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);


@@ -3156,8 +3156,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
ret = QLCRD32(adapter, indirect_addr, &err);
if (err == -EIO)
if (err == -EIO) {
qlcnic_83xx_unlock_flash(adapter);
return err;
}
word = ret;
*(u32 *)p_data = word;


@@ -839,6 +839,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
return;
}
if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) {
pr_err("6pack: cooked buffer overrun, data loss\n");
sp->rx_count = 0;
return;
}
buf = sp->raw_buf;
sp->cooked_buf[sp->rx_count_cooked++] =
buf[0] | ((buf[1] << 2) & 0xc0);
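
The added check is the standard "room for what this step writes" guard: each decode step emits up to three cooked bytes, so it must confirm rx_count_cooked + 2 is still inside the buffer before writing, not after. A minimal sketch of the guard in isolation (buffer size and byte math are illustrative, not the 6pack wire format):

#include <stdio.h>

#define COOKED_SIZE 8

static unsigned char cooked[COOKED_SIZE];
static int count_cooked;

/* Decode one raw chunk into 3 cooked bytes, refusing to overrun. */
static int decode_chunk(const unsigned char *raw)
{
	if (count_cooked + 2 >= COOKED_SIZE) {	/* need 3 free slots */
		fprintf(stderr, "cooked buffer overrun, dropping data\n");
		return -1;
	}
	cooked[count_cooked++] = raw[0] | ((raw[1] << 2) & 0xc0);
	cooked[count_cooked++] = raw[1];
	cooked[count_cooked++] = raw[2];
	return 0;
}

int main(void)
{
	const unsigned char raw[4] = { 0x11, 0x22, 0x33, 0x44 };

	while (decode_chunk(raw) == 0)	/* the third call hits the guard */
		printf("decoded, count=%d\n", count_cooked);
	return 0;
}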


@@ -82,6 +82,17 @@ out:
static int parent_count;
static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
{
struct mdio_mux_child_bus *cb = pb->children;
while (cb) {
mdiobus_unregister(cb->mii_bus);
mdiobus_free(cb->mii_bus);
cb = cb->next;
}
}
int mdio_mux_init(struct device *dev,
struct device_node *mux_node,
int (*switch_fn)(int cur, int desired, void *data),
@@ -144,7 +155,7 @@ int mdio_mux_init(struct device *dev,
cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
if (!cb) {
ret_val = -ENOMEM;
continue;
goto err_loop;
}
cb->bus_number = v;
cb->parent = pb;
@@ -152,8 +163,7 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus = mdiobus_alloc();
if (!cb->mii_bus) {
ret_val = -ENOMEM;
devm_kfree(dev, cb);
continue;
goto err_loop;
}
cb->mii_bus->priv = cb;
@@ -165,11 +175,15 @@ int mdio_mux_init(struct device *dev,
cb->mii_bus->write = mdio_mux_write;
r = of_mdiobus_register(cb->mii_bus, child_bus_node);
if (r) {
mdiobus_free(cb->mii_bus);
if (r == -EPROBE_DEFER) {
ret_val = r;
goto err_loop;
}
devm_kfree(dev, cb);
dev_err(dev,
"Error: Failed to register MDIO bus for child %pOF\n",
child_bus_node);
mdiobus_free(cb->mii_bus);
devm_kfree(dev, cb);
} else {
cb->next = pb->children;
pb->children = cb;
@@ -182,6 +196,10 @@ int mdio_mux_init(struct device *dev,
dev_err(dev, "Error: No acceptable child buses found\n");
devm_kfree(dev, pb);
err_loop:
mdio_mux_uninit_children(pb);
of_node_put(child_bus_node);
err_pb_kz:
put_device(&parent_bus->dev);
err_parent_bus:
@@ -193,14 +211,8 @@ EXPORT_SYMBOL_GPL(mdio_mux_init);
void mdio_mux_uninit(void *mux_handle)
{
struct mdio_mux_parent_bus *pb = mux_handle;
struct mdio_mux_child_bus *cb = pb->children;
while (cb) {
mdiobus_unregister(cb->mii_bus);
mdiobus_free(cb->mii_bus);
cb = cb->next;
}
mdio_mux_uninit_children(pb);
put_device(&pb->mii_bus->dev);
}
EXPORT_SYMBOL_GPL(mdio_mux_uninit);


@@ -1159,7 +1159,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
{
struct phy_device *phydev = dev->net->phydev;
struct ethtool_link_ksettings ecmd;
int ladv, radv, ret;
int ladv, radv, ret, link;
u32 buf;
/* clear LAN78xx interrupt status */
@@ -1167,9 +1167,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
if (unlikely(ret < 0))
return -EIO;
mutex_lock(&phydev->lock);
phy_read_status(phydev);
link = phydev->link;
mutex_unlock(&phydev->lock);
if (!phydev->link && dev->link_on) {
if (!link && dev->link_on) {
dev->link_on = false;
/* reset MAC */
@@ -1182,7 +1185,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
return -EIO;
del_timer(&dev->stat_monitor);
} else if (phydev->link && !dev->link_on) {
} else if (link && !dev->link_on) {
dev->link_on = true;
phy_ethtool_ksettings_get(phydev, &ecmd);
@@ -1471,9 +1474,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
static u32 lan78xx_get_link(struct net_device *net)
{
phy_read_status(net->phydev);
u32 link;
return net->phydev->link;
mutex_lock(&net->phydev->lock);
phy_read_status(net->phydev);
link = net->phydev->link;
mutex_unlock(&net->phydev->lock);
return link;
}
static void lan78xx_get_drvinfo(struct net_device *net,
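
The lan78xx change is the snapshot-under-lock discipline: phy_read_status() mutates phydev fields, so concurrent readers must serialize the call and copy the fields they need into locals while still holding the lock, then work on the copies. A compact pthread sketch of that discipline (the phy struct is simulated):

#include <pthread.h>
#include <stdio.h>

struct phy {
	pthread_mutex_t lock;
	int link;	/* updated by phy_read_status() */
};

static void phy_read_status(struct phy *p)
{
	p->link = 1;	/* pretend we polled the hardware */
}

static int get_link_snapshot(struct phy *p)
{
	int link;

	pthread_mutex_lock(&p->lock);
	phy_read_status(p);	/* mutation serialized by the lock */
	link = p->link;		/* copy out while still protected */
	pthread_mutex_unlock(&p->lock);

	return link;		/* safe to use without the lock */
}

int main(void)
{
	struct phy p = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("link = %d\n", get_link_snapshot(&p));
	return 0;
}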


@@ -132,9 +132,15 @@ static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
const void *data)
{
return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
int ret;
ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS,
PEGASUS_REQT_WRITE, 0, indx, data, size,
1000, GFP_NOIO);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
return ret;
}
/*
@@ -145,10 +151,15 @@ static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
void *buf = &data;
int ret;
return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
ret = usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG,
PEGASUS_REQT_WRITE, data, indx, buf, 1,
1000, GFP_NOIO);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net, "%s failed with %d\n", __func__, ret);
return ret;
}
static int update_eth_regs_async(pegasus_t *pegasus)
@@ -188,10 +199,9 @@ static int update_eth_regs_async(pegasus_t *pegasus)
static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
{
int i;
__u8 data[4] = { phy, 0, 0, indx };
int i, ret;
__le16 regdi;
int ret = -ETIMEDOUT;
__u8 data[4] = { phy, 0, 0, indx };
if (cmd & PHY_WRITE) {
__le16 *t = (__le16 *) & data[1];
@@ -207,12 +217,15 @@ static int __mii_op(pegasus_t *p, __u8 phy, __u8 indx, __u16 *regd, __u8 cmd)
if (data[0] & PHY_DONE)
break;
}
if (i >= REG_TIMEOUT)
if (i >= REG_TIMEOUT) {
ret = -ETIMEDOUT;
goto fail;
}
if (cmd & PHY_READ) {
ret = get_registers(p, PhyData, 2, &regdi);
if (ret < 0)
goto fail;
*regd = le16_to_cpu(regdi);
return ret;
}
return 0;
fail:
@@ -235,9 +248,13 @@ static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd)
static int mdio_read(struct net_device *dev, int phy_id, int loc)
{
pegasus_t *pegasus = netdev_priv(dev);
int ret;
u16 res;
read_mii_word(pegasus, phy_id, loc, &res);
ret = read_mii_word(pegasus, phy_id, loc, &res);
if (ret < 0)
return ret;
return (int)res;
}
@@ -251,10 +268,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
{
int i;
__u8 tmp = 0;
int ret, i;
__le16 retdatai;
int ret;
__u8 tmp = 0;
set_register(pegasus, EpromCtrl, 0);
set_register(pegasus, EpromOffset, index);
@@ -262,21 +278,25 @@ static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
for (i = 0; i < REG_TIMEOUT; i++) {
ret = get_registers(pegasus, EpromCtrl, 1, &tmp);
if (ret < 0)
goto fail;
if (tmp & EPROM_DONE)
break;
if (ret == -ESHUTDOWN)
}
if (i >= REG_TIMEOUT) {
ret = -ETIMEDOUT;
goto fail;
}
if (i >= REG_TIMEOUT)
goto fail;
ret = get_registers(pegasus, EpromData, 2, &retdatai);
if (ret < 0)
goto fail;
*retdata = le16_to_cpu(retdatai);
return ret;
fail:
netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return ret;
}
#ifdef PEGASUS_WRITE_EEPROM
@@ -324,7 +344,7 @@ static int write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data)
return ret;
fail:
netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
}
#endif /* PEGASUS_WRITE_EEPROM */
@@ -367,19 +387,21 @@ static void set_ethernet_addr(pegasus_t *pegasus)
return;
err:
eth_hw_addr_random(pegasus->net);
dev_info(&pegasus->intf->dev, "software assigned MAC address.\n");
netif_dbg(pegasus, drv, pegasus->net, "software assigned MAC address.\n");
return;
}
static inline int reset_mac(pegasus_t *pegasus)
{
int ret, i;
__u8 data = 0x8;
int i;
set_register(pegasus, EthCtrl1, data);
for (i = 0; i < REG_TIMEOUT; i++) {
get_registers(pegasus, EthCtrl1, 1, &data);
ret = get_registers(pegasus, EthCtrl1, 1, &data);
if (ret < 0)
goto fail;
if (~data & 0x08) {
if (loopback)
break;
@@ -402,22 +424,29 @@ static inline int reset_mac(pegasus_t *pegasus)
}
if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) {
__u16 auxmode;
read_mii_word(pegasus, 3, 0x1b, &auxmode);
ret = read_mii_word(pegasus, 3, 0x1b, &auxmode);
if (ret < 0)
goto fail;
auxmode |= 4;
write_mii_word(pegasus, 3, 0x1b, &auxmode);
}
return 0;
fail:
netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return ret;
}
static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
{
__u16 linkpart;
__u8 data[4];
pegasus_t *pegasus = netdev_priv(dev);
int ret;
__u16 linkpart;
__u8 data[4];
read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
ret = read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
if (ret < 0)
goto fail;
data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
data[1] = 0;
if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
@@ -435,11 +464,16 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 ||
usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) {
u16 auxmode;
read_mii_word(pegasus, 0, 0x1b, &auxmode);
ret = read_mii_word(pegasus, 0, 0x1b, &auxmode);
if (ret < 0)
goto fail;
auxmode |= 4;
write_mii_word(pegasus, 0, 0x1b, &auxmode);
}
return 0;
fail:
netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return ret;
}
@@ -447,9 +481,9 @@ static void read_bulk_callback(struct urb *urb)
{
pegasus_t *pegasus = urb->context;
struct net_device *net;
u8 *buf = urb->transfer_buffer;
int rx_status, count = urb->actual_length;
int status = urb->status;
u8 *buf = urb->transfer_buffer;
__u16 pkt_len;
if (!pegasus)
@@ -1005,8 +1039,7 @@ static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
data[0] = pegasus->phy;
fallthrough;
case SIOCDEVPRIVATE + 1:
read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
res = 0;
res = read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]);
break;
case SIOCDEVPRIVATE + 2:
if (!capable(CAP_NET_ADMIN))
@@ -1040,22 +1073,25 @@ static void pegasus_set_multicast(struct net_device *net)
static __u8 mii_phy_probe(pegasus_t *pegasus)
{
int i;
int i, ret;
__u16 tmp;
for (i = 0; i < 32; i++) {
read_mii_word(pegasus, i, MII_BMSR, &tmp);
ret = read_mii_word(pegasus, i, MII_BMSR, &tmp);
if (ret < 0)
goto fail;
if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0)
continue;
else
return i;
}
fail:
return 0xff;
}
static inline void setup_pegasus_II(pegasus_t *pegasus)
{
int ret;
__u8 data = 0xa5;
set_register(pegasus, Reg1d, 0);
@@ -1067,7 +1103,9 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg7b, 2);
set_register(pegasus, 0x83, data);
get_registers(pegasus, 0x83, 1, &data);
ret = get_registers(pegasus, 0x83, 1, &data);
if (ret < 0)
goto fail;
if (data == 0xa5)
pegasus->chip = 0x8513;
@@ -1082,6 +1120,10 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg81, 6);
else
set_register(pegasus, Reg81, 2);
return;
fail:
netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__);
}
static void check_carrier(struct work_struct *work)
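
Note on the pegasus hunks above: every register accessor now returns 0 or a negative errno, and callers either propagate it or convert a poll expiry into an explicit -ETIMEDOUT instead of guessing. A minimal self-contained sketch of the same shape (REG_TIMEOUT, DONE_BIT, get_status and wait_done are stand-ins, not driver symbols):

	#include <errno.h>

	#define REG_TIMEOUT	512
	#define DONE_BIT	0x04

	/* Stand-in for get_registers(): 0 on success, negative errno on failure. */
	static int get_status(unsigned char *out)
	{
		*out = DONE_BIT;	/* pretend the device finished immediately */
		return 0;
	}

	static int wait_done(void)
	{
		unsigned char tmp;
		int i, ret;

		for (i = 0; i < REG_TIMEOUT; i++) {
			ret = get_status(&tmp);
			if (ret < 0)
				goto fail;	/* propagate the real errno */
			if (tmp & DONE_BIT)
				break;
		}
		if (i >= REG_TIMEOUT) {
			ret = -ETIMEDOUT;	/* report poll expiry explicitly */
			goto fail;
		}
		return 0;
	fail:
		return ret;
	}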


@@ -3432,7 +3432,7 @@ static void rtl_clear_bp(struct r8152 *tp, u16 type)
case RTL_VER_09:
default:
if (type == MCU_TYPE_USB) {
ocp_write_byte(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
ocp_write_word(tp, MCU_TYPE_USB, USB_BP2_EN, 0);
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_8, 0);
ocp_write_word(tp, MCU_TYPE_USB, USB_BP_9, 0);


@@ -63,7 +63,7 @@ static const unsigned long guest_offloads[] = {
VIRTIO_NET_F_GUEST_CSUM
};
#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
(1ULL << VIRTIO_NET_F_GUEST_UFO))
@@ -195,6 +195,9 @@ struct virtnet_info {
/* # of XDP queue pairs currently used by the driver */
u16 xdp_queue_pairs;
/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
bool xdp_enabled;
/* I like... big packets and I cannot lie! */
bool big_packets;
@@ -485,12 +488,41 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
return 0;
}
static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
{
unsigned int qp;
qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
return &vi->sq[qp];
}
/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
* the current cpu, so it does not need to be locked.
*
* Here we use a macro instead of inline functions because we have to deal with
* three issues at the same time: 1. the choice of sq. 2. judge and execute the
* lock/unlock of txq. 3. make sparse happy. It is difficult for two inline
* functions to perfectly solve these three problems at the same time.
*/
#define virtnet_xdp_get_sq(vi) ({ \
struct netdev_queue *txq; \
typeof(vi) v = (vi); \
unsigned int qp; \
\
if (v->curr_queue_pairs > nr_cpu_ids) { \
qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
qp += smp_processor_id(); \
txq = netdev_get_tx_queue(v->dev, qp); \
__netif_tx_acquire(txq); \
} else { \
qp = smp_processor_id() % v->curr_queue_pairs; \
txq = netdev_get_tx_queue(v->dev, qp); \
__netif_tx_lock(txq, raw_smp_processor_id()); \
} \
v->sq + qp; \
})
#define virtnet_xdp_put_sq(vi, q) { \
struct netdev_queue *txq; \
typeof(vi) v = (vi); \
\
txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
if (v->curr_queue_pairs > nr_cpu_ids) \
__netif_tx_release(txq); \
else \
__netif_tx_unlock(txq); \
}
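
Note on the locking scheme the macros above implement: with more queue pairs than CPUs, each CPU owns an XDP transmit queue exclusively and the txq lock can be elided; otherwise queues are shared across CPUs and must be locked around use (the macro form additionally keeps sparse's lock-context tracking happy). A minimal pthread analogy, with illustrative names (struct txq, get_txq/put_txq, NCPU) and a simplified ">= NCPU" test:

	#include <pthread.h>

	#define NCPU 8

	struct txq {
		pthread_mutex_t lock;
		/* ...queue state... */
	};

	/* Enough queues: per-CPU ownership, lock elided. Otherwise: share and lock. */
	static struct txq *get_txq(struct txq *qs, int nqueues, int cpu)
	{
		if (nqueues >= NCPU)
			return &qs[cpu];
		pthread_mutex_lock(&qs[cpu % nqueues].lock);
		return &qs[cpu % nqueues];
	}

	static void put_txq(struct txq *qs, int nqueues, struct txq *q)
	{
		(void)qs;
		if (nqueues < NCPU)
			pthread_mutex_unlock(&q->lock);
	}
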
static int virtnet_xdp_xmit(struct net_device *dev,
@@ -516,7 +548,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
if (!xdp_prog)
return -ENXIO;
sq = virtnet_xdp_sq(vi);
sq = virtnet_xdp_get_sq(vi);
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
ret = -EINVAL;
@@ -564,12 +596,13 @@ out:
sq->stats.kicks += kicks;
u64_stats_update_end(&sq->stats.syncp);
virtnet_xdp_put_sq(vi, sq);
return ret;
}
static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
}
/* We copy the packet for XDP in the following cases:
@@ -1473,12 +1506,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
xdp_do_flush();
if (xdp_xmit & VIRTIO_XDP_TX) {
sq = virtnet_xdp_sq(vi);
sq = virtnet_xdp_get_sq(vi);
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
u64_stats_update_begin(&sq->stats.syncp);
sq->stats.kicks++;
u64_stats_update_end(&sq->stats.syncp);
}
virtnet_xdp_put_sq(vi, sq);
}
return received;
@@ -2432,7 +2466,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
return -EOPNOTSUPP;
}
@@ -2453,10 +2487,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
/* XDP requires extra queues for XDP_TX */
if (curr_qp + xdp_qp > vi->max_queue_pairs) {
NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
netdev_warn(dev, "request %i queues but max is %i\n",
netdev_warn(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
curr_qp + xdp_qp, vi->max_queue_pairs);
return -ENOMEM;
xdp_qp = 0;
}
old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
@@ -2490,11 +2523,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
vi->xdp_queue_pairs = xdp_qp;
if (prog) {
vi->xdp_enabled = true;
for (i = 0; i < vi->max_queue_pairs; i++) {
rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
if (i == 0 && !old_prog)
virtnet_clear_guest_offloads(vi);
}
} else {
vi->xdp_enabled = false;
}
for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -2564,15 +2600,15 @@ static int virtnet_set_features(struct net_device *dev,
if (!vi->has_cvq)
return 0;
if ((dev->features ^ features) & NETIF_F_LRO) {
if (vi->xdp_queue_pairs)
if ((dev->features ^ features) & NETIF_F_GRO_HW) {
if (vi->xdp_enabled)
return -EBUSY;
if (features & NETIF_F_LRO)
if (features & NETIF_F_GRO_HW)
offloads = vi->guest_offloads_capable;
else
offloads = vi->guest_offloads_capable &
~GUEST_OFFLOAD_LRO_MASK;
~GUEST_OFFLOAD_GRO_HW_MASK;
err = virtnet_set_guest_offloads(vi, offloads);
if (err)
@@ -3047,9 +3083,9 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= NETIF_F_RXCSUM;
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
dev->features |= NETIF_F_LRO;
dev->features |= NETIF_F_GRO_HW;
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
dev->hw_features |= NETIF_F_LRO;
dev->hw_features |= NETIF_F_GRO_HW;
dev->vlan_features = dev->features;


@@ -1313,6 +1313,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
bool is_ndisc = ipv6_ndisc_frame(skb);
nf_reset_ct(skb);
/* loopback, multicast & non-ND link-local traffic; do not push through
* packet taps again. Reset pkt_type for upper layers to process skb.
* For strict packets with a source LLA, determine the dst using the
@@ -1369,6 +1371,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
skb->skb_iif = vrf_dev->ifindex;
IPCB(skb)->flags |= IPSKB_L3SLAVE;
nf_reset_ct(skb);
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
goto out;


@@ -197,12 +197,13 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr);
void ath_hw_setbssidmask(struct ath_common *common);
void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key);
void ath_key_delete(struct ath_common *common, u8 hw_key_idx);
int ath_key_config(struct ath_common *common,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key);
bool ath_hw_keyreset(struct ath_common *common, u16 entry);
bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac);
void ath_hw_cycle_counters_update(struct ath_common *common);
int32_t ath_hw_get_listen_time(struct ath_common *common);


@@ -521,7 +521,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
break;
case DISABLE_KEY:
ath_key_delete(common, key);
ath_key_delete(common, key->hw_key_idx);
break;
default:
ret = -EINVAL;


@@ -1461,7 +1461,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
}
break;
case DISABLE_KEY:
ath_key_delete(common, key);
ath_key_delete(common, key->hw_key_idx);
break;
default:
ret = -EINVAL;


@@ -820,6 +820,7 @@ struct ath_hw {
struct ath9k_pacal_info pacal_info;
struct ar5416Stats stats;
struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
DECLARE_BITMAP(pending_del_keymap, ATH_KEYMAX);
enum ath9k_int imask;
u32 imrs2_reg;


@@ -826,12 +826,80 @@ exit:
ieee80211_free_txskb(hw, skb);
}
static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
{
struct ath_buf *bf;
struct ieee80211_tx_info *txinfo;
struct ath_frame_info *fi;
list_for_each_entry(bf, txq_list, list) {
if (bf->bf_state.stale || !bf->bf_mpdu)
continue;
txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
if (fi->keyix == keyix)
return true;
}
return false;
}
static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
{
struct ath_hw *ah = sc->sc_ah;
int i;
struct ath_txq *txq;
bool key_in_use = false;
for (i = 0; !key_in_use && i < ATH9K_NUM_TX_QUEUES; i++) {
if (!ATH_TXQ_SETUP(sc, i))
continue;
txq = &sc->tx.txq[i];
if (!txq->axq_depth)
continue;
if (!ath9k_hw_numtxpending(ah, txq->axq_qnum))
continue;
ath_txq_lock(sc, txq);
key_in_use = ath9k_txq_list_has_key(&txq->axq_q, keyix);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
int idx = txq->txq_tailidx;
while (!key_in_use &&
!list_empty(&txq->txq_fifo[idx])) {
key_in_use = ath9k_txq_list_has_key(
&txq->txq_fifo[idx], keyix);
INCR(idx, ATH_TXFIFO_DEPTH);
}
}
ath_txq_unlock(sc, txq);
}
return key_in_use;
}
static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
if (!test_bit(keyix, ah->pending_del_keymap) ||
ath9k_txq_has_key(sc, keyix))
return;
/* No more TXQ frames point to this key cache entry, so delete it. */
clear_bit(keyix, ah->pending_del_keymap);
ath_key_delete(common, keyix);
}
static void ath9k_stop(struct ieee80211_hw *hw)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
bool prev_idle;
int i;
ath9k_deinit_channel_context(sc);
@@ -899,6 +967,14 @@ static void ath9k_stop(struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_pcu_lock);
for (i = 0; i < ATH_KEYMAX; i++)
ath9k_pending_key_del(sc, i);
/* Clear key cache entries explicitly to get rid of any potentially
* remaining keys.
*/
ath9k_cmn_init_crypto(sc->sc_ah);
ath9k_ps_restore(sc);
sc->ps_idle = prev_idle;
@@ -1548,12 +1624,11 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_node *an = (struct ath_node *) sta->drv_priv;
struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };
if (!an->ps_key)
return;
ath_key_delete(common, &ps_key);
ath_key_delete(common, an->ps_key);
an->ps_key = 0;
an->key_idx[0] = 0;
}
@@ -1724,6 +1799,12 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
if (sta)
an = (struct ath_node *)sta->drv_priv;
/* Delete pending key cache entries if no more frames are pointing to
* them in TXQs.
*/
for (i = 0; i < ATH_KEYMAX; i++)
ath9k_pending_key_del(sc, i);
switch (cmd) {
case SET_KEY:
if (sta)
@@ -1753,7 +1834,15 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
}
break;
case DISABLE_KEY:
ath_key_delete(common, key);
if (ath9k_txq_has_key(sc, key->hw_key_idx)) {
/* Delay key cache entry deletion until there are no
* remaining TXQ frames pointing to this entry.
*/
set_bit(key->hw_key_idx, sc->sc_ah->pending_del_keymap);
ath_hw_keysetmac(common, key->hw_key_idx, NULL);
} else {
ath_key_delete(common, key->hw_key_idx);
}
if (an) {
for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
if (an->key_idx[i] != key->hw_key_idx)
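
Note: the DISABLE_KEY path above defers deletion because frames already queued in hardware TXQs still reference this key cache index; resetting the entry underneath them could transmit with a stale or wrong key. Clearing only the MAC stops RX lookups immediately, and the entry is reaped once no queued frame points at it. A toy sketch of the mark-and-reap scheme (pending_del, key_in_use_by_tx and hw_key_clear are illustrative stand-ins):

	#include <stdbool.h>

	#define NKEYS 128

	static bool pending_del[NKEYS];

	/* Stand-ins for the TXQ scan and the key cache reset. */
	static bool key_in_use_by_tx(unsigned int idx) { (void)idx; return false; }
	static void hw_key_clear(unsigned int idx) { (void)idx; }

	static void key_delete_request(unsigned int idx)
	{
		if (key_in_use_by_tx(idx)) {
			pending_del[idx] = true;	/* reap once TX drains */
			return;
		}
		hw_key_clear(idx);
	}

	/* Called from quiescent points (set_key, stop) to finish delayed work. */
	static void key_reap_pending(void)
	{
		unsigned int i;

		for (i = 0; i < NKEYS; i++) {
			if (pending_del[i] && !key_in_use_by_tx(i)) {
				pending_del[i] = false;
				hw_key_clear(i);
			}
		}
	}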


@@ -84,8 +84,7 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
}
EXPORT_SYMBOL(ath_hw_keyreset);
static bool ath_hw_keysetmac(struct ath_common *common,
u16 entry, const u8 *mac)
bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
{
u32 macHi, macLo;
u32 unicast_flag = AR_KEYTABLE_VALID;
@@ -125,6 +124,7 @@ static bool ath_hw_keysetmac(struct ath_common *common,
return true;
}
EXPORT_SYMBOL(ath_hw_keysetmac);
static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
const struct ath_keyval *k,
@@ -581,29 +581,38 @@ EXPORT_SYMBOL(ath_key_config);
/*
* Delete Key.
*/
void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
void ath_key_delete(struct ath_common *common, u8 hw_key_idx)
{
ath_hw_keyreset(common, key->hw_key_idx);
if (key->hw_key_idx < IEEE80211_WEP_NKID)
/* Leave CCMP and TKIP (main key) configured to avoid disabling
* encryption for potentially pending frames already in a TXQ with the
* keyix pointing to this key entry. Instead, only clear the MAC address
* to prevent RX processing from using this key cache entry.
*/
if (test_bit(hw_key_idx, common->ccmp_keymap) ||
test_bit(hw_key_idx, common->tkip_keymap))
ath_hw_keysetmac(common, hw_key_idx, NULL);
else
ath_hw_keyreset(common, hw_key_idx);
if (hw_key_idx < IEEE80211_WEP_NKID)
return;
clear_bit(key->hw_key_idx, common->keymap);
clear_bit(key->hw_key_idx, common->ccmp_keymap);
if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
clear_bit(hw_key_idx, common->keymap);
clear_bit(hw_key_idx, common->ccmp_keymap);
if (!test_bit(hw_key_idx, common->tkip_keymap))
return;
clear_bit(key->hw_key_idx + 64, common->keymap);
clear_bit(hw_key_idx + 64, common->keymap);
clear_bit(key->hw_key_idx, common->tkip_keymap);
clear_bit(key->hw_key_idx + 64, common->tkip_keymap);
clear_bit(hw_key_idx, common->tkip_keymap);
clear_bit(hw_key_idx + 64, common->tkip_keymap);
if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
ath_hw_keyreset(common, key->hw_key_idx + 32);
clear_bit(key->hw_key_idx + 32, common->keymap);
clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
ath_hw_keyreset(common, hw_key_idx + 32);
clear_bit(hw_key_idx + 32, common->keymap);
clear_bit(hw_key_idx + 64 + 32, common->keymap);
clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
clear_bit(hw_key_idx + 32, common->tkip_keymap);
clear_bit(hw_key_idx + 64 + 32, common->tkip_keymap);
}
}
EXPORT_SYMBOL(ath_key_delete);
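
Note on the index arithmetic above: in the ath key cache one logical TKIP key occupies several slots; the RX MIC lives at idx+64, and on hardware without ATH_CRYPT_CAP_MIC_COMBINED the TX MIC halves sit at idx+32 and idx+64+32, so deleting one key must clear every alias. A toy model of just the bookkeeping (field names and sizes illustrative; assumes idx < 32 so the aliases stay in range):

	#include <stdbool.h>

	#define KEYMAX 128

	struct keycache {
		bool keymap[KEYMAX];
		bool tkip[KEYMAX];
		bool split_mic;		/* no combined-MIC support */
	};

	static void key_delete(struct keycache *kc, unsigned int idx)
	{
		kc->keymap[idx] = false;
		if (!kc->tkip[idx])
			return;
		kc->keymap[idx + 64] = false;		/* RX MIC slot */
		kc->tkip[idx] = false;
		kc->tkip[idx + 64] = false;
		if (kc->split_mic) {
			kc->keymap[idx + 32] = false;	/* TX MIC slots */
			kc->keymap[idx + 64 + 32] = false;
		}
	}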


@@ -1905,6 +1905,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
#ifdef CONFIG_X86_IO_APIC
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)


@@ -90,7 +90,8 @@ config PTP_1588_CLOCK_INES
config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86_32 || COMPILE_TEST
depends on HAS_IOMEM && NET
depends on HAS_IOMEM && PCI
depends on NET
imply PTP_1588_CLOCK
help
This driver adds support for using the PCH EG20T as a PTP


@@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev,
if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL;
else {
list_add_rcu(&h->node, &h->ctlr->dh_list);
h->sdev = sdev;
list_add_rcu(&h->node, &h->ctlr->dh_list);
}
spin_unlock(&list_lock);
err = SCSI_DH_OK;
@@ -778,11 +778,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
spin_lock(&list_lock);
if (h->ctlr) {
list_del_rcu(&h->node);
h->sdev = NULL;
kref_put(&h->ctlr->kref, release_controller);
}
spin_unlock(&list_lock);
sdev->handler_data = NULL;
synchronize_rcu();
kfree(h);
}


@@ -238,7 +238,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
mimd_t mimd;
uint32_t adapno;
int iterator;
bool is_found;
if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
*rval = -EFAULT;
@@ -254,12 +254,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
adapter = NULL;
iterator = 0;
is_found = false;
list_for_each_entry(adapter, &adapters_list_g, list) {
if (iterator++ == adapno) break;
if (iterator++ == adapno) {
is_found = true;
break;
}
}
if (!adapter) {
if (!is_found) {
*rval = -ENODEV;
return NULL;
}
@@ -725,6 +729,7 @@ ioctl_done(uioc_t *kioc)
uint32_t adapno;
int iterator;
mraid_mmadp_t* adapter;
bool is_found;
/*
* When the kioc returns from driver, make sure it still doesn't
@@ -747,19 +752,23 @@ ioctl_done(uioc_t *kioc)
iterator = 0;
adapter = NULL;
adapno = kioc->adapno;
is_found = false;
con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
"ioctl that was timedout before\n"));
list_for_each_entry(adapter, &adapters_list_g, list) {
if (iterator++ == adapno) break;
if (iterator++ == adapno) {
is_found = true;
break;
}
}
kioc->timedout = 0;
if (adapter) {
if (is_found)
mraid_mm_dealloc_kioc( adapter, kioc );
}
}
else {
wake_up(&wait_q);
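
Note: both megaraid fixes above exist because list_for_each_entry() never leaves its cursor NULL; after a full traversal the cursor points at a fake entry computed from the list head itself, so "if (!adapter)" can never fire and dereferencing the cursor is out-of-bounds. The is_found flag is the reliable signal. A self-contained illustration of why:

	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))
	#define list_for_each_entry(pos, head, member)				\
		for (pos = container_of((head)->next, __typeof__(*pos), member); \
		     &pos->member != (head);					\
		     pos = container_of(pos->member.next, __typeof__(*pos), member))

	struct adapter { int id; struct list_head node; };

	static struct adapter *find_adapter(struct list_head *head, int id)
	{
		struct adapter *a;
		int found = 0;

		list_for_each_entry(a, head, node) {
			if (a->id == id) {
				found = 1;
				break;
			}
		}
		/* On a miss, "a" aliases the list head itself, never NULL. */
		return found ? a : NULL;
	}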


@@ -684,8 +684,7 @@ int pm8001_dev_found(struct domain_device *dev)
void pm8001_task_done(struct sas_task *task)
{
if (!del_timer(&task->slow_task->timer))
return;
del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
@@ -693,9 +692,14 @@ static void pm8001_tmf_timedout(struct timer_list *t)
{
struct sas_task_slow *slow = from_timer(slow, t, timer);
struct sas_task *task = slow->task;
unsigned long flags;
spin_lock_irqsave(&task->task_state_lock, flags);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
complete(&task->slow_task->completion);
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
}
#define PM8001_TASK_TIMEOUT 20
@@ -748,14 +752,11 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
}
res = -TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
pm8001_dbg(pm8001_ha, FAIL,
"TMF task[%x]timeout.\n",
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
tmf->tmf);
goto ex_err;
}
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAM_STAT_GOOD) {
@@ -834,13 +835,10 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
wait_for_completion(&task->slow_task->completion);
res = TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
pm8001_dbg(pm8001_ha, FAIL,
"TMF task timeout.\n");
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
pm8001_dbg(pm8001_ha, FAIL, "TMF task timeout.\n");
goto ex_err;
}
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAM_STAT_GOOD) {


@@ -453,7 +453,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
error = shost->hostt->target_alloc(starget);
if(error) {
dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
if (error != -ENXIO)
dev_err(dev, "target allocation failed, error %d\n", error);
/* don't want scsi_target_reap to do the final
* put because it will be under the host lock */
scsi_target_destroy(starget);


@@ -807,11 +807,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
mutex_lock(&sdev->state_mutex);
ret = scsi_device_set_state(sdev, state);
/*
* If the device state changes to SDEV_RUNNING, we need to run
* the queue to avoid I/O hang.
* If the device state changes to SDEV_RUNNING, we need to
* rescan the device to revalidate it, and run the queue to
* avoid I/O hang.
*/
if (ret == 0 && state == SDEV_RUNNING)
if (ret == 0 && state == SDEV_RUNNING) {
scsi_rescan_device(dev);
blk_mq_run_hw_queues(sdev->request_queue, true);
}
mutex_unlock(&sdev->state_mutex);
return ret == 0 ? count : -EINVAL;


@@ -66,7 +66,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
int ret = 0;
spin_lock_irqsave(&ctrl->txn_lock, flags);
ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0,
ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1,
SLIM_MAX_TIDS, GFP_ATOMIC);
if (ret < 0) {
spin_unlock_irqrestore(&ctrl->txn_lock, flags);
@@ -131,7 +131,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
goto slim_xfer_err;
}
}
/* Initialize tid to invalid value */
txn->tid = 0;
need_tid = slim_tid_txn(txn->mt, txn->mc);
if (need_tid) {
@@ -163,7 +164,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
txn->mt, txn->mc, txn->la, ret);
slim_xfer_err:
if (!clk_pause_msg && (!need_tid || ret == -ETIMEDOUT)) {
if (!clk_pause_msg && (txn->tid == 0 || ret == -ETIMEDOUT)) {
/*
* remove runtime-pm vote if this was TX only, or
* if there was error during this transaction
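
Note: the slimbus hunks above cooperate: tids are now allocated cyclically starting at 1 and txn->tid is primed to 0, so the teardown path can use "tid == 0" to mean "no tid was ever assigned" instead of re-deriving that from the message type. Reserving 0 as a sentinel in a cyclic allocator looks roughly like this (toy allocator, not the kernel idr API):

	#define MAX_TIDS 256

	static unsigned char in_use[MAX_TIDS];
	static int next_tid = 1;

	/* Hands out 1..MAX_TIDS-1 and never 0, so 0 can mean "unassigned". */
	static int tid_alloc(void)
	{
		int tries, id;

		for (tries = 0; tries < MAX_TIDS - 1; tries++) {
			id = next_tid;
			next_tid = (next_tid % (MAX_TIDS - 1)) + 1;
			if (!in_use[id]) {
				in_use[id] = 1;
				return id;
			}
		}
		return -1;	/* exhausted */
	}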


@@ -1065,7 +1065,8 @@ static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
{
u32 cfg = readl_relaxed(ctrl->ngd->base);
if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN ||
ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP)
qcom_slim_ngd_init_dma(ctrl);
/* By default enable message queues */
@@ -1116,6 +1117,7 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
return 0;
}
qcom_slim_ngd_setup(ctrl);
return 0;
}
@@ -1506,6 +1508,7 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
int ret = 0;
qcom_slim_ngd_exit_dma(ctrl);
if (!ctrl->qmi.handle)
return 0;


@@ -5,13 +5,11 @@
*/
#include <linux/device.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include "../../gpu/drm/mediatek/mtk_drm_ddp.h"
#include "../../gpu/drm/mediatek/mtk_drm_ddp_comp.h"
#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048


@@ -167,10 +167,17 @@ err_put_ctlr:
return ret;
}
static const struct spi_device_id spi_mux_id[] = {
{ "spi-mux" },
{ }
};
MODULE_DEVICE_TABLE(spi, spi_mux_id);
static const struct of_device_id spi_mux_of_match[] = {
{ .compatible = "spi-mux" },
{ }
};
MODULE_DEVICE_TABLE(of, spi_mux_of_match);
static struct spi_driver spi_mux_driver = {
.probe = spi_mux_probe,
@@ -178,6 +185,7 @@ static struct spi_driver spi_mux_driver = {
.name = "spi-mux",
.of_match_table = spi_mux_of_match,
},
.id_table = spi_mux_id,
};
module_spi_driver(spi_mux_driver);


@@ -1133,7 +1133,7 @@ static int do_proc_control(struct usb_dev_state *ps,
"wIndex=%04x wLength=%04x\n",
ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
ctrl->wIndex, ctrl->wLength);
if (ctrl->bRequestType & 0x80) {
if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength) {
pipe = usb_rcvctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl->wLength, tmo, SUBMIT, NULL, 0);
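
Note: the old test keyed only on the direction bit, so a zero-length request with USB_DIR_IN set was submitted on the receive pipe even though it has no data stage. The effective rule after the fix reduces to the predicate below (uses_in_pipe is not a kernel helper, just a restatement); the usb_get_descriptor()/usb_get_string() hunks that follow refuse zero-length reads outright for the same reason.

	#include <stdbool.h>
	#include <stdint.h>

	#define USB_DIR_IN 0x80

	/* A control transfer only reads data when the direction bit is set
	 * AND wLength is non-zero; otherwise there is no IN data stage.
	 */
	static bool uses_in_pipe(uint8_t bRequestType, uint16_t wLength)
	{
		return (bRequestType & USB_DIR_IN) && wLength != 0;
	}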


@@ -785,6 +785,9 @@ int usb_get_descriptor(struct usb_device *dev, unsigned char type,
int i;
int result;
if (size <= 0) /* No point in asking for no data */
return -EINVAL;
memset(buf, 0, size); /* Make sure we parse really received data */
for (i = 0; i < 3; ++i) {
@@ -833,6 +836,9 @@ static int usb_get_string(struct usb_device *dev, unsigned short langid,
int i;
int result;
if (size <= 0) /* No point in asking for no data */
return -EINVAL;
for (i = 0; i < 3; ++i) {
/* retry on length 0 or stall; some devices are flakey */
result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),


@@ -454,11 +454,6 @@ out:
mutex_unlock(&mr->mkey_mtx);
}
static bool map_empty(struct vhost_iotlb *iotlb)
{
return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
}
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
bool *change_map)
{
@@ -466,10 +461,6 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
int err = 0;
*change_map = false;
if (map_empty(iotlb)) {
mlx5_vdpa_destroy_mr(mvdev);
return 0;
}
mutex_lock(&mr->mkey_mtx);
if (mr->initialized) {
mlx5_vdpa_info(mvdev, "memory map update\n");


@@ -623,7 +623,8 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
long pinned;
int ret = 0;
if (msg->iova < v->range.first ||
if (msg->iova < v->range.first || !msg->size ||
msg->iova > U64_MAX - msg->size + 1 ||
msg->iova + msg->size - 1 > v->range.last)
return -EINVAL;


@@ -735,10 +735,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
(sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size)
{
/* Make sure 64 bit math will not overflow. */
return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
if (uaddr > ULONG_MAX || size > ULONG_MAX)
return true;
if (!size)
return false;
return uaddr > ULONG_MAX - size + 1;
}
/* Caller should have vq mutex and device mutex. */
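
Note on the arithmetic in the two fixes above: a range starting at uaddr with size bytes ends at uaddr + size - 1, so it fits iff uaddr <= ULONG_MAX - size + 1. The old "uaddr > ULONG_MAX - size" form rejected the legal range ending exactly at ULONG_MAX, and the vdpa hunk applies the same identity to msg->iova in u64. A checkable userspace version (assumes an LP64 host where unsigned long is 64-bit):

	#include <assert.h>
	#include <limits.h>
	#include <stdbool.h>
	#include <stdint.h>

	static bool overflows(uint64_t uaddr, uint64_t size)
	{
		if (uaddr > ULONG_MAX || size > ULONG_MAX)
			return true;
		if (!size)
			return false;	/* empty range never overflows */
		return uaddr > ULONG_MAX - size + 1;
	}

	int main(void)
	{
		assert(!overflows(ULONG_MAX, 1));	/* last byte == ULONG_MAX: ok */
		assert(overflows(ULONG_MAX, 2));	/* would wrap past the top */
		assert(!overflows(0, ULONG_MAX));	/* whole space is fine */
		return 0;
	}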


@@ -357,6 +357,7 @@ int register_virtio_device(struct virtio_device *dev)
virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
INIT_LIST_HEAD(&dev->vqs);
spin_lock_init(&dev->vqs_list_lock);
/*
* device_add() causes the bus infrastructure to look for a matching


@@ -1668,7 +1668,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
cpu_to_le16(vq->packed.event_flags_shadow);
}
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
err_desc_extra:
@@ -2126,7 +2128,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
memset(vq->split.desc_state, 0, vring.num *
sizeof(struct vring_desc_state_split));
spin_lock(&vdev->vqs_list_lock);
list_add_tail(&vq->vq.list, &vdev->vqs);
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
@@ -2210,7 +2214,9 @@ void vring_del_virtqueue(struct virtqueue *_vq)
}
if (!vq->packed_ring)
kfree(vq->split.desc_state);
spin_lock(&vq->vq.vdev->vqs_list_lock);
list_del(&_vq->list);
spin_unlock(&vq->vq.vdev->vqs_list_lock);
kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -2274,10 +2280,12 @@ void virtio_break_device(struct virtio_device *dev)
{
struct virtqueue *_vq;
spin_lock(&dev->vqs_list_lock);
list_for_each_entry(_vq, &dev->vqs, list) {
struct vring_virtqueue *vq = to_vvq(_vq);
vq->broken = true;
}
spin_unlock(&dev->vqs_list_lock);
}
EXPORT_SYMBOL_GPL(virtio_break_device);
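
Note: these hunks close a race where virtio_break_device() walks dev->vqs while another CPU adds or deletes a virtqueue; every reader and writer of the list now takes the new vqs_list_lock. The invariant in miniature (pthread mutex standing in for the spinlock; struct vq, vq_add and break_all are illustrative):

	#include <pthread.h>

	struct vq { struct vq *next; int broken; };

	static struct vq *vqs_head;
	static pthread_mutex_t vqs_list_lock = PTHREAD_MUTEX_INITIALIZER;

	static void vq_add(struct vq *vq)
	{
		pthread_mutex_lock(&vqs_list_lock);
		vq->next = vqs_head;
		vqs_head = vq;
		pthread_mutex_unlock(&vqs_list_lock);
	}

	/* Traversal takes the same lock, so it cannot see a half-linked node. */
	static void break_all(void)
	{
		struct vq *vq;

		pthread_mutex_lock(&vqs_list_lock);
		for (vq = vqs_head; vq; vq = vq->next)
			vq->broken = 1;
		pthread_mutex_unlock(&vqs_list_lock);
	}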


@@ -8904,8 +8904,14 @@ static int btrfs_rename_exchange(struct inode *old_dir,
bool dest_log_pinned = false;
bool need_abort = false;
/* we only allow rename subvolume link between subvolumes */
if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
/*
* For non-subvolumes allow exchange only within one subvolume, in the
* same inode namespace. Two subvolumes (represented as directory) can
* be exchanged as they're a logical link and have a fixed inode number.
*/
if (root != dest &&
(old_ino != BTRFS_FIRST_FREE_OBJECTID ||
new_ino != BTRFS_FIRST_FREE_OBJECTID))
return -EXDEV;
/* close the race window with snapshot create/destroy ioctl */
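
Note: the rewritten condition above reads more clearly as a predicate: cross-root RENAME_EXCHANGE is allowed only when both entries are subvolume roots, which always carry inode number BTRFS_FIRST_FREE_OBJECTID (256); anything else must stay within one root. Equivalent form (sketch):

	#include <stdbool.h>

	#define BTRFS_FIRST_FREE_OBJECTID 256ULL

	static bool exchange_allowed(bool same_root,
				     unsigned long long old_ino,
				     unsigned long long new_ino)
	{
		/* Same subvolume: always fine. Across subvolumes: only if both
		 * victims are subvolume roots (fixed inode number).
		 */
		return same_root ||
		       (old_ino == BTRFS_FIRST_FREE_OBJECTID &&
			new_ino == BTRFS_FIRST_FREE_OBJECTID);
	}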


@@ -9078,9 +9078,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
if (ctx->flags & IORING_SETUP_SQPOLL) {
io_cqring_overflow_flush(ctx, false, NULL, NULL);
if (unlikely(ctx->sqo_dead)) {
ret = -EOWNERDEAD;
if (unlikely(ctx->sqo_dead))
goto out;
}
if (flags & IORING_ENTER_SQ_WAKEUP)
wake_up(&ctx->sq_data->wait);
if (flags & IORING_ENTER_SQ_WAIT) {
@@ -9601,11 +9602,12 @@ static int io_register_personality(struct io_ring_ctx *ctx)
ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)iod,
XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
if (!ret)
return id;
if (ret < 0) {
put_cred(iod->creds);
kfree(iod);
return ret;
}
return id;
}
static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
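
Note: xa_alloc_cyclic() returns 0 on success, 1 on success after the index wrapped, and a negative errno on failure. The old "if (!ret) return id;" therefore fell into the cleanup path on a successful wrap, dropping credentials that were just registered. Only ret < 0 is an error (sketch; alloc_cyclic_id and cleanup are stand-ins):

	/* Stand-ins: 0 = ok, 1 = ok but the index wrapped, <0 = error. */
	static int alloc_cyclic_id(int *id) { *id = 1; return 0; }
	static void cleanup(void) { }

	static int register_personality(void)
	{
		int id, ret;

		ret = alloc_cyclic_id(&id);
		if (ret < 0) {		/* not "if (ret)": 1 still means success */
			cleanup();
			return ret;
		}
		return id;
	}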


@@ -1697,8 +1697,12 @@ static inline bool may_mount(void)
}
#ifdef CONFIG_MANDATORY_FILE_LOCKING
static inline bool may_mandlock(void)
static bool may_mandlock(void)
{
pr_warn_once("======================================================\n"
"WARNING: the mand mount option is being deprecated and\n"
" will be removed in v5.15!\n"
"======================================================\n");
return capable(CAP_SYS_ADMIN);
}
#else


@@ -361,12 +361,15 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
static inline void mem_cgroup_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg,
bool in_low_reclaim)
unsigned long *min,
unsigned long *low)
{
*min = *low = 0;
if (mem_cgroup_disabled())
return 0;
return;
/*
* There is no reclaim protection applied to a targeted reclaim.
@@ -402,13 +405,10 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
*
*/
if (root == memcg)
return 0;
return;
if (in_low_reclaim)
return READ_ONCE(memcg->memory.emin);
return max(READ_ONCE(memcg->memory.emin),
READ_ONCE(memcg->memory.elow));
*min = READ_ONCE(memcg->memory.emin);
*low = READ_ONCE(memcg->memory.elow);
}
void mem_cgroup_calculate_protection(struct mem_cgroup *root,
@@ -988,11 +988,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
{
}
static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
static inline void mem_cgroup_protection(struct mem_cgroup *root,
struct mem_cgroup *memcg,
bool in_low_reclaim)
unsigned long *min,
unsigned long *low)
{
return 0;
*min = *low = 0;
}
static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,


@@ -9,6 +9,39 @@
enum mtk_ddp_comp_id;
struct device;
enum mtk_ddp_comp_id {
DDP_COMPONENT_AAL0,
DDP_COMPONENT_AAL1,
DDP_COMPONENT_BLS,
DDP_COMPONENT_CCORR,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
DDP_COMPONENT_DITHER,
DDP_COMPONENT_DPI0,
DDP_COMPONENT_DPI1,
DDP_COMPONENT_DSI0,
DDP_COMPONENT_DSI1,
DDP_COMPONENT_DSI2,
DDP_COMPONENT_DSI3,
DDP_COMPONENT_GAMMA,
DDP_COMPONENT_OD0,
DDP_COMPONENT_OD1,
DDP_COMPONENT_OVL0,
DDP_COMPONENT_OVL_2L0,
DDP_COMPONENT_OVL_2L1,
DDP_COMPONENT_OVL1,
DDP_COMPONENT_PWM0,
DDP_COMPONENT_PWM1,
DDP_COMPONENT_PWM2,
DDP_COMPONENT_RDMA0,
DDP_COMPONENT_RDMA1,
DDP_COMPONENT_RDMA2,
DDP_COMPONENT_UFOE,
DDP_COMPONENT_WDMA0,
DDP_COMPONENT_WDMA1,
DDP_COMPONENT_ID_MAX,
};
void mtk_mmsys_ddp_connect(struct device *dev,
enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next);


@@ -110,6 +110,7 @@ struct virtio_device {
bool config_enabled;
bool config_change_pending;
spinlock_t config_lock;
spinlock_t vqs_list_lock; /* Protects VQs list access */
struct device dev;
struct virtio_device_id id;
const struct virtio_config_ops *config;


@@ -312,7 +312,6 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action,
if (flow_offload_has_one_action(action))
return true;
if (action) {
flow_action_for_each(i, action_entry, action) {
if (i && action_entry->hw_stats != last_hw_stats) {
NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
@@ -320,7 +319,6 @@ flow_action_mixed_hw_stats_check(const struct flow_action *action,
}
last_hw_stats = action_entry->hw_stats;
}
}
return true;
}


@@ -10705,6 +10705,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
if (aux_data[i].seen)
continue;
memcpy(insn + i, &trap, sizeof(trap));
aux_data[i].zext_dst = false;
}
}


@@ -3405,6 +3405,8 @@ trace_action_create_field_var(struct hist_trigger_data *hist_data,
event = data->match_data.event;
}
if (!event)
goto free;
/*
* At this point, we're looking at a field on another
* event. Because we can't modify a hist trigger on


@@ -108,9 +108,12 @@ struct scan_control {
unsigned int may_swap:1;
/*
* Cgroups are not reclaimed below their configured memory.low,
* unless we threaten to OOM. If any cgroups are skipped due to
* memory.low and nothing was reclaimed, go back for memory.low.
* Cgroup memory below memory.low is protected as long as we
* don't threaten to OOM. If any cgroup is reclaimed at
* reduced force or passed over entirely due to its memory.low
* setting (memcg_low_skipped), and nothing is reclaimed as a
* result, then go back for one more cycle that reclaims the protected
* memory (memcg_low_reclaim) to avert OOM.
*/
unsigned int memcg_low_reclaim:1;
unsigned int memcg_low_skipped:1;
@@ -2392,15 +2395,14 @@ out:
for_each_evictable_lru(lru) {
int file = is_file_lru(lru);
unsigned long lruvec_size;
unsigned long low, min;
unsigned long scan;
unsigned long protection;
lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
protection = mem_cgroup_protection(sc->target_mem_cgroup,
memcg,
sc->memcg_low_reclaim);
mem_cgroup_protection(sc->target_mem_cgroup, memcg,
&min, &low);
if (protection) {
if (min || low) {
/*
* Scale a cgroup's reclaim pressure by proportioning
* its current usage to its memory.low or memory.min
@@ -2431,6 +2433,15 @@ out:
* hard protection.
*/
unsigned long cgroup_size = mem_cgroup_size(memcg);
unsigned long protection;
/* memory.low scaling, make sure we retry before OOM */
if (!sc->memcg_low_reclaim && low > min) {
protection = low;
sc->memcg_low_skipped = 1;
} else {
protection = min;
}
/* Avoid TOCTOU with earlier protection check */
cgroup_size = max(cgroup_size, protection);
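
Note: with min and low reported separately, the caller chooses the effective protection itself: memory.min always holds, while memory.low holds only on the first pass (recording memcg_low_skipped so reclaim can come back harder before declaring OOM). The scan target then shrinks in proportion to how much of the cgroup's usage is protected, roughly as below (simplified sketch; the kernel also clamps the result and handles usage entirely within protection):

	static unsigned long scan_target(unsigned long lruvec_size,
					 unsigned long usage,
					 unsigned long min, unsigned long low,
					 int low_reclaim_pass)
	{
		/* memory.low only protects until we decide to retry harder */
		unsigned long protection =
			(!low_reclaim_pass && low > min) ? low : min;

		if (protection >= usage)
			return 0;	/* fully protected: skip this cgroup */
		return lruvec_size - lruvec_size * protection / (usage + 1);
	}

With usage 1024 and protection 256, roughly three quarters of the LRU stays scannable; raising protection toward usage drives the target to zero.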


@@ -1290,7 +1290,7 @@ static int hidp_session_thread(void *arg)
/* cleanup runtime environment */
remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
remove_wait_queue(sk_sleep(session->intr_sock->sk), &ctrl_wait);
remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
wake_up_interruptible(&session->report_queue);
hidp_del_timer(session);


@@ -41,9 +41,9 @@ extern bool dccp_debug;
#define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a)
#define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
#else
#define dccp_pr_debug(format, a...)
#define dccp_pr_debug_cat(format, a...)
#define dccp_debug(format, a...)
#define dccp_pr_debug(format, a...) do {} while (0)
#define dccp_pr_debug_cat(format, a...) do {} while (0)
#define dccp_debug(format, a...) do {} while (0)
#endif
extern struct inet_hashinfo dccp_hashinfo;
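
Note: the empty do { } while (0) bodies above are not cosmetic. A debug macro that expands to nothing leaves a bare ";" as the body of any "if (x) dccp_pr_debug(...);", which trips empty-body compiler warnings and misbehaves around a dangling else; the do-while form remains a single real statement. Minimal demonstration (pr_dbg is a stand-in; ##__VA_ARGS__ is the GNU cpp extension the kernel uses):

	#include <stdio.h>

	#ifdef DEBUG
	#define pr_dbg(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
	#else
	#define pr_dbg(fmt, ...) do { } while (0)	/* still one statement */
	#endif

	int main(void)
	{
		int err = -1;

		if (err)
			pr_dbg("err=%d\n", err);	/* safe with or without DEBUG */
		else
			puts("no error");
		return 0;
	}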


@@ -503,6 +503,7 @@ void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
}
skb->dev = vport->dev;
skb->tstamp = 0;
vport->ops->send(skb);
return;


@@ -720,7 +720,7 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
skip_hash:
if (flow_override)
flow_hash = flow_override - 1;
else if (use_skbhash)
else if (use_skbhash && (flow_mode & CAKE_FLOW_FLOWS))
flow_hash = skb->hash;
if (host_override) {
dsthost_hash = host_override - 1;
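
Note: skb->hash is a per-flow hash, so it is only a valid substitute for the computed hash when the selected flow_mode actually dissects flows; in the host-only hashing modes the old code let it through and broke src/dst-host isolation. The gate reduces to the sketch below (the flag value is illustrative, not the driver's enum):

	#include <stdbool.h>
	#include <stdint.h>

	#define CAKE_FLOW_FLOWS 0x04	/* flow-aware bit in flow_mode (illustrative) */

	static uint32_t pick_flow_hash(uint32_t computed, uint32_t skb_hash,
				       bool have_skb_hash, unsigned int flow_mode)
	{
		if (have_skb_hash && (flow_mode & CAKE_FLOW_FLOWS))
			return skb_hash;	/* reuse only in flow-hashing modes */
		return computed;
	}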


@@ -250,7 +250,7 @@ static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms)
break;
}
WARN_ON(!pos);
WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list));
if (--pos->users)
return;


@@ -3458,7 +3458,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
struct hda_gen_spec *spec = codec->spec;
const struct hda_input_mux *imux;
struct nid_path *path;
int i, adc_idx, err = 0;
int i, adc_idx, ret, err = 0;
imux = &spec->input_mux;
adc_idx = kcontrol->id.index;
@@ -3468,10 +3468,14 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
if (!path || !path->ctls[type])
continue;
kcontrol->private_value = path->ctls[type];
err = func(kcontrol, ucontrol);
if (err < 0)
ret = func(kcontrol, ucontrol);
if (ret < 0) {
err = ret;
break;
}
if (ret > 0)
err = 1;
}
mutex_unlock(&codec->control_mutex);
if (err >= 0 && spec->cap_sync_hook)
spec->cap_sync_hook(codec, kcontrol, ucontrol);
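
Note: control put() callbacks return 1 when a value changed, 0 when it did not, and a negative errno on failure. The old loop stored every return into err, so a later 0 clobbered an earlier 1 and the aggregate "changed" status was lost. The fixed aggregation latches the positive result (sketch; apply_all is a stand-in, not an ALSA helper):

	/* Run every callback: first error aborts, any "changed" (1) survives. */
	static int apply_all(int (*fns[])(void), int n)
	{
		int i, ret, err = 0;

		for (i = 0; i < n; i++) {
			ret = fns[i]();
			if (ret < 0)
				return ret;
			if (ret > 0)
				err = 1;	/* never downgraded by a later 0 */
		}
		return err;
	}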


@@ -6590,6 +6590,7 @@ enum {
ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP,
ALC623_FIXUP_LENOVO_THINKSTATION_P340,
ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -8168,6 +8169,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC255_FIXUP_XIAOMI_HEADSET_MIC
},
[ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc269_fixup_limit_int_mic_boost,
.chained = true,
.chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
},
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8258,6 +8265,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0a61, "Dell XPS 15 9510", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -8363,8 +8371,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8862, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x103c, 0x8863, "HP ProBook 445 G8 Notebook PC", ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),


@@ -1041,6 +1041,7 @@ static const struct hda_fixup via_fixups[] = {
};
static const struct snd_pci_quirk vt2002p_fixups[] = {
SND_PCI_QUIRK(0x1043, 0x13f7, "Asus B23E", VIA_FIXUP_POWER_SAVE),
SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),


@@ -127,7 +127,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
snd_pcm_uframes_t period_size;
ssize_t periodbytes;
ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
u32 buffer_addr = substream->runtime->dma_addr;
u32 buffer_addr = virt_to_phys(substream->runtime->dma_area);
channels = substream->runtime->channels;
period_size = substream->runtime->period_size;