Merge 5.10.15 into android12-5.10

Changes in 5.10.15
	USB: serial: cp210x: add pid/vid for WSDA-200-USB
	USB: serial: cp210x: add new VID/PID for supporting Teraoka AD2000
	USB: serial: option: Adding support for Cinterion MV31
	usb: host: xhci: mvebu: make USB 3.0 PHY optional for Armada 3720
	USB: gadget: legacy: fix an error code in eth_bind()
	usb: gadget: aspeed: add missing of_node_put
	USB: usblp: don't call usb_set_interface if there's a single alt
	usb: renesas_usbhs: Clear pipe running flag in usbhs_pkt_pop()
	usb: dwc2: Fix endpoint direction check in ep_from_windex
	usb: dwc3: fix clock issue during resume in OTG mode
	usb: xhci-mtk: fix unreleased bandwidth data
	usb: xhci-mtk: skip dropping bandwidth of unchecked endpoints
	usb: xhci-mtk: break loop when find the endpoint to drop
	ARM: OMAP1: OSK: fix ohci-omap breakage
	arm64: dts: qcom: c630: keep both touchpad devices enabled
	Input: i8042 - unbreak Pegatron C15B
	arm64: dts: amlogic: meson-g12: Set FL-adj property value
	arm64: dts: rockchip: fix vopl iommu irq on px30
	arm64: dts: rockchip: Use only supported PCIe link speed on Pinebook Pro
	ARM: dts: stm32: Fix polarity of the DH DRC02 uSD card detect
	ARM: dts: stm32: Connect card-detect signal on DHCOM
	ARM: dts: stm32: Disable WP on DHCOM uSD slot
	ARM: dts: stm32: Disable optional TSC2004 on DRC02 board
	ARM: dts: stm32: Fix GPIO hog flags on DHCOM DRC02
	vdpa/mlx5: Fix memory key MTT population
	bpf, cgroup: Fix optlen WARN_ON_ONCE toctou
	bpf, cgroup: Fix problematic bounds check
	bpf, inode_storage: Put file handler if no storage was found
	um: virtio: free vu_dev only with the contained struct device
	bpf, preload: Fix build when $(O) points to a relative path
	arm64: dts: meson: switch TFLASH_VDD_EN pin to open drain on Odroid-C4
	r8169: work around RTL8125 UDP hw bug
	rxrpc: Fix deadlock around release of dst cached on udp tunnel
	arm64: dts: ls1046a: fix dcfg address range
	SUNRPC: Fix NFS READs that start at non-page-aligned offsets
	igc: set the default return value to -IGC_ERR_NVM in igc_write_nvm_srwr
	igc: check return value of ret_val in igc_config_fc_after_link_up
	i40e: Revert "i40e: don't report link up for a VF who hasn't enabled queues"
	ibmvnic: device remove has higher precedence over reset
	net/mlx5: Fix function calculation for page trees
	net/mlx5: Fix leak upon failure of rule creation
	net/mlx5e: Update max_opened_tc also when channels are closed
	net/mlx5e: Release skb in case of failure in tc update skb
	net: lapb: Copy the skb before sending a packet
	net: mvpp2: TCAM entry enable should be written after SRAM data
	r8169: fix WoL on shutdown if CONFIG_DEBUG_SHIRQ is set
	net: ipa: pass correct dma_handle to dma_free_coherent()
	ARM: dts: sun7i: a20: bananapro: Fix ethernet phy-mode
	nvmet-tcp: fix out-of-bounds access when receiving multiple h2cdata PDUs
	vdpa/mlx5: Restore the hardware used index after change map
	memblock: do not start bottom-up allocations with kernel_end
	kbuild: fix duplicated flags in DEBUG_CFLAGS
	thunderbolt: Fix possible NULL pointer dereference in tb_acpi_add_link()
	ovl: fix dentry leak in ovl_get_redirect
	ovl: avoid deadlock on directory ioctl
	ovl: implement volatile-specific fsync error behaviour
	mac80211: fix station rate table updates on assoc
	gpiolib: free device name on error path to fix kmemleak
	fgraph: Initialize tracing_graph_pause at task creation
	tracing/kprobe: Fix to support kretprobe events on unloaded modules
	kretprobe: Avoid re-registration of the same kretprobe earlier
	tracing: Use pause-on-trace with the latency tracers
	tracepoint: Fix race between tracing and removing tracepoint
	libnvdimm/namespace: Fix visibility of namespace resource attribute
	libnvdimm/dimm: Avoid race between probe and available_slots_show()
	genirq: Prevent [devm_]irq_alloc_desc from returning irq 0
	genirq/msi: Activate Multi-MSI early when MSI_FLAG_ACTIVATE_EARLY is set
	scripts: use pkg-config to locate libcrypto
	xhci: fix bounce buffer usage for non-sg list case
	RISC-V: Define MAXPHYSMEM_1GB only for RV32
	cifs: report error instead of invalid when revalidating a dentry fails
	iommu: Check dev->iommu in dev_iommu_priv_get() before dereferencing it
	smb3: Fix out-of-bounds bug in SMB2_negotiate()
	smb3: fix crediting for compounding when only one request in flight
	mmc: sdhci-pltfm: Fix linking err for sdhci-brcmstb
	mmc: core: Limit retries when analyse of SDIO tuples fails
	Fix unsynchronized access to sev members through svm_register_enc_region
	drm/dp/mst: Export drm_dp_get_vc_payload_bw()
	drm/i915: Fix the MST PBN divider calculation
	drm/i915/gem: Drop lru bumping on display unpinning
	drm/i915/gt: Close race between enable_breadcrumbs and cancel_breadcrumbs
	drm/i915/display: Prevent double YUV range correction on HDR planes
	drm/i915: Extract intel_ddi_power_up_lanes()
	drm/i915: Power up combo PHY lanes for for HDMI as well
	drm/amd/display: Revert "Fix EDID parsing after resume from suspend"
	io_uring: don't modify identity's files uncess identity is cowed
	nvme-pci: avoid the deepest sleep state on Kingston A2000 SSDs
	KVM: SVM: Treat SVM as unsupported when running as an SEV guest
	KVM: x86/mmu: Fix TDP MMU zap collapsible SPTEs
	KVM: x86: Allow guests to see MSR_IA32_TSX_CTRL even if tsx=off
	KVM: x86: fix CPUID entries returned by KVM_GET_CPUID2 ioctl
	KVM: x86: Update emulator context mode if SYSENTER xfers to 64-bit mode
	KVM: x86: Set so called 'reserved CR3 bits in LM mask' at vCPU reset
	DTS: ARM: gta04: remove legacy spi-cs-high to make display work again
	ARM: dts; gta04: SPI panel chip select is active low
	ARM: footbridge: fix dc21285 PCI configuration accessors
	ARM: 9043/1: tegra: Fix misplaced tegra_uart_config in decompressor
	mm: hugetlbfs: fix cannot migrate the fallocated HugeTLB page
	mm: hugetlb: fix a race between freeing and dissolving the page
	mm: hugetlb: fix a race between isolating and freeing page
	mm: hugetlb: remove VM_BUG_ON_PAGE from page_huge_active
	mm, compaction: move high_pfn to the for loop scope
	mm/vmalloc: separate put pages and flush VM flags
	mm: thp: fix MADV_REMOVE deadlock on shmem THP
	mm/filemap: add missing mem_cgroup_uncharge() to __add_to_page_cache_locked()
	x86/build: Disable CET instrumentation in the kernel
	x86/debug: Fix DR6 handling
	x86/debug: Prevent data breakpoints on __per_cpu_offset
	x86/debug: Prevent data breakpoints on cpu_dr7
	x86/apic: Add extra serialization for non-serializing MSRs
	Input: goodix - add support for Goodix GT9286 chip
	Input: xpad - sync supported devices with fork on GitHub
	Input: ili210x - implement pressure reporting for ILI251x
	md: Set prev_flush_start and flush_bio in an atomic way
	igc: Report speed and duplex as unknown when device is runtime suspended
	neighbour: Prevent a dead entry from updating gc_list
	net: ip_tunnel: fix mtu calculation
	udp: ipv4: manipulate network header of NATed UDP GRO fraglist
	net: dsa: mv88e6xxx: override existent unicast portvec in port_fdb_add
	net: sched: replaced invalid qdisc tree flush helper in qdisc_replace
	Linux 5.10.15

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I15750357b4c30739515fdc0bbbd0e04b7c986171
126 changed files with 900 additions and 557 deletions

--- a/Documentation/filesystems/overlayfs.rst
+++ b/Documentation/filesystems/overlayfs.rst
@@ -598,6 +598,14 @@ without significant effort.
 The advantage of mounting with the "volatile" option is that all forms of
 sync calls to the upper filesystem are omitted.
 
+In order to avoid giving a false sense of safety, the syncfs (and fsync)
+semantics of volatile mounts are slightly different than that of the rest of
+VFS. If any writeback error occurs on the upperdir's filesystem after a
+volatile mount takes place, all sync functions will return an error. Once this
+condition is reached, the filesystem will not recover, and every subsequent sync
+call will return an error, even if the upperdir has not experienced a new error
+since the last sync call.
+
 When overlay is mounted with "volatile" option, the directory
 "$workdir/work/incompat/volatile" is created. During next mount, overlay
 checks for this directory and refuses to mount if present. This is a strong

--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 14
+SUBLEVEL = 15
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
@@ -813,10 +813,12 @@ KBUILD_CFLAGS += -ftrivial-auto-var-init=zero
 KBUILD_CFLAGS += -enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang
 endif
 
+DEBUG_CFLAGS :=
+
 # Workaround for GCC versions < 5.0
 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61801
 ifdef CONFIG_CC_IS_GCC
-DEBUG_CFLAGS := $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
+DEBUG_CFLAGS += $(call cc-ifversion, -lt, 0500, $(call cc-option, -fno-var-tracking-assignments))
 endif
 
 ifdef CONFIG_DEBUG_INFO
@@ -987,12 +989,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
 # change __FILE__ to the relative path from the srctree
 KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
 
-# ensure -fcf-protection is disabled when using retpoline as it is
-# incompatible with -mindirect-branch=thunk-extern
-ifdef CONFIG_RETPOLINE
-KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
-endif
-
 # include additional Makefiles when needed
 include-y := scripts/Makefile.extrawarn
 include-$(CONFIG_KASAN) += scripts/Makefile.kasan

--- a/arch/arm/boot/dts/omap3-gta04.dtsi
+++ b/arch/arm/boot/dts/omap3-gta04.dtsi
@@ -114,7 +114,7 @@
     gpio-sck = <&gpio1 12 GPIO_ACTIVE_HIGH>;
     gpio-miso = <&gpio1 18 GPIO_ACTIVE_HIGH>;
     gpio-mosi = <&gpio1 20 GPIO_ACTIVE_HIGH>;
-    cs-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
+    cs-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
    num-chipselects = <1>;
 
    /* lcd panel */
@@ -124,7 +124,6 @@
     spi-max-frequency = <100000>;
     spi-cpol;
     spi-cpha;
-    spi-cs-high;
 
     backlight= <&backlight>;
     label = "lcd";

--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi
@@ -35,7 +35,7 @@
   */
  rs485-rx-en {
    gpio-hog;
-   gpios = <8 GPIO_ACTIVE_HIGH>;
+   gpios = <8 0>;
    output-low;
    line-name = "rs485-rx-en";
  };
@@ -63,7 +63,7 @@
   */
  usb-hub {
    gpio-hog;
-   gpios = <2 GPIO_ACTIVE_HIGH>;
+   gpios = <2 0>;
    output-high;
    line-name = "usb-hub-reset";
  };
@@ -87,6 +87,12 @@
  };
 };
 
+&i2c4 {
+  touchscreen@49 {
+    status = "disabled";
+  };
+};
+
 &i2c5 { /* TP7/TP8 */
  pinctrl-names = "default";
  pinctrl-0 = <&i2c5_pins_a>;
@@ -104,7 +110,7 @@
   * are used for on-board microSD slot instead.
   */
  /delete-property/broken-cd;
- cd-gpios = <&gpioi 10 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ cd-gpios = <&gpioi 10 GPIO_ACTIVE_HIGH>;
  disable-wp;
 };

--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
@@ -353,7 +353,8 @@
  pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
  pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>;
  pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>;
- broken-cd;
+ cd-gpios = <&gpiog 1 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ disable-wp;
  st,sig-dir;
  st,neg-edge;
  st,use-ckin;

--- a/arch/arm/boot/dts/sun7i-a20-bananapro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapro.dts
@@ -110,7 +110,7 @@
  pinctrl-names = "default";
  pinctrl-0 = <&gmac_rgmii_pins>;
  phy-handle = <&phy1>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
  phy-supply = <&reg_gmac_3v3>;
  status = "okay";
 };

--- a/arch/arm/include/debug/tegra.S
+++ b/arch/arm/include/debug/tegra.S
@@ -149,7 +149,34 @@
 
   .align
 99:  .word .
+#if defined(ZIMAGE)
+  .word . + 4
+/*
+ * Storage for the state maintained by the macro.
+ *
+ * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
+ * That's because this header is included from multiple files, and we only
+ * want a single copy of the data. In particular, the UART probing code above
+ * assumes it's running using physical addresses. This is true when this file
+ * is included from head.o, but not when included from debug.o. So we need
+ * to share the probe results between the two copies, rather than having
+ * to re-run the probing again later.
+ *
+ * In the decompressor, we put the storage right here, since common.c
+ * isn't included in the decompressor build. This storage data gets put in
+ * .text even though it's really data, since .data is discarded from the
+ * decompressor. Luckily, .text is writeable in the decompressor, unless
+ * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
+ */
+  /* Debug UART initialization required */
+  .word 1
+  /* Debug UART physical address */
+  .word 0
+  /* Debug UART virtual address */
+  .word 0
+#else
   .word tegra_uart_config
+#endif
   .ltorg
 
   /* Load previously selected UART address */
@@ -189,30 +216,3 @@
 
   .macro waituarttxrdy,rd,rx
   .endm
-
-/*
- * Storage for the state maintained by the macros above.
- *
- * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
- * That's because this header is included from multiple files, and we only
- * want a single copy of the data. In particular, the UART probing code above
- * assumes it's running using physical addresses. This is true when this file
- * is included from head.o, but not when included from debug.o. So we need
- * to share the probe results between the two copies, rather than having
- * to re-run the probing again later.
- *
- * In the decompressor, we put the symbol/storage right here, since common.c
- * isn't included in the decompressor build. This symbol gets put in .text
- * even though it's really data, since .data is discarded from the
- * decompressor. Luckily, .text is writeable in the decompressor, unless
- * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
- */
-#if defined(ZIMAGE)
-tegra_uart_config:
-  /* Debug UART initialization required */
-  .word 1
-  /* Debug UART physical address */
-  .word 0
-  /* Debug UART virtual address */
-  .word 0
-#endif

--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -65,15 +65,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
  if (addr)
   switch (size) {
   case 1:
-   asm("ldrb %0, [%1, %2]"
+   asm volatile("ldrb %0, [%1, %2]"
     : "=r" (v) : "r" (addr), "r" (where) : "cc");
    break;
   case 2:
-   asm("ldrh %0, [%1, %2]"
+   asm volatile("ldrh %0, [%1, %2]"
    : "=r" (v) : "r" (addr), "r" (where) : "cc");
    break;
   case 4:
-   asm("ldr %0, [%1, %2]"
+   asm volatile("ldr %0, [%1, %2]"
    : "=r" (v) : "r" (addr), "r" (where) : "cc");
    break;
   }
@@ -99,17 +99,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
  if (addr)
   switch (size) {
   case 1:
-   asm("strb %0, [%1, %2]"
+   asm volatile("strb %0, [%1, %2]"
    : : "r" (value), "r" (addr), "r" (where)
    : "cc");
    break;
   case 2:
-   asm("strh %0, [%1, %2]"
+   asm volatile("strh %0, [%1, %2]"
    : : "r" (value), "r" (addr), "r" (where)
    : "cc");
    break;
   case 4:
-   asm("str %0, [%1, %2]"
+   asm volatile("str %0, [%1, %2]"
    : : "r" (value), "r" (addr), "r" (where)
    : "cc");
    break;

--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -203,6 +203,8 @@ static int osk_tps_setup(struct i2c_client *client, void *context)
  */
 gpio_request(OSK_TPS_GPIO_USB_PWR_EN, "n_vbus_en");
 gpio_direction_output(OSK_TPS_GPIO_USB_PWR_EN, 1);
+ /* Free the GPIO again as the driver will request it */
+ gpio_free(OSK_TPS_GPIO_USB_PWR_EN);
 
 /* Set GPIO 2 high so LED D3 is off by default */
 tps65010_set_gpio_out_value(GPIO2, HIGH);

--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -2384,7 +2384,7 @@
    interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
    dr_mode = "host";
    snps,dis_u2_susphy_quirk;
-   snps,quirk-frame-length-adjustment;
+   snps,quirk-frame-length-adjustment = <0x20>;
    snps,parkmode-disable-ss-quirk;
   };
  };

--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
@@ -52,7 +52,7 @@
   regulator-min-microvolt = <3300000>;
   regulator-max-microvolt = <3300000>;
 
-  gpio = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
+  gpio = <&gpio_ao GPIOAO_3 GPIO_OPEN_DRAIN>;
   enable-active-high;
   regulator-always-on;
  };

--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -385,7 +385,7 @@
 
  dcfg: dcfg@1ee0000 {
   compatible = "fsl,ls1046a-dcfg", "syscon";
-  reg = <0x0 0x1ee0000 0x0 0x10000>;
+  reg = <0x0 0x1ee0000 0x0 0x1000>;
   big-endian;
  };
 

--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
@@ -263,6 +263,8 @@
 &i2c3 {
  status = "okay";
  clock-frequency = <400000>;
+ /* Overwrite pinctrl-0 from sdm845.dtsi */
+ pinctrl-0 = <&qup_i2c3_default &i2c3_hid_active>;
 
  tsel: hid@15 {
   compatible = "hid-over-i2c";
@@ -270,9 +272,6 @@
   hid-descr-addr = <0x1>;
   interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
-
-  pinctrl-names = "default";
-  pinctrl-0 = <&i2c3_hid_active>;
  };
 
  tsc2: hid@2c {
@@ -281,11 +280,6 @@
   hid-descr-addr = <0x20>;
   interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
-
-  pinctrl-names = "default";
-  pinctrl-0 = <&i2c3_hid_active>;
-
-  status = "disabled";
  };
 };

--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
@@ -1097,7 +1097,7 @@
  vopl_mmu: iommu@ff470f00 {
   compatible = "rockchip,iommu";
   reg = <0x0 0xff470f00 0x0 0x100>;
-  interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+  interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
   interrupt-names = "vopl_mmu";
   clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>;
   clock-names = "aclk", "iface";

--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
@@ -790,7 +790,6 @@
 &pcie0 {
  bus-scan-delay-ms = <1000>;
  ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
- max-link-speed = <2>;
  num-lanes = <4>;
  pinctrl-names = "default";
  pinctrl-0 = <&pcie_clkreqn_cpm>;

--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -252,8 +252,10 @@ choice
  default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
 
  config MAXPHYSMEM_1GB
+  depends on 32BIT
   bool "1GiB"
  config MAXPHYSMEM_2GB
+  depends on 64BIT && CMODEL_MEDLOW
   bool "2GiB"
  config MAXPHYSMEM_128GB
   depends on 64BIT && CMODEL_MEDANY

--- a/arch/um/drivers/virtio_uml.c
+++ b/arch/um/drivers/virtio_uml.c
@@ -1083,6 +1083,7 @@ static void virtio_uml_release_dev(struct device *d)
  }
 
  os_close_file(vu_dev->sock);
+ kfree(vu_dev);
 }
 
 /* Platform device */
@@ -1096,7 +1097,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
  if (!pdata)
   return -EINVAL;
 
- vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL);
+ vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
  if (!vu_dev)
   return -ENOMEM;

--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -127,6 +127,9 @@ else
 
         KBUILD_CFLAGS += -mno-red-zone
         KBUILD_CFLAGS += -mcmodel=kernel
+
+        # Intel CET isn't enabled in the kernel
+        KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
 endif
 
 ifdef CONFIG_X86_X32

--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -197,16 +197,6 @@ static inline bool apic_needs_pit(void) { return true; }
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_X86_X2APIC
-/*
- * Make previous memory operations globally visible before
- * sending the IPI through x2apic wrmsr. We need a serializing instruction or
- * mfence for this.
- */
-static inline void x2apic_wrmsr_fence(void)
-{
- asm volatile("mfence" : : : "memory");
-}
-
 static inline void native_apic_msr_write(u32 reg, u32 v)
 {
  if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||

--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -84,4 +84,22 @@ do { \
 
 #include <asm-generic/barrier.h>
 
+/*
+ * Make previous memory operations globally visible before
+ * a WRMSR.
+ *
+ * MFENCE makes writes visible, but only affects load/store
+ * instructions. WRMSR is unfortunately not a load/store
+ * instruction and is unaffected by MFENCE. The LFENCE ensures
+ * that the WRMSR is not reordered.
+ *
+ * Most WRMSRs are full serializing instructions themselves and
+ * do not require this barrier. This is only required for the
+ * IA32_TSC_DEADLINE and X2APIC MSRs.
+ */
+static inline void weak_wrmsr_fence(void)
+{
+ asm volatile("mfence; lfence" : : : "memory");
+}
+
 #endif /* _ASM_X86_BARRIER_H */

--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -41,6 +41,7 @@
 #include <asm/perf_event.h>
 #include <asm/x86_init.h>
 #include <linux/atomic.h>
+#include <asm/barrier.h>
 #include <asm/mpspec.h>
 #include <asm/i8259.h>
 #include <asm/proto.h>
@@ -472,6 +473,9 @@ static int lapic_next_deadline(unsigned long delta,
 {
  u64 tsc;
 
+ /* This MSR is special and need a special fence: */
+ weak_wrmsr_fence();
+
  tsc = rdtsc();
  wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
  return 0;

--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -29,7 +29,8 @@ static void x2apic_send_IPI(int cpu, int vector)
 {
  u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
 
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
  __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
 }
 
@@ -41,7 +42,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
  unsigned long flags;
  u32 dest;
 
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
  local_irq_save(flags);
 
  tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);

--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -43,7 +43,8 @@ static void x2apic_send_IPI(int cpu, int vector)
 {
  u32 dest = per_cpu(x86_cpu_to_apicid, cpu);
 
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
  __x2apic_send_IPI_dest(dest, vector, APIC_DEST_PHYSICAL);
 }
 
@@ -54,7 +55,8 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
  unsigned long this_cpu;
  unsigned long flags;
 
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
  local_irq_save(flags);
 
@@ -125,7 +127,8 @@ void __x2apic_send_IPI_shorthand(int vector, u32 which)
 {
  unsigned long cfg = __prepare_ICR(which, vector, 0);
 
- x2apic_wrmsr_fence();
+ /* x2apic MSRs are special and need a special fence: */
+ weak_wrmsr_fence();
  native_x2apic_icr_write(cfg, 0);
 }

--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -269,6 +269,20 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
    CPU_ENTRY_AREA_TOTAL_SIZE))
   return true;
 
+ /*
+  * When FSGSBASE is enabled, paranoid_entry() fetches the per-CPU
+  * GSBASE value via __per_cpu_offset or pcpu_unit_offsets.
+  */
+#ifdef CONFIG_SMP
+ if (within_area(addr, end, (unsigned long)__per_cpu_offset,
+   sizeof(unsigned long) * nr_cpu_ids))
+  return true;
+#else
+ if (within_area(addr, end, (unsigned long)&pcpu_unit_offsets,
+   sizeof(pcpu_unit_offsets)))
+  return true;
+#endif
+
  for_each_possible_cpu(cpu) {
   /* The original rw GDT is being used after load_direct_gdt() */
   if (within_area(addr, end, (unsigned long)get_cpu_gdt_rw(cpu),
@@ -293,6 +307,14 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
    (unsigned long)&per_cpu(cpu_tlbstate, cpu),
    sizeof(struct tlb_state)))
    return true;
+
+  /*
+   * When in guest (X86_FEATURE_HYPERVISOR), local_db_save()
+   * will read per-cpu cpu_dr7 before clear dr7 register.
+   */
+  if (within_area(addr, end, (unsigned long)&per_cpu(cpu_dr7, cpu),
+    sizeof(cpu_dr7)))
+   return true;
  }
 
  return false;
@@ -491,15 +513,12 @@ static int hw_breakpoint_handler(struct die_args *args)
  struct perf_event *bp;
  unsigned long *dr6_p;
  unsigned long dr6;
+ bool bpx;
 
  /* The DR6 value is pointed by args->err */
  dr6_p = (unsigned long *)ERR_PTR(args->err);
  dr6 = *dr6_p;
 
- /* If it's a single step, TRAP bits are random */
- if (dr6 & DR_STEP)
-  return NOTIFY_DONE;
-
  /* Do an early return if no trap bits are set in DR6 */
  if ((dr6 & DR_TRAP_BITS) == 0)
   return NOTIFY_DONE;
@@ -509,28 +528,29 @@ static int hw_breakpoint_handler(struct die_args *args)
   if (likely(!(dr6 & (DR_TRAP0 << i))))
    continue;
 
-  /*
-   * The counter may be concurrently released but that can only
-   * occur from a call_rcu() path. We can then safely fetch
-   * the breakpoint, use its callback, touch its counter
-   * while we are in an rcu_read_lock() path.
-   */
-  rcu_read_lock();
-
   bp = this_cpu_read(bp_per_reg[i]);
+  if (!bp)
+   continue;
+
+  bpx = bp->hw.info.type == X86_BREAKPOINT_EXECUTE;
+
+  /*
+   * TF and data breakpoints are traps and can be merged, however
+   * instruction breakpoints are faults and will be raised
+   * separately.
+   *
+   * However DR6 can indicate both TF and instruction
+   * breakpoints. In that case take TF as that has precedence and
+   * delay the instruction breakpoint for the next exception.
+   */
+  if (bpx && (dr6 & DR_STEP))
+   continue;
+
   /*
    * Reset the 'i'th TRAP bit in dr6 to denote completion of
    * exception handling
    */
   (*dr6_p) &= ~(DR_TRAP0 << i);
-  /*
-   * bp can be NULL due to lazy debug register switching
-   * or due to concurrent perf counter removing.
-   */
-  if (!bp) {
-   rcu_read_unlock();
-   break;
-  }
 
   perf_bp_event(bp, args->regs);
 
@@ -538,11 +558,10 @@ static int hw_breakpoint_handler(struct die_args *args)
    * Set up resume flag to avoid breakpoint recursion when
    * returning back to origin.
    */
-  if (bp->hw.info.type == X86_BREAKPOINT_EXECUTE)
+  if (bpx)
    args->regs->flags |= X86_EFLAGS_RF;
-
-  rcu_read_unlock();
  }
+
  /*
   * Further processing in do_debug() is needed for a) user-space
   * breakpoints (to generate signals) and b) when the system has

--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -320,7 +320,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
  if (cpuid->nent < vcpu->arch.cpuid_nent)
   goto out;
  r = -EFAULT;
- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+ if (copy_to_user(entries, vcpu->arch.cpuid_entries,
    vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
   goto out;
  return 0;

--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2879,6 +2879,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
  ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
  *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
         (u32)msr_data;
+ if (efer & EFER_LMA)
+  ctxt->mode = X86EMUL_MODE_PROT64;
 
  return X86EMUL_CONTINUE;
 }

--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1037,8 +1037,8 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
 }
 
 /*
- * Clear non-leaf entries (and free associated page tables) which could
- * be replaced by large mappings, for GFNs within the slot.
+ * Clear leaf entries which could be replaced by large mappings, for
+ * GFNs within the slot.
  */
 static void zap_collapsible_spte_range(struct kvm *kvm,
            struct kvm_mmu_page *root,
@@ -1050,7 +1050,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
  tdp_root_for_each_pte(iter, root, start, end) {
   if (!is_shadow_present_pte(iter.old_spte) ||
-      is_last_spte(iter.old_spte, iter.level))
+      !is_last_spte(iter.old_spte, iter.level))
    continue;
 
   pfn = spte_to_pfn(iter.old_spte);

--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -320,6 +320,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
  unsigned long first, last;
  int ret;
 
+ lockdep_assert_held(&kvm->lock);
+
  if (ulen == 0 || uaddr + ulen < uaddr)
   return ERR_PTR(-EINVAL);
 
@@ -1001,12 +1003,20 @@ int svm_register_enc_region(struct kvm *kvm,
  if (!region)
   return -ENOMEM;
 
+ mutex_lock(&kvm->lock);
  region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
  if (IS_ERR(region->pages)) {
   ret = PTR_ERR(region->pages);
+  mutex_unlock(&kvm->lock);
   goto e_free;
  }
 
+ region->uaddr = range->addr;
+ region->size = range->size;
+ list_add_tail(&region->list, &sev->regions_list);
+ mutex_unlock(&kvm->lock);
+
  /*
   * The guest may change the memory encryption attribute from C=0 -> C=1
   * or vice versa for this memory range. Lets make sure caches are
@@ -1015,13 +1025,6 @@ int svm_register_enc_region(struct kvm *kvm,
   */
  sev_clflush_pages(region->pages, region->npages);
 
- region->uaddr = range->addr;
- region->size = range->size;
-
- mutex_lock(&kvm->lock);
- list_add_tail(&region->list, &sev->regions_list);
- mutex_unlock(&kvm->lock);
-
  return ret;
 
 e_free:

--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -438,6 +438,11 @@ static int has_svm(void)
   return 0;
  }
 
+ if (sev_active()) {
+  pr_info("KVM is unsupported when running as an SEV guest\n");
+  return 0;
+ }
+
  return 1;
 }

--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6874,11 +6874,20 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
   switch (index) {
   case MSR_IA32_TSX_CTRL:
    /*
-    * No need to pass TSX_CTRL_CPUID_CLEAR through, so
-    * let's avoid changing CPUID bits under the host
-    * kernel's feet.
+    * TSX_CTRL_CPUID_CLEAR is handled in the CPUID
+    * interception. Keep the host value unchanged to avoid
+    * changing CPUID bits under the host kernel's feet.
+    *
+    * hle=0, rtm=0, tsx_ctrl=1 can be found with some
+    * combinations of new kernel and old userspace. If
+    * those guests run on a tsx=off host, do allow guests
+    * to use TSX_CTRL, but do not change the value on the
+    * host so that TSX remains always disabled.
     */
-   vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+   if (boot_cpu_has(X86_FEATURE_RTM))
+    vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+   else
+    vmx->guest_uret_msrs[j].mask = 0;
    break;
   default:
    vmx->guest_uret_msrs[j].mask = -1ull;

--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1376,16 +1376,24 @@ static u64 kvm_get_arch_capabilities(void)
  if (!boot_cpu_has_bug(X86_BUG_MDS))
   data |= ARCH_CAP_MDS_NO;
 
- /*
-  * On TAA affected systems:
-  *  - nothing to do if TSX is disabled on the host.
-  *  - we emulate TSX_CTRL if present on the host.
-  *    This lets the guest use VERW to clear CPU buffers.
-  */
- if (!boot_cpu_has(X86_FEATURE_RTM))
-  data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR);
- else if (!boot_cpu_has_bug(X86_BUG_TAA))
+ if (!boot_cpu_has(X86_FEATURE_RTM)) {
+  /*
+   * If RTM=0 because the kernel has disabled TSX, the host might
+   * have TAA_NO or TSX_CTRL. Clear TAA_NO (the guest sees RTM=0
+   * and therefore knows that there cannot be TAA) but keep
+   * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
+   * and we want to allow migrating those guests to tsx=off hosts.
+   */
+  data &= ~ARCH_CAP_TAA_NO;
+ } else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
   data |= ARCH_CAP_TAA_NO;
+ } else {
+  /*
+   * Nothing to do here; we emulate TSX_CTRL if present on the
+   * host so the guest can choose between disabling TSX or
+   * using VERW to clear CPU buffers.
+   */
+ }
 
  return data;
 }
@@ -9907,6 +9915,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
  fx_init(vcpu);
 
  vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+ vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
 
  vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;

--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -351,6 +351,7 @@ bool sev_active(void)
 {
  return sev_status & MSR_AMD64_SEV_ENABLED;
 }
+EXPORT_SYMBOL_GPL(sev_active);
 
 /* Needs to be called from non-instrumentable code */
 bool noinstr sev_es_active(void)

--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -602,7 +602,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
   ret = gdev->id;
   goto err_free_gdev;
  }
- dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+
+ ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+ if (ret)
+  goto err_free_ida;
+
  device_initialize(&gdev->dev);
  dev_set_drvdata(&gdev->dev, gdev);
  if (gc->parent && gc->parent->driver)
@@ -616,7 +620,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
  gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
  if (!gdev->descs) {
   ret = -ENOMEM;
-  goto err_free_ida;
+  goto err_free_dev_name;
  }
 
  if (gc->ngpio == 0) {
@@ -767,6 +771,8 @@ err_free_label:
  kfree_const(gdev->label);
 err_free_descs:
  kfree(gdev->descs);
+err_free_dev_name:
+ kfree(dev_name(&gdev->dev));
 err_free_ida:
  ida_free(&gpio_ida, gdev->id);
 err_free_gdev:

--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2278,8 +2278,6 @@ void amdgpu_dm_update_connector_after_detect(
   drm_connector_update_edid_property(connector,
          aconnector->edid);
-  drm_add_edid_modes(connector, aconnector->edid);
-
   if (aconnector->dc_link->aux_mode)
    drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
          aconnector->edid);

--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3629,14 +3629,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
  return 0;
 }
 
-static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
+/**
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
+ * @link_rate: link rate in 10kbits/s units
+ * @link_lane_count: lane count
+ *
+ * Calculate the total bandwidth of a MultiStream Transport link. The returned
+ * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
+ * convert the number of PBNs required for a given stream to the number of
+ * timeslots this stream requires in each MTP.
+ */
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
 {
- if (dp_link_bw == 0 || dp_link_count == 0)
-  DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
-         dp_link_bw, dp_link_count);
+ if (link_rate == 0 || link_lane_count == 0)
+  DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
+         link_rate, link_lane_count);
 
- return dp_link_bw * dp_link_count / 2;
+ /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
+ return link_rate * link_lane_count / 54000;
 }
+EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
 
 /**
  * drm_dp_read_mst_cap() - check whether or not a sink supports MST
@@ -3692,7 +3704,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
   goto out_unlock;
  }
 
- mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
       mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
  if (mgr->pbn_div == 0) {
   ret = -EINVAL;

--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -3274,6 +3274,23 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
  intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
 }
 
+static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
+         const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_phy_is_combo(i915, phy)) {
+  bool lane_reversal =
+   dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+  intel_combo_phy_power_up_lanes(i915, phy, false,
+           crtc_state->lane_count,
+           lane_reversal);
+ }
+}
+
 static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
       struct intel_encoder *encoder,
       const struct intel_crtc_state *crtc_state,
@@ -3367,14 +3384,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
  * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
  * the used lanes of the DDI.
  */
- if (intel_phy_is_combo(dev_priv, phy)) {
-  bool lane_reversal =
-   dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
-  intel_combo_phy_power_up_lanes(dev_priv, phy, false,
-           crtc_state->lane_count,
-           lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
 
  /*
  * 7.g Configure and enable DDI_BUF_CTL
@@ -3458,14 +3468,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
  else
   intel_prepare_dp_ddi_buffers(encoder, crtc_state);
 
- if (intel_phy_is_combo(dev_priv, phy)) {
-  bool lane_reversal =
-   dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
-  intel_combo_phy_power_up_lanes(dev_priv, phy, false,
-           crtc_state->lane_count,
-           lane_reversal);
- }
+ intel_ddi_power_up_lanes(encoder, crtc_state);
 
  intel_ddi_init_dp_buf_reg(encoder);
  if (!is_mst)
@@ -3933,6 +3936,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
   intel_de_write(dev_priv, reg, val);
  }
 
+ intel_ddi_power_up_lanes(encoder, crtc_state);
+
  /* In HDMI/DVI mode, the port width, and swing/emphasis values
  * are ignored so nothing special needs to be done besides
  * enabling the port.

--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -2294,7 +2294,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
   */
  ret = i915_vma_pin_fence(vma);
  if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
-  i915_gem_object_unpin_from_display_plane(vma);
+  i915_vma_unpin(vma);
   vma = ERR_PTR(ret);
   goto err;
  }
@@ -2312,12 +2312,9 @@ err:
 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
 {
- i915_gem_object_lock(vma->obj, NULL);
  if (flags & PLANE_HAS_FENCE)
   i915_vma_unpin_fence(vma);
- i915_gem_object_unpin_from_display_plane(vma);
- i915_gem_object_unlock(vma->obj);
-
+ i915_vma_unpin(vma);
  i915_vma_put(vma);
 }
 
@@ -4883,6 +4880,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
   plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
  } else if (fb->format->is_yuv) {
   plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
+  if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+   plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
  }
 
  return plane_color_ctl;

--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -68,7 +68,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
  slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
            connector->port,
-           crtc_state->pbn, 0);
+           crtc_state->pbn,
+           drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+                 crtc_state->lane_count));
  if (slots == -EDEADLK)
   return slots;
  if (slots >= 0)

--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -359,7 +359,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
  intel_frontbuffer_flip_complete(overlay->i915,
      INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
 
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
  i915_vma_put(vma);
 }
 
@@ -860,7 +860,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
  return 0;
 
 out_unpin:
- i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_unpin(vma);
 out_pin_section:
  atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -469,13 +469,19 @@ skl_program_scaler(struct intel_plane *plane,
 
 /* Preoffset values for YUV to RGB Conversion */
 #define PREOFF_YUV_TO_RGB_HI 0x1800
-#define PREOFF_YUV_TO_RGB_ME 0x1F00
+#define PREOFF_YUV_TO_RGB_ME 0x0000
 #define PREOFF_YUV_TO_RGB_LO 0x1800
 
 #define ROFF(x) (((x) & 0xffff) << 16)
 #define GOFF(x) (((x) & 0xffff) << 0)
 #define BOFF(x) (((x) & 0xffff) << 16)
 
+/*
+ * Programs the input color space conversion stage for ICL HDR planes.
+ * Note that it is assumed that this stage always happens after YUV
+ * range correction. Thus, the input to this stage is assumed to be
+ * in full-range YCbCr.
+ */
 static void
 icl_program_input_csc(struct intel_plane *plane,
         const struct intel_crtc_state *crtc_state,
@@ -523,52 +529,7 @@ icl_program_input_csc(struct intel_plane *plane,
    0x0, 0x7800, 0x7F10,
   },
  };
-
- /* Matrix for Limited Range to Full Range Conversion */
- static const u16 input_csc_matrix_lr[][9] = {
-  /*
-   * BT.601 Limted range YCbCr -> full range RGB
-   * The matrix required is :
-   * [1.164384, 0.000, 1.596027,
-   *  1.164384, -0.39175, -0.812813,
-   *  1.164384, 2.017232, 0.0000]
-   */
-  [DRM_COLOR_YCBCR_BT601] = {
-   0x7CC8, 0x7950, 0x0,
-   0x8D00, 0x7950, 0x9C88,
-   0x0, 0x7950, 0x6810,
-  },
-  /*
-   * BT.709 Limited range YCbCr -> full range RGB
-   * The matrix required is :
-   * [1.164384, 0.000, 1.792741,
-   *  1.164384, -0.213249, -0.532909,
-   *  1.164384, 2.112402, 0.0000]
-   */
-  [DRM_COLOR_YCBCR_BT709] = {
-   0x7E58, 0x7950, 0x0,
-   0x8888, 0x7950, 0xADA8,
-   0x0, 0x7950, 0x6870,
-  },
-  /*
-   * BT.2020 Limited range YCbCr -> full range RGB
-   * The matrix required is :
-   * [1.164, 0.000, 1.678,
-   *  1.164, -0.1873, -0.6504,
-   *  1.164, 2.1417, 0.0000]
-   */
-  [DRM_COLOR_YCBCR_BT2020] = {
-   0x7D70, 0x7950, 0x0,
-   0x8A68, 0x7950, 0xAC00,
-   0x0, 0x7950, 0x6890,
-  },
- };
- const u16 *csc;
-
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
-  csc = input_csc_matrix[plane_state->hw.color_encoding];
- else
-  csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
+ const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
 
  intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
      ROFF(csc[0]) | GOFF(csc[1]));
@@ -585,14 +546,8 @@ icl_program_input_csc(struct intel_plane *plane,
  intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
      PREOFF_YUV_TO_RGB_HI);
- if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
-  intel_de_write_fw(dev_priv,
-      PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
-      0);
- else
-  intel_de_write_fw(dev_priv,
-      PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
-      PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+     PREOFF_YUV_TO_RGB_ME);
  intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
      PREOFF_YUV_TO_RGB_LO);
  intel_de_write_fw(dev_priv,

--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -387,48 +387,6 @@ err:
  return vma;
 }
 
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_vma *vma;
-
- if (list_empty(&obj->vma.list))
-  return;
-
- mutex_lock(&i915->ggtt.vm.mutex);
- spin_lock(&obj->vma.lock);
- for_each_ggtt_vma(vma, obj) {
-  if (!drm_mm_node_allocated(&vma->node))
-   continue;
-
-  GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
-  list_move_tail(&vma->vm_link, &vma->vm->bound_list);
- }
- spin_unlock(&obj->vma.lock);
- mutex_unlock(&i915->ggtt.vm.mutex);
-
- if (i915_gem_object_is_shrinkable(obj)) {
-  unsigned long flags;
-
-  spin_lock_irqsave(&i915->mm.obj_lock, flags);
-  if (obj->mm.madv == I915_MADV_WILLNEED &&
-      !atomic_read(&obj->mm.shrink_pin))
-   list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
-  spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
-}
-
-void
-i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
-{
- /* Bump the LRU to try and avoid premature eviction whilst flipping */
- i915_gem_object_bump_inactive_ggtt(vma->obj);
-
- i915_vma_unpin(vma);
-}
-
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  * @obj: object to act on
@@ -569,9 +527,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
  else
   err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
 
- /* And bump the LRU for this access */
- i915_gem_object_bump_inactive_ggtt(obj);
-
  i915_gem_object_unlock(obj);
 
  if (write_domain)

--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -471,7 +471,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
          u32 alignment,
          const struct i915_ggtt_view *view,
          unsigned int flags);
-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
 
 void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
 void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);

--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -451,10 +451,12 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq)
  struct intel_context *ce = rq->context;
  bool release;
 
- if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
-  return;
-
  spin_lock(&ce->signal_lock);
+ if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
+  spin_unlock(&ce->signal_lock);
+  return;
+ }
+
  list_del_rcu(&rq->signal_link);
  release = remove_signaling_context(rq->engine->breadcrumbs, ce);
  spin_unlock(&ce->signal_lock);

--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -215,9 +215,17 @@ static const struct xpad_device {
  { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
  { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
  { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
- { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
  { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
  { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
  { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
  { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
  { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -296,6 +304,9 @@ static const struct xpad_device {
  { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
  { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
  { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
  { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
  { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
  { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
@@ -429,8 +440,12 @@ static const struct usb_device_id xpad_table[] = {
  XPAD_XBOX360_VENDOR(0x162e),  /* Joytech X-Box 360 controllers */
  XPAD_XBOX360_VENDOR(0x1689),  /* Razer Onza */
  XPAD_XBOX360_VENDOR(0x1bad),  /* Harminix Rock Band Guitar and Drums */
+ XPAD_XBOX360_VENDOR(0x20d6),  /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x20d6),  /* PowerA Controllers */
  XPAD_XBOX360_VENDOR(0x24c6),  /* PowerA Controllers */
  XPAD_XBOXONE_VENDOR(0x24c6),  /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x2e24),  /* Hyperkin Duke X-Box One pad */
+ XPAD_XBOX360_VENDOR(0x2f24),  /* GameSir Controllers */
  { }
 };

--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -219,6 +219,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
    DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
    DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
   },
+ },
+ {
   .matches = {
    DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
    DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),

--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -157,6 +157,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = {
  { .id = "5663", .data = &gt1x_chip_data },
  { .id = "5688", .data = &gt1x_chip_data },
  { .id = "917S", .data = &gt1x_chip_data },
+ { .id = "9286", .data = &gt1x_chip_data },
 
  { .id = "911", .data = &gt911_chip_data },
  { .id = "9271", .data = &gt911_chip_data },
@@ -1445,6 +1446,7 @@ static const struct of_device_id goodix_of_match[] = {
  { .compatible = "goodix,gt927" },
  { .compatible = "goodix,gt9271" },
  { .compatible = "goodix,gt928" },
+ { .compatible = "goodix,gt9286" },
  { .compatible = "goodix,gt967" },
  { }
 };

View File

@@ -29,11 +29,13 @@ struct ili2xxx_chip {
void *buf, size_t len);
int (*get_touch_data)(struct i2c_client *client, u8 *data);
bool (*parse_touch_data)(const u8 *data, unsigned int finger,
-unsigned int *x, unsigned int *y);
+unsigned int *x, unsigned int *y,
+unsigned int *z);
bool (*continue_polling)(const u8 *data, bool touch);
unsigned int max_touches;
unsigned int resolution;
bool has_calibrate_reg;
+bool has_pressure_reg;
};
struct ili210x {
@@ -82,7 +84,8 @@ static int ili210x_read_touch_data(struct i2c_client *client, u8 *data)
static bool ili210x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
-unsigned int *x, unsigned int *y)
+unsigned int *x, unsigned int *y,
+unsigned int *z)
{
if (touchdata[0] & BIT(finger))
return false;
@@ -137,7 +140,8 @@ static int ili211x_read_touch_data(struct i2c_client *client, u8 *data)
static bool ili211x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
-unsigned int *x, unsigned int *y)
+unsigned int *x, unsigned int *y,
+unsigned int *z)
{
u32 data;
@@ -169,7 +173,8 @@ static const struct ili2xxx_chip ili211x_chip = {
static bool ili212x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
-unsigned int *x, unsigned int *y)
+unsigned int *x, unsigned int *y,
+unsigned int *z)
{
u16 val;
@@ -235,7 +240,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
static bool ili251x_touchdata_to_coords(const u8 *touchdata,
unsigned int finger,
-unsigned int *x, unsigned int *y)
+unsigned int *x, unsigned int *y,
+unsigned int *z)
{
u16 val;
@@ -245,6 +251,7 @@ static bool ili251x_touchdata_to_coords(const u8 *touchdata,
*x = val & 0x3fff;
*y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2);
+*z = touchdata[1 + (finger * 5) + 4];
return true;
}
@@ -261,6 +268,7 @@ static const struct ili2xxx_chip ili251x_chip = {
.continue_polling = ili251x_check_continue_polling,
.max_touches = 10,
.has_calibrate_reg = true,
+.has_pressure_reg = true,
};
static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
@@ -268,14 +276,16 @@ static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
struct input_dev *input = priv->input;
int i;
bool contact = false, touch;
-unsigned int x = 0, y = 0;
+unsigned int x = 0, y = 0, z = 0;
for (i = 0; i < priv->chip->max_touches; i++) {
-touch = priv->chip->parse_touch_data(touchdata, i, &x, &y);
+touch = priv->chip->parse_touch_data(touchdata, i, &x, &y, &z);
input_mt_slot(input, i);
if (input_mt_report_slot_state(input, MT_TOOL_FINGER, touch)) {
touchscreen_report_pos(input, &priv->prop, x, y, true);
+if (priv->chip->has_pressure_reg)
+input_report_abs(input, ABS_MT_PRESSURE, z);
contact = true;
}
}
@@ -437,6 +447,8 @@ static int ili210x_i2c_probe(struct i2c_client *client,
max_xy = (chip->resolution ?: SZ_64K) - 1;
input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
+if (priv->chip->has_pressure_reg)
+input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0);
touchscreen_parse_properties(input, true, &priv->prop);
error = input_mt_init_slots(input, priv->chip->max_touches,


@@ -639,8 +639,10 @@ static void md_submit_flush_data(struct work_struct *ws)
* could wait for this and below md_handle_request could wait for those
* bios because of suspend check
*/
+spin_lock_irq(&mddev->lock);
mddev->last_flush = mddev->start_flush;
mddev->flush_bio = NULL;
+spin_unlock_irq(&mddev->lock);
wake_up(&mddev->sb_wait);
if (bio->bi_iter.bi_size == 0) {


@@ -20,6 +20,8 @@
#include "sdio_cis.h"
#include "sdio_ops.h"
+#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */
static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
const unsigned char *buf, unsigned size)
{
@@ -274,6 +276,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
do {
unsigned char tpl_code, tpl_link;
+unsigned long timeout = jiffies +
+msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
if (ret)
@@ -326,6 +330,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
prev = &this->next;
if (ret == -ENOENT) {
+if (time_after(jiffies, timeout))
+break;
/* warn about unknown tuples */
pr_warn_ratelimited("%s: queuing unknown"
" CIS tuple 0x%02x (%u bytes)\n",


@@ -111,8 +111,13 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
return host->private;
}
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#ifdef CONFIG_PM_SLEEP
int sdhci_pltfm_suspend(struct device *dev);
int sdhci_pltfm_resume(struct device *dev);
-extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#else
+static inline int sdhci_pltfm_suspend(struct device *dev) { return 0; }
+static inline int sdhci_pltfm_resume(struct device *dev) { return 0; }
+#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */


@@ -1669,7 +1669,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
if (!entry.portvec)
entry.state = 0;
} else {
-entry.portvec |= BIT(port);
+if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
+entry.portvec = BIT(port);
+else
+entry.portvec |= BIT(port);
entry.state = state;
}
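
The intent of the hunk above, sketched stand-alone (the helper and its arguments are invented for illustration, not driver code): for a static entry the new port vector must replace the old one, otherwise an FDB entry migrated from port 1 to port 2 stays reachable through both.

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    /* is_static mirrors the MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC check */
    static uint16_t load_portvec(uint16_t portvec, int port, int is_static)
    {
        if (is_static)
            portvec = BIT(port);   /* fix: entry moves to 'port' */
        else
            portvec |= BIT(port);  /* old behaviour: accumulate ports */
        return portvec;
    }

    int main(void)
    {
        uint16_t vec = BIT(1);          /* static entry learned on port 1 */

        printf("override: 0x%x\n", load_portvec(vec, 2, 1)); /* 0x4 */
        printf("or-in:    0x%x\n", load_portvec(vec, 2, 0)); /* 0x6 */
        return 0;
    }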


@@ -5339,11 +5339,6 @@ static int ibmvnic_remove(struct vio_dev *dev)
unsigned long flags;
spin_lock_irqsave(&adapter->state_lock, flags);
-if (test_bit(0, &adapter->resetting)) {
-spin_unlock_irqrestore(&adapter->state_lock, flags);
-return -EBUSY;
-}
adapter->state = VNIC_REMOVING;
spin_unlock_irqrestore(&adapter->state_lock, flags);


@@ -55,12 +55,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
-/* Always report link is down if the VF queues aren't enabled */
-if (!vf->queues_enabled) {
-pfe.event_data.link_event.link_status = false;
-pfe.event_data.link_event.link_speed = 0;
-} else if (vf->link_forced) {
+if (vf->link_forced) {
pfe.event_data.link_event.link_status = vf->link_up;
pfe.event_data.link_event.link_speed =
(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
@@ -70,7 +65,6 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
pfe.event_data.link_event.link_speed =
i40e_virtchnl_link_speed(ls->link_speed);
}
-
i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
0, (u8 *)&pfe, sizeof(pfe), NULL);
}
@@ -2443,8 +2437,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
}
}
-
-vf->queues_enabled = true;
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
@@ -2466,9 +2458,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
-/* Immediately mark queues as disabled */
-vf->queues_enabled = false;
-
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;


@@ -98,7 +98,6 @@ struct i40e_vf {
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if VF link is forced */
-bool queues_enabled; /* true if the VF queues are enabled */
bool spoofchk;
u16 num_vlan;


@@ -1714,7 +1714,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
Asym_Pause);
}
-status = rd32(IGC_STATUS);
+status = pm_runtime_suspended(&adapter->pdev->dev) ?
+0 : rd32(IGC_STATUS);
if (status & IGC_STATUS_LU) {
if (status & IGC_STATUS_SPEED_1000) {


@@ -219,9 +219,9 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
u16 *data)
{
struct igc_nvm_info *nvm = &hw->nvm;
+s32 ret_val = -IGC_ERR_NVM;
u32 attempts = 100000;
u32 i, k, eewr = 0;
-s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
@@ -229,7 +229,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
words == 0) {
hw_dbg("nvm parameter(s) out of bounds\n");
-ret_val = -IGC_ERR_NVM;
goto out;
}


@@ -638,7 +638,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw)
}
out:
-return 0;
+return ret_val;
}
/**


@@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
/* Clear entry invalidation bit */
pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
-/* Write tcam index - indirect access */
-mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
-for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
-mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
/* Write sram index - indirect access */
mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
+/* Write tcam index - indirect access */
+mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
return 0;
}
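
Worth spelling out why the reorder above matters: clearing MVPP2_PRS_TCAM_INV_MASK makes the entry matchable as soon as the TCAM words reach the hardware, so in the old order the parser could briefly match a valid TCAM key against stale SRAM data. A toy model of that window (plain C, all names invented for illustration):

    #include <stdio.h>

    struct hw { int tcam_valid; int sram_data; };

    /* Writing TCAM first exposes a window where the entry is already
     * matchable (tcam_valid set) but still points at old SRAM data. */
    static void write_entry(struct hw *hw, int new_sram, int tcam_first)
    {
        if (tcam_first) {
            hw->tcam_valid = 1;          /* entry live... */
            printf("window: valid=%d sram=%d (stale!)\n",
                   hw->tcam_valid, hw->sram_data);
            hw->sram_data = new_sram;    /* ...before data lands */
        } else {
            hw->sram_data = new_sram;    /* fixed order: data first */
            hw->tcam_valid = 1;
        }
    }

    int main(void)
    {
        struct hw hw = { 0, 41 };
        write_entry(&hw, 42, 1);
        return 0;
    }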


@@ -3593,12 +3593,10 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
err = mlx5e_safe_switch_channels(priv, &new_channels,
mlx5e_num_channels_changed_ctx, NULL);
-if (err)
-goto out;
-priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
-new_channels.params.num_tc);
out:
+priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
+priv->channels.params.num_tc);
mutex_unlock(&priv->state_lock);
return err;
}


@@ -1262,8 +1262,10 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
-if (!mlx5e_tc_update_skb(cqe, skb))
+if (!mlx5e_tc_update_skb(cqe, skb)) {
+dev_kfree_skb_any(skb);
goto free_wqe;
+}
napi_gro_receive(rq->cq.napi, skb);
@@ -1316,8 +1318,10 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
-if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+dev_kfree_skb_any(skb);
goto free_wqe;
+}
napi_gro_receive(rq->cq.napi, skb);
@@ -1371,8 +1375,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
-if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
+}
napi_gro_receive(rq->cq.napi, skb);
@@ -1528,8 +1534,10 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
-if (!mlx5e_tc_update_skb(cqe, skb))
+if (!mlx5e_tc_update_skb(cqe, skb)) {
+dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
+}
napi_gro_receive(rq->cq.napi, skb);


@@ -1764,6 +1764,7 @@ search_again_locked:
if (!fte_tmp)
continue;
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
+/* No error check needed here, because insert_fte() is not called */
up_write_ref_node(&fte_tmp->node, false);
tree_put_node(&fte_tmp->node, false);
kmem_cache_free(steering->ftes_cache, fte);
@@ -1816,6 +1817,8 @@ skip_search:
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
+if (IS_ERR(rule))
+tree_put_node(&fte->node, false);
return rule;
}
rule = ERR_PTR(-ENOENT);
@@ -1914,6 +1917,8 @@ search_again_locked:
up_write_ref_node(&g->node, false);
rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node, false);
+if (IS_ERR(rule))
+tree_put_node(&fte->node, false);
tree_put_node(&g->node, false);
return rule;


@@ -76,7 +76,7 @@ enum {
static u32 get_function(u16 func_id, bool ec_function)
{
-return func_id & (ec_function << 16);
+return (u32)func_id | (ec_function << 16);
}
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
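
A quick stand-alone check of the one-line fix above (userspace C, not driver code): with `&`, a 16-bit function id masked against the flag shifted to bit 16 is zero for essentially every input, so all functions shared one page-tracking tree; `|` composes a unique 17-bit key per (function, EC-flag) pair.

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t old_key(uint16_t func_id, int ec) { return func_id & (ec << 16); }
    static uint32_t new_key(uint16_t func_id, int ec) { return (uint32_t)func_id | (ec << 16); }

    int main(void)
    {
        /* two different functions, with and without the EC flag */
        printf("old: %u %u %u\n", old_key(5, 0), old_key(7, 0), old_key(5, 1));
        /* -> 0 0 0: every function collapses to the same key */
        printf("new: %u %u %u\n", new_key(5, 0), new_key(7, 0), new_key(5, 1));
        /* -> 5 7 65541: distinct trees per (function, EC) pair */
        return 0;
    }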


@@ -4082,17 +4082,72 @@ err_out:
return -EIO;
}
-static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp)
+static bool rtl_skb_is_udp(struct sk_buff *skb)
{
+int no = skb_network_offset(skb);
+struct ipv6hdr *i6h, _i6h;
+struct iphdr *ih, _ih;
+
+switch (vlan_get_protocol(skb)) {
+case htons(ETH_P_IP):
+ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih);
+return ih && ih->protocol == IPPROTO_UDP;
+case htons(ETH_P_IPV6):
+i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h);
+return i6h && i6h->nexthdr == IPPROTO_UDP;
+default:
+return false;
+}
+}
+
+#define RTL_MIN_PATCH_LEN 47
+
+/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */
+static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
+struct sk_buff *skb)
+{
+unsigned int padto = 0, len = skb->len;
+
+if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN &&
+rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) {
+unsigned int trans_data_len = skb_tail_pointer(skb) -
+skb_transport_header(skb);
+
+if (trans_data_len >= offsetof(struct udphdr, len) &&
+trans_data_len < RTL_MIN_PATCH_LEN) {
+u16 dest = ntohs(udp_hdr(skb)->dest);
+
+/* dest is a standard PTP port */
+if (dest == 319 || dest == 320)
+padto = len + RTL_MIN_PATCH_LEN - trans_data_len;
+}
+
+if (trans_data_len < sizeof(struct udphdr))
+padto = max_t(unsigned int, padto,
+len + sizeof(struct udphdr) - trans_data_len);
+}
+
+return padto;
+}
+
+static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
+struct sk_buff *skb)
+{
+unsigned int padto;
+
+padto = rtl8125_quirk_udp_padto(tp, skb);
+
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_60:
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_63:
-return true;
+padto = max_t(unsigned int, padto, ETH_ZLEN);
default:
-return false;
+break;
}
+
+return padto;
}
static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
@@ -4164,9 +4219,10 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
opts[1] |= transport_offset << TCPHO_SHIFT;
} else {
-if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
-/* eth_skb_pad would free the skb on error */
-return !__skb_put_padto(skb, ETH_ZLEN, false);
+unsigned int padto = rtl_quirk_packet_padto(tp, skb);
+
+/* skb_padto would free the skb on error */
+return !__skb_put_padto(skb, padto, false);
}
return true;
@@ -4349,6 +4405,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
if (skb->len < ETH_ZLEN)
features &= ~NETIF_F_CSUM_MASK;
+if (rtl_quirk_packet_padto(tp, skb))
+features &= ~NETIF_F_CSUM_MASK;
+
if (transport_offset > TCPHO_MAX &&
rtl_chip_supports_csum_v2(tp))
features &= ~NETIF_F_CSUM_MASK;
@@ -4694,10 +4753,10 @@ static int rtl8169_close(struct net_device *dev)
cancel_work_sync(&tp->wk.work);
-phy_disconnect(tp->phydev);
free_irq(pci_irq_vector(pdev, 0), tp);
+phy_disconnect(tp->phydev);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
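
To make the padding arithmetic of the RTL8125 workaround above concrete, here is an arithmetic-only model of rtl8125_quirk_udp_padto() (the rtl_is_8125() and PTP-port checks are folded into the inputs, and the values are illustrative; this is a sketch, not the driver):

    #include <stdio.h>

    #define RTL_MIN_PATCH_LEN 47
    #define UDP_LEN_OFF 4          /* offsetof(struct udphdr, len) */
    #define UDP_HDR_LEN 8          /* sizeof(struct udphdr) */

    static unsigned int max_u(unsigned int a, unsigned int b)
    {
        return a > b ? a : b;
    }

    /* Returns the target frame length handed to __skb_put_padto(),
     * or 0 when no workaround padding is needed. */
    static unsigned int model_padto(unsigned int len,
                                    unsigned int trans_data_len,
                                    int is_ptp_port)
    {
        unsigned int padto = 0;

        if (trans_data_len >= UDP_LEN_OFF &&
            trans_data_len < RTL_MIN_PATCH_LEN && is_ptp_port)
            padto = len + RTL_MIN_PATCH_LEN - trans_data_len;
        if (trans_data_len < UDP_HDR_LEN)
            padto = max_u(padto, len + UDP_HDR_LEN - trans_data_len);
        return padto;
    }

    int main(void)
    {
        /* 72-byte PTP frame with 30 transport bytes: pad to 89 bytes */
        printf("%u\n", model_padto(72, 30, 1));  /* 72 + 47 - 30 = 89 */
        /* same frame, not a PTP port: no patch padding required */
        printf("%u\n", model_padto(72, 30, 0));  /* 0 */
        return 0;
    }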


@@ -1256,7 +1256,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
/* Hardware requires a 2^n ring size, with alignment equal to size */
ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
if (ring->virt && addr % size) {
-dma_free_coherent(dev, size, ring->virt, ring->addr);
+dma_free_coherent(dev, size, ring->virt, addr);
dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
size);
return -EINVAL; /* Not a good error value, but distinct */


@@ -335,16 +335,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(state);
-static ssize_t available_slots_show(struct device *dev,
-struct device_attribute *attr, char *buf)
+static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
-struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
+struct device *dev;
ssize_t rc;
u32 nfree;
if (!ndd)
return -ENXIO;
+dev = ndd->dev;
nvdimm_bus_lock(dev);
nfree = nd_label_nfree(ndd);
if (nfree - 1 > nfree) {
@@ -356,6 +356,18 @@ static ssize_t available_slots_show(struct device *dev,
nvdimm_bus_unlock(dev);
return rc;
}
+
+static ssize_t available_slots_show(struct device *dev,
+struct device_attribute *attr, char *buf)
+{
+ssize_t rc;
+
+nd_device_lock(dev);
+rc = __available_slots_show(dev_get_drvdata(dev), buf);
+nd_device_unlock(dev);
+
+return rc;
+}
static DEVICE_ATTR_RO(available_slots);
__weak ssize_t security_show(struct device *dev,


@@ -1635,11 +1635,11 @@ static umode_t namespace_visible(struct kobject *kobj,
return a->mode;
}
-if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
-|| a == &dev_attr_holder.attr
-|| a == &dev_attr_holder_class.attr
-|| a == &dev_attr_force_raw.attr
-|| a == &dev_attr_mode.attr)
+/* base is_namespace_io() attributes */
+if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
+a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
+a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
+a == &dev_attr_resource.attr)
return a->mode;
return 0;


@@ -3262,6 +3262,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
{ PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+{ PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
+.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
.driver_data = NVME_QUIRK_SINGLE_VECTOR },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },


@@ -305,7 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
length = cmd->pdu_len;
cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
offset = cmd->rbytes_done;
-cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
+cmd->sg_idx = offset / PAGE_SIZE;
sg_offset = offset % PAGE_SIZE;
sg = &cmd->req.sg[cmd->sg_idx];
@@ -318,6 +318,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
length -= iov_len;
sg = sg_next(sg);
iov++;
+sg_offset = 0;
}
iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
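
The off-by-one-page bug fixed above is easiest to see with numbers: DIV_ROUND_UP(offset, PAGE_SIZE) only equals the index of the page containing offset when the offset is page-aligned, so a second h2cdata PDU resuming mid-page landed one scatterlist element too far. A stand-alone check:

    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long offsets[] = { 0, 4096, 4097, 6000, 8192 };

        for (int i = 0; i < 5; i++) {
            unsigned long off = offsets[i];
            printf("offset %5lu: old idx %lu, fixed idx %lu\n",
                   off, DIV_ROUND_UP(off, PAGE_SIZE), off / PAGE_SIZE);
        }
        /* offsets 4097 and 6000 live in page 1, but the old formula
         * yields 2 - the iovec built from that index then walks past
         * the intended element */
        return 0;
    }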


@@ -56,7 +56,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
* managed with the xHCI and the SuperSpeed hub so we create the
* link from xHCI instead.
*/
-while (!dev_is_pci(dev))
+while (dev && !dev_is_pci(dev))
dev = dev->parent;
if (!dev)


@@ -1329,14 +1329,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
return -EINVAL;
-alts = usblp->protocol[protocol].alt_setting;
-if (alts < 0)
-return -EINVAL;
-r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
-if (r < 0) {
-printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
-alts, usblp->ifnum);
-return r;
+/* Don't unnecessarily set the interface if there's a single alt. */
+if (usblp->intf->num_altsetting > 1) {
+alts = usblp->protocol[protocol].alt_setting;
+if (alts < 0)
+return -EINVAL;
+r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
+if (r < 0) {
+printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
+alts, usblp->ifnum);
+return r;
+}
}
usblp->bidir = (usblp->protocol[protocol].epread != NULL);


@@ -1543,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
u32 windex)
{
-struct dwc2_hsotg_ep *ep;
int dir = (windex & USB_DIR_IN) ? 1 : 0;
int idx = windex & 0x7F;
@@ -1553,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
if (idx > hsotg->num_of_eps)
return NULL;
-ep = index_to_ep(hsotg, idx, dir);
-if (idx && ep->dir_in != dir)
-return NULL;
-
-return ep;
+return index_to_ep(hsotg, idx, dir);
}
/**


@@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
-if (!usb_desc)
+if (!usb_desc) {
+status = -ENOMEM;
goto fail1;
+}
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;


@@ -999,8 +999,10 @@ static int ast_vhub_of_parse_str_desc(struct ast_vhub *vhub,
str_array[offset].s = NULL;
ret = ast_vhub_str_alloc_add(vhub, &lang_str);
-if (ret)
+if (ret) {
+of_node_put(child);
break;
+}
}
return ret;


@@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
}
usbhs_pipe_clear_without_sequence(pipe, 0, 0);
+usbhs_pipe_running(pipe, 0);
__usbhsf_pkt_del(pkt);
}


@@ -64,6 +64,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+{ USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
@@ -204,6 +205,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
{ USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
+{ USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */


@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
#define CINTERION_PRODUCT_CLS8 0x00b0
+#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
+#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -1914,6 +1916,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
+.driver_info = RSVD(3)},
+{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
+.driver_info = RSVD(0)},
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),


@@ -15,6 +15,7 @@ struct mlx5_vdpa_direct_mr {
struct sg_table sg_head;
int log_size;
int nsg;
+int nent;
struct list_head list;
u64 offset;
};


@@ -25,17 +25,6 @@ static int get_octo_len(u64 len, int page_shift)
return (npages + 1) / 2;
}
-static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in)
-{
-struct scatterlist *sg;
-__be64 *pas;
-int i;
-
-pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
-for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
-(*pas) = cpu_to_be64(sg_dma_address(sg));
-}
-
static void mlx5_set_access_mode(void *mkc, int mode)
{
MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
@@ -45,10 +34,18 @@ static void mlx5_set_access_mode(void *mkc, int mode)
static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
{
struct scatterlist *sg;
+int nsg = mr->nsg;
+u64 dma_addr;
+u64 dma_len;
+int j = 0;
int i;
-for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
-mtt[i] = cpu_to_be64(sg_dma_address(sg));
+for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
+for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
+nsg && dma_len;
+nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
+mtt[j++] = cpu_to_be64(dma_addr);
+}
}
static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
@@ -64,7 +61,6 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
return -ENOMEM;
MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
-fill_sg(mr, in);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
@@ -276,8 +272,8 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
done:
mr->log_size = log_entity_size;
mr->nsg = nsg;
-err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
-if (!err)
+mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+if (!mr->nent)
goto err_map;
err = create_direct_mr(mvdev, mr);
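
The reworked populate_mtts() walk above exists because dma_map_sg_attrs() may merge scatterlist entries: the mapped table then has nent entries, each possibly spanning several 2^log_size pages, while the MTT still needs one address per page. A stand-alone model of that splitting (segment values invented for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* two merged DMA segments, 4 KiB MTT pages (log_size = 12) */
        struct { uint64_t addr, len; } seg[] = {
            { 0x100000, 16384 },    /* covers 4 MTT pages */
            { 0x200000,  8192 },    /* covers 2 MTT pages */
        };
        uint64_t page = 1ull << 12;
        int nsg = 6, j = 0;         /* mr->nsg: total pages expected */

        for (int i = 0; i < 2; i++)
            for (uint64_t a = seg[i].addr, l = seg[i].len;
                 nsg && l; nsg--, a += page, l -= page)
                printf("mtt[%d] = 0x%llx\n", j++,
                       (unsigned long long)a);
        return 0;
    }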


@@ -77,6 +77,7 @@ struct mlx5_vq_restore_info {
u64 device_addr;
u64 driver_addr;
u16 avail_index;
+u16 used_index;
bool ready;
struct vdpa_callback cb;
bool restore;
@@ -111,6 +112,7 @@ struct mlx5_vdpa_virtqueue {
u32 virtq_id;
struct mlx5_vdpa_net *ndev;
u16 avail_idx;
+u16 used_idx;
int fw_state;
/* keep last in the struct */
@@ -789,6 +791,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
+MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
get_features_12_3(ndev->mvdev.actual_features));
vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
@@ -1007,6 +1010,7 @@ static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
struct mlx5_virtq_attr {
u8 state;
u16 available_index;
+u16 used_index;
};
static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
@@ -1037,6 +1041,7 @@ static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueu
memset(attr, 0, sizeof(*attr));
attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
+attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
kfree(out);
return 0;
@@ -1520,6 +1525,16 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
}
}
+static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
+{
+int i;
+
+for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
+ndev->vqs[i].avail_idx = 0;
+ndev->vqs[i].used_idx = 0;
+}
+}
+
/* TODO: cross-endian support */
static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
{
@@ -1595,6 +1610,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
return err;
ri->avail_index = attr.available_index;
+ri->used_index = attr.used_index;
ri->ready = mvq->ready;
ri->num_ent = mvq->num_ent;
ri->desc_addr = mvq->desc_addr;
@@ -1639,6 +1655,7 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
continue;
mvq->avail_idx = ri->avail_index;
+mvq->used_idx = ri->used_index;
mvq->ready = ri->ready;
mvq->num_ent = ri->num_ent;
mvq->desc_addr = ri->desc_addr;
@@ -1753,6 +1770,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
if (!status) {
mlx5_vdpa_info(mvdev, "performing device reset\n");
teardown_driver(ndev);
+clear_virtqueues(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
ndev->mvdev.mlx_features = 0;


@@ -193,7 +193,7 @@ static int __init afs_init(void)
goto error_cache;
#endif
-ret = register_pernet_subsys(&afs_net_ops);
+ret = register_pernet_device(&afs_net_ops);
if (ret < 0)
goto error_net;
@@ -213,7 +213,7 @@ static int __init afs_init(void)
error_proc:
afs_fs_exit();
error_fs:
-unregister_pernet_subsys(&afs_net_ops);
+unregister_pernet_device(&afs_net_ops);
error_net:
#ifdef CONFIG_AFS_FSCACHE
fscache_unregister_netfs(&afs_cache_netfs);
@@ -244,7 +244,7 @@ static void __exit afs_exit(void)
proc_remove(afs_proc_symlink);
afs_fs_exit();
-unregister_pernet_subsys(&afs_net_ops);
+unregister_pernet_device(&afs_net_ops);
#ifdef CONFIG_AFS_FSCACHE
fscache_unregister_netfs(&afs_cache_netfs);
#endif


@@ -736,6 +736,7 @@ static int
cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
{
struct inode *inode;
+int rc;
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -745,8 +746,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
CIFS_I(inode)->time = 0; /* force reval */
-if (cifs_revalidate_dentry(direntry))
-return 0;
+rc = cifs_revalidate_dentry(direntry);
+if (rc) {
+cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d", rc);
+switch (rc) {
+case -ENOENT:
+case -ESTALE:
+/*
+ * Those errors mean the dentry is invalid
+ * (file was deleted or recreated)
+ */
+return 0;
+default:
+/*
+ * Otherwise some unexpected error happened
+ * report it as-is to VFS layer
+ */
+return rc;
+}
+}
else {
/*
* If the inode wasn't known to be a dfs entry when


@@ -286,7 +286,7 @@ struct smb2_negotiate_req {
__le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
__le16 NegotiateContextCount;  /* SMB3.1.1 only. MBZ earlier */
__le16 Reserved2;
-__le16 Dialects[1]; /* One dialect (vers=) at a time for now */
+__le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
} __packed;
/* Dialects */


@@ -655,10 +655,22 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num,
spin_lock(&server->req_lock);
if (*credits < num) {
/*
-* Return immediately if not too many requests in flight since
-* we will likely be stuck on waiting for credits.
+* If the server is tight on resources or just gives us less
+* credits for other reasons (e.g. requests are coming out of
+* order and the server delays granting more credits until it
+* processes a missing mid) and we exhausted most available
+* credits there may be situations when we try to send
+* a compound request but we don't have enough credits. At this
+* point the client needs to decide if it should wait for
+* additional credits or fail the request. If at least one
+* request is in flight there is a high probability that the
+* server will return enough credits to satisfy this compound
+* request.
+*
+* Return immediately if no requests in flight since we will be
+* stuck on waiting for credits.
*/
-if (server->in_flight < num - *credits) {
+if (server->in_flight == 0) {
spin_unlock(&server->req_lock);
return -ENOTSUPP;
}


@@ -735,9 +735,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+set_page_huge_active(page);
/*
* unlock_page because locked by add_to_page_cache()
-* page_put due to reference from alloc_huge_page()
+* put_page() due to reference from alloc_huge_page()
*/
unlock_page(page);
put_page(page);


@@ -8782,12 +8782,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
atomic_dec(&task->io_uring->in_idle);
-/*
- * If the files that are going away are the ones in the thread
- * identity, clear them out.
- */
-if (task->io_uring->identity->files == files)
-task->io_uring->identity->files = NULL;
io_sq_thread_unpark(ctx->sq_data);
}
}


@@ -995,8 +995,8 @@ static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect)
buflen -= thislen;
memcpy(&buf[buflen], name, thislen);
-tmp = dget_dlock(d->d_parent);
spin_unlock(&d->d_lock);
+tmp = dget_parent(d);
dput(d);
d = tmp;


@@ -433,8 +433,9 @@ static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
const struct cred *old_cred;
int ret;
-if (!ovl_should_sync(OVL_FS(file_inode(file)->i_sb)))
-return 0;
+ret = ovl_sync_status(OVL_FS(file_inode(file)->i_sb));
+if (ret <= 0)
+return ret;
ret = ovl_real_fdget_meta(file, &real, !datasync);
if (ret)


@@ -325,6 +325,7 @@ int ovl_check_metacopy_xattr(struct ovl_fs *ofs, struct dentry *dentry);
bool ovl_is_metacopy_dentry(struct dentry *dentry);
char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
int padding);
+int ovl_sync_status(struct ovl_fs *ofs);
static inline bool ovl_is_impuredir(struct super_block *sb,
struct dentry *dentry)


@@ -80,6 +80,8 @@ struct ovl_fs {
atomic_long_t last_ino;
/* Whiteout dentry cache */
struct dentry *whiteout;
+/* r/o snapshot of upperdir sb's only taken on volatile mounts */
+errseq_t errseq;
};
static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)


@@ -865,7 +865,7 @@ struct file *ovl_dir_real_file(const struct file *file, bool want_upper)
struct ovl_dir_file *od = file->private_data;
struct dentry *dentry = file->f_path.dentry;
-struct file *realfile = od->realfile;
+struct file *old, *realfile = od->realfile;
if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
return want_upper ? NULL : realfile;
@@ -874,29 +874,20 @@ struct file *ovl_dir_real_file(const struct file *file, bool want_upper)
* Need to check if we started out being a lower dir, but got copied up
*/
if (!od->is_upper) {
-struct inode *inode = file_inode(file);
realfile = READ_ONCE(od->upperfile);
if (!realfile) {
struct path upperpath;
ovl_path_upper(dentry, &upperpath);
realfile = ovl_dir_open_realfile(file, &upperpath);
+if (IS_ERR(realfile))
+return realfile;
-inode_lock(inode);
-if (!od->upperfile) {
-if (IS_ERR(realfile)) {
-inode_unlock(inode);
-return realfile;
-}
-smp_store_release(&od->upperfile, realfile);
-} else {
-/* somebody has beaten us to it */
-if (!IS_ERR(realfile))
-fput(realfile);
-realfile = od->upperfile;
+old = cmpxchg_release(&od->upperfile, NULL, realfile);
+if (old) {
+fput(realfile);
+realfile = old;
}
-inode_unlock(inode);
}
}
@@ -909,8 +900,9 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
struct file *realfile;
int err;
-if (!ovl_should_sync(OVL_FS(file->f_path.dentry->d_sb)))
-return 0;
+err = ovl_sync_status(OVL_FS(file->f_path.dentry->d_sb));
+if (err <= 0)
+return err;
realfile = ovl_dir_real_file(file, true);
err = PTR_ERR_OR_ZERO(realfile);
View File

@@ -266,11 +266,20 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
struct super_block *upper_sb;
int ret;
-if (!ovl_upper_mnt(ofs))
-return 0;
+ret = ovl_sync_status(ofs);
+/*
+ * We have to always set the err, because the return value isn't
+ * checked in syncfs, and instead indirectly return an error via
+ * the sb's writeback errseq, which VFS inspects after this call.
+ */
+if (ret < 0) {
+errseq_set(&sb->s_wb_err, -EIO);
+return -EIO;
+}
+
+if (!ret)
+return ret;
-if (!ovl_should_sync(ofs))
-return 0;
/*
* Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC).
* All the super blocks will be iterated, including upper_sb.
@@ -1950,6 +1959,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &ovl_super_operations;
if (ofs->config.upperdir) {
+struct super_block *upper_sb;
+
if (!ofs->config.workdir) {
pr_err("missing 'workdir'\n");
goto out_err;
@@ -1959,6 +1970,16 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
if (err)
goto out_err;
+upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
+if (!ovl_should_sync(ofs)) {
+ofs->errseq = errseq_sample(&upper_sb->s_wb_err);
+if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) {
+err = -EIO;
+pr_err("Cannot mount volatile when upperdir has an unseen error. Sync upperdir fs to clear state.\n");
+goto out_err;
+}
+}
+
err = ovl_get_workdir(sb, ofs, &upperpath);
if (err)
goto out_err;
@@ -1966,9 +1987,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
if (!ofs->workdir)
sb->s_flags |= SB_RDONLY;
-sb->s_stack_depth = ovl_upper_mnt(ofs)->mnt_sb->s_stack_depth;
-sb->s_time_gran = ovl_upper_mnt(ofs)->mnt_sb->s_time_gran;
+sb->s_stack_depth = upper_sb->s_stack_depth;
+sb->s_time_gran = upper_sb->s_time_gran;
}
oe = ovl_get_lowerstack(sb, splitlower, numlower, ofs, layers);
err = PTR_ERR(oe);


@@ -958,3 +958,30 @@ err_free:
kfree(buf);
return ERR_PTR(res);
}
+
+/*
+ * ovl_sync_status() - Check fs sync status for volatile mounts
+ *
+ * Returns 1 if this is not a volatile mount and a real sync is required.
+ *
+ * Returns 0 if syncing can be skipped because mount is volatile, and no errors
+ * have occurred on the upperdir since the mount.
+ *
+ * Returns -errno if it is a volatile mount, and the error that occurred since
+ * the last mount. If the error code changes, it'll return the latest error
+ * code.
+ */
+int ovl_sync_status(struct ovl_fs *ofs)
+{
+struct vfsmount *mnt;
+
+if (ovl_should_sync(ofs))
+return 1;
+
+mnt = ovl_upper_mnt(ofs);
+if (!mnt)
+return 0;
+
+return errseq_check(&mnt->mnt_sb->s_wb_err, ofs->errseq);
+}
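
The volatile-mount plumbing across the overlayfs hunks above hangs off one idea: sample the upper filesystem's writeback error cursor at mount time, then let fsync/syncfs fail only if the cursor has advanced since. A toy model of that sample/check contract (a bare counter standing in for the kernel's errseq_t, which is really a sequence-stamped error word):

    #include <stdio.h>

    static int wb_err;                      /* upper sb's error cursor */
    static int errseq_sample_toy(void) { return wb_err; }
    static int errseq_check_toy(int since)
    {
        return wb_err != since ? -5 /* -EIO */ : 0;
    }

    int main(void)
    {
        int mount_sample = errseq_sample_toy(); /* as in ovl_fill_super() */

        printf("fsync before error: %d\n", errseq_check_toy(mount_sample)); /* 0 */
        wb_err++;                   /* upperdir hits a writeback error */
        printf("fsync after error:  %d\n", errseq_check_toy(mount_sample)); /* -5 */
        return 0;
    }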


@@ -783,6 +783,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);


@@ -770,6 +770,8 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
}
#endif
+void set_page_huge_active(struct page *page);
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};


@@ -632,7 +632,10 @@ static inline void dev_iommu_fwspec_set(struct device *dev,
static inline void *dev_iommu_priv_get(struct device *dev)
{
-return dev->iommu->priv;
+if (dev->iommu)
+return dev->iommu->priv;
+else
+return NULL;
}
static inline void dev_iommu_priv_set(struct device *dev, void *priv)


@@ -922,7 +922,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL)
#define irq_alloc_desc(node) \
-irq_alloc_descs(-1, 0, 1, node)
+irq_alloc_descs(-1, 1, 1, node)
#define irq_alloc_desc_at(at, node) \
irq_alloc_descs(at, at, 1, node)
@@ -937,7 +937,7 @@ int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
__devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL)
#define devm_irq_alloc_desc(dev, node) \
-devm_irq_alloc_descs(dev, -1, 0, 1, node)
+devm_irq_alloc_descs(dev, -1, 1, 1, node)
#define devm_irq_alloc_desc_at(dev, at, node) \
devm_irq_alloc_descs(dev, at, at, 1, node)


@@ -251,7 +251,7 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
extern int arch_populate_kprobe_blacklist(void);
extern bool arch_kprobe_on_func_entry(unsigned long offset);
-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
+extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
extern int kprobe_add_ksym_blacklist(unsigned long entry);


@@ -139,6 +139,12 @@ struct msi_desc {
list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#define for_each_msi_entry_safe(desc, tmp, dev) \
list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
+#define for_each_msi_vector(desc, __irq, dev) \
+for_each_msi_entry((desc), (dev)) \
+if ((desc)->irq) \
+for (__irq = (desc)->irq; \
+__irq < ((desc)->irq + (desc)->nvec_used); \
+__irq++)
#ifdef CONFIG_IRQ_MSI_IOMMU
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
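
A stand-alone model of the iteration order the new helper above gives you (plain C with a toy descriptor struct, not kernel code): each descriptor owns nvec_used consecutive irqs starting at irq, and the embedded `if ((desc)->irq)` guard skips descriptors whose vectors were already torn down.

    #include <stdio.h>

    struct msi_desc { unsigned int irq; unsigned int nvec_used; };

    int main(void)
    {
        struct msi_desc descs[] = { { 42, 4 }, { 0, 2 }, { 50, 1 } };

        for (unsigned int i = 0; i < 3; i++) {
            const struct msi_desc *desc = &descs[i];
            if (!desc->irq)
                continue;   /* mirrors the 'if ((desc)->irq)' guard */
            for (unsigned int irq = desc->irq;
                 irq < desc->irq + desc->nvec_used; irq++)
                printf("visit irq %u\n", irq);  /* 42..45, then 50 */
        }
        return 0;
    }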


@@ -307,11 +307,13 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
\
it_func_ptr = \
rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
-do { \
-it_func = (it_func_ptr)->func; \
-__data = (it_func_ptr)->data; \
-((void(*)(void *, proto))(it_func))(__data, args); \
-} while ((++it_func_ptr)->func); \
+if (it_func_ptr) { \
+do { \
+it_func = (it_func_ptr)->func; \
+__data = (it_func_ptr)->data; \
+((void(*)(void *, proto))(it_func))(__data, args); \
+} while ((++it_func_ptr)->func); \
+} \
return 0; \
} \
DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
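
The guard added above matters because the funcs array can legitimately be NULL while a tracepoint has no probes attached (or is mid-unregister). A userspace model of the NULL-terminated callback walk, before and after the check (toy types, not the tracepoint machinery):

    #include <stdio.h>

    struct tp_func { void (*func)(void *data); void *data; };

    static void probe(void *data) { printf("%s\n", (const char *)data); }

    static void call_probes(struct tp_func *it_func_ptr)
    {
        if (it_func_ptr) {      /* the added check */
            do {
                it_func_ptr->func(it_func_ptr->data);
            } while ((++it_func_ptr)->func);
        }
    }

    int main(void)
    {
        struct tp_func funcs[] = { { probe, "probe A" }, { NULL, NULL } };

        call_probes(funcs);     /* runs the registered probe */
        call_probes(NULL);      /* previously a NULL dereference */
        return 0;
    }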


@@ -24,7 +24,8 @@ struct notifier_block; /* in notifier.h */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
-#define VM_MAP_PUT_PAGES 0x00000100 /* put pages and free array in vfree */
+#define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
+#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */
/*
* VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC.
@@ -37,12 +38,6 @@ struct notifier_block; /* in notifier.h */
* determine which allocations need the module shadow freed.
*/
-/*
- * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
- * vfree_atomic().
- */
-#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */
-
/* bits [20..32] reserved for arch specific ioremap internals */
/*

Some files were not shown because too many files have changed in this diff.