Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
@@ -16,11 +16,13 @@ Required Properties:
 
 - clocks:
   Array of clocks required for SDHC.
-  Require at least input clock for Xenon IP core.
+  Require at least input clock for Xenon IP core. For Armada AP806 and
+  CP110, the AXI clock is also mandatory.
 
 - clock-names:
   Array of names corresponding to clocks property.
   The input clock for Xenon IP core should be named as "core".
+  The input clock for the AXI bus must be named as "axi".
 
 - reg:
   * For "marvell,armada-3700-sdhci", two register areas.
@@ -106,8 +108,8 @@ Example:
 		compatible = "marvell,armada-ap806-sdhci";
 		reg = <0xaa0000 0x1000>;
 		interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>
-		clocks = <&emmc_clk>;
-		clock-names = "core";
+		clocks = <&emmc_clk>,<&axi_clk>;
+		clock-names = "core", "axi";
 		bus-width = <4>;
 		marvell,xenon-phy-slow-mode;
 		marvell,xenon-tun-count = <11>;
@@ -126,8 +128,8 @@ Example:
 		interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>
 		vqmmc-supply = <&sd_vqmmc_regulator>;
 		vmmc-supply = <&sd_vmmc_regulator>;
-		clocks = <&sdclk>;
-		clock-names = "core";
+		clocks = <&sdclk>, <&axi_clk>;
+		clock-names = "core", "axi";
 		bus-width = <4>;
 		marvell,xenon-tun-count = <9>;
 	};

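For driver-side context, a consumer of this binding would look the two clocks up by the names fixed above. The fragment below is a hedged sketch only — the clk API calls (devm_clk_get, clk_prepare_enable) are the standard kernel interface, but the probe wiring and function name are illustrative assumptions, not taken from the Xenon driver:

    /* Hypothetical probe fragment: fetch the clocks named by this binding. */
    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int xenon_clk_probe_sketch(struct platform_device *pdev)
    {
    	struct clk *core, *axi;

    	core = devm_clk_get(&pdev->dev, "core");	/* required everywhere */
    	if (IS_ERR(core))
    		return PTR_ERR(core);

    	axi = devm_clk_get(&pdev->dev, "axi");	/* mandatory on AP806/CP110 */
    	if (IS_ERR(axi))
    		return PTR_ERR(axi);

    	/* Error handling trimmed for brevity. */
    	clk_prepare_enable(core);
    	clk_prepare_enable(axi);
    	return 0;
    }
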
@@ -210,8 +210,11 @@ path as another overlay mount and it may use a lower layer path that is
 beneath or above the path of another overlay lower layer path.
 
 Using an upper layer path and/or a workdir path that are already used by
-another overlay mount is not allowed and will fail with EBUSY. Using
+another overlay mount is not allowed and may fail with EBUSY. Using
 partially overlapping paths is not allowed but will not fail with EBUSY.
+If files are accessed from two overlayfs mounts which share or overlap the
+upper layer and/or workdir path the behavior of the overlay is undefined,
+though it will not result in a crash or deadlock.
 
 Mounting an overlay using an upper layer path, where the upper layer path
 was previously used by another mounted overlay in combination with a

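A minimal illustration of the constraint documented above — a throwaway userspace sketch, not part of the patch; paths and the program itself are assumptions. The second mount reuses the first mount's upperdir/workdir, which is the disallowed case and may be rejected with EBUSY:

    /* Sketch: two overlay mounts sharing the same upperdir/workdir. */
    #include <errno.h>
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
    	const char *opts = "lowerdir=/lower,upperdir=/upper,workdir=/work";

    	if (mount("overlay", "/mnt/a", "overlay", 0, opts))
    		perror("first overlay mount");

    	/* Same upper layer and workdir: not allowed. */
    	if (mount("overlay", "/mnt/b", "overlay", 0, opts) && errno == EBUSY)
    		fprintf(stderr, "second mount rejected with EBUSY\n");

    	return 0;
    }
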
@@ -36,6 +36,7 @@ Supported adapters:
 * Intel Gemini Lake (SOC)
 * Intel Cannon Lake-H (PCH)
 * Intel Cannon Lake-LP (PCH)
+* Intel Cedar Fork (PCH)
  Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller

@@ -2387,7 +2387,7 @@ broadcast: Like active-backup, there is not much advantage to this
 	and packet type ID), so in a "gatewayed" configuration, all
 	outgoing traffic will generally use the same device. Incoming
 	traffic may also end up on a single device, but that is
-	dependent upon the balancing policy of the peer's 8023.ad
+	dependent upon the balancing policy of the peer's 802.3ad
 	implementation. In a "local" configuration, traffic will be
 	distributed across the devices in the bond.
 

@@ -5259,7 +5259,8 @@ S:	Maintained
 F:	drivers/iommu/exynos-iommu.c
 
 EZchip NPS platform support
-M:	Noam Camus <noamc@ezchip.com>
+M:	Elad Kanfi <eladkan@mellanox.com>
+M:	Vineet Gupta <vgupta@synopsys.com>
 S:	Supported
 F:	arch/arc/plat-eznps
 F:	arch/arc/boot/dts/eznps.dts
@@ -9360,7 +9361,7 @@ NETWORK BLOCK DEVICE (NBD)
 M:	Josef Bacik <jbacik@fb.com>
 S:	Maintained
 L:	linux-block@vger.kernel.org
-L:	nbd-general@lists.sourceforge.net
+L:	nbd@other.debian.org
 F:	Documentation/blockdev/nbd.txt
 F:	drivers/block/nbd.c
 F:	include/uapi/linux/nbd.h

Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*

@@ -937,9 +937,6 @@ config STRICT_MODULE_RWX
 	  and non-text memory will be made non-executable. This provides
 	  protection against certain security exploits (e.g. writing to text)
 
-config ARCH_WANT_RELAX_ORDER
-	bool
-
 config ARCH_HAS_REFCOUNT
 	bool
 	help

@@ -24,7 +24,7 @@ config ARC
 	select GENERIC_SMP_IDLE_THREAD
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_FUTEX_CMPXCHG
+	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES

@@ -6,8 +6,6 @@
 # published by the Free Software Foundation.
 #
 
-UTS_MACHINE := arc
-
 ifeq ($(CROSS_COMPILE),)
 ifndef CONFIG_CPU_BIG_ENDIAN
 CROSS_COMPILE := arc-linux-

@@ -44,7 +44,14 @@
 
 	mmcclk: mmcclk {
 		compatible = "fixed-clock";
-		clock-frequency = <50000000>;
+		/*
+		 * DW sdio controller has external ciu clock divider
+		 * controlled via register in SDIO IP. It divides
+		 * sdio_ref_clk (which comes from CGU) by 16 for
+		 * default. So default mmcclk clock (which comes
+		 * to sdk_in) is 25000000 Hz.
+		 */
+		clock-frequency = <25000000>;
 		#clock-cells = <0>;
 	};
 

@@ -12,6 +12,7 @@
 /dts-v1/;
 
 #include <dt-bindings/net/ti-dp83867.h>
+#include <dt-bindings/reset/snps,hsdk-reset.h>
 
 / {
 	model = "snps,hsdk";
@@ -57,10 +58,10 @@
 		};
 	};
 
-	core_clk: core-clk {
+	input_clk: input-clk {
 		#clock-cells = <0>;
 		compatible = "fixed-clock";
-		clock-frequency = <500000000>;
+		clock-frequency = <33333333>;
 	};
 
 	cpu_intc: cpu-interrupt-controller {
@@ -102,6 +103,19 @@
 
 		ranges = <0x00000000 0xf0000000 0x10000000>;
 
+		cgu_rst: reset-controller@8a0 {
+			compatible = "snps,hsdk-reset";
+			#reset-cells = <1>;
+			reg = <0x8A0 0x4>, <0xFF0 0x4>;
+		};
+
+		core_clk: core-clk@0 {
+			compatible = "snps,hsdk-core-pll-clock";
+			reg = <0x00 0x10>, <0x14B8 0x4>;
+			#clock-cells = <0>;
+			clocks = <&input_clk>;
+		};
+
 		serial: serial@5000 {
 			compatible = "snps,dw-apb-uart";
 			reg = <0x5000 0x100>;
@@ -120,7 +134,17 @@
 
 		mmcclk_ciu: mmcclk-ciu {
 			compatible = "fixed-clock";
-			clock-frequency = <100000000>;
+			/*
+			 * DW sdio controller has external ciu clock divider
+			 * controlled via register in SDIO IP. Due to its
+			 * unexpected default value (it should divide by 1
+			 * but it divides by 8) SDIO IP uses wrong clock and
+			 * works unstable (see STAR 9001204800)
+			 * So add temporary fix and change clock frequency
+			 * from 100000000 to 12500000 Hz until we fix dw sdio
+			 * driver itself.
+			 */
+			clock-frequency = <12500000>;
 			#clock-cells = <0>;
 		};
 
@@ -141,6 +165,8 @@
 			clocks = <&gmacclk>;
 			clock-names = "stmmaceth";
 			phy-handle = <&phy0>;
+			resets = <&cgu_rst HSDK_ETH_RESET>;
+			reset-names = "stmmaceth";
 
 			mdio {
 				#address-cells = <1>;

@@ -105,7 +105,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set

@@ -104,7 +104,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set

@@ -107,7 +107,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set

@@ -84,5 +84,5 @@ CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 # CONFIG_DEBUG_PREEMPT is not set

@@ -63,6 +63,7 @@ CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_DW=y
 # CONFIG_IOMMU_SUPPORT is not set
+CONFIG_RESET_HSDK=y
 CONFIG_EXT3_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
@@ -72,7 +73,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set

@@ -94,7 +94,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_SHIRQ=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set

@@ -98,7 +98,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_SHIRQ=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set

@@ -98,6 +98,7 @@
 
 /* Auxiliary registers */
 #define AUX_IDENTITY		4
+#define AUX_EXEC_CTRL		8
 #define AUX_INTR_VEC_BASE	0x25
 #define AUX_VOL			0x5e
 
@@ -135,12 +136,12 @@ struct bcr_identity {
 #endif
 };
 
-struct bcr_isa {
+struct bcr_isa_arcv2 {
 #ifdef CONFIG_CPU_BIG_ENDIAN
 	unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1,
-		     pad1:11, atomic1:1, ver:8;
+		     pad1:12, ver:8;
 #else
-	unsigned int ver:8, atomic1:1, pad1:11, be:1, atomic:1, unalign:1,
+	unsigned int ver:8, pad1:12, be:1, atomic:1, unalign:1,
 		     ldd:1, pad2:4, div_rem:4;
 #endif
 };
@@ -263,13 +264,13 @@ struct cpuinfo_arc {
 	struct cpuinfo_arc_mmu mmu;
 	struct cpuinfo_arc_bpu bpu;
 	struct bcr_identity core;
-	struct bcr_isa isa;
+	struct bcr_isa_arcv2 isa;
 	const char *details, *name;
 	unsigned int vec_base;
 	struct cpuinfo_arc_ccm iccm, dccm;
 	struct {
 		unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
-			     fpu_sp:1, fpu_dp:1, pad2:6,
+			     fpu_sp:1, fpu_dp:1, dual_iss_enb:1, dual_iss_exist:1, pad2:4,
 			     debug:1, ap:1, smart:1, rtt:1, pad3:4,
 			     timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
 	} extn;

@@ -51,6 +51,7 @@ static const struct id_to_str arc_cpu_rel[] = {
 	{ 0x51, "R2.0" },
 	{ 0x52, "R2.1" },
 	{ 0x53, "R3.0" },
+	{ 0x54, "R4.0" },
 #endif
 	{ 0x00, NULL   }
 };
@@ -62,6 +63,7 @@ static const struct id_to_str arc_cpu_nm[] = {
 #else
 	{ 0x40, "ARC EM"  },
 	{ 0x50, "ARC HS38" },
+	{ 0x54, "ARC HS48" },
 #endif
 	{ 0x00, "Unknown" }
 };
@@ -119,11 +121,11 @@ static void read_arc_build_cfg_regs(void)
 	struct bcr_generic bcr;
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 	const struct id_to_str *tbl;
+	struct bcr_isa_arcv2 isa;
 
 	FIX_PTR(cpu);
 
 	READ_BCR(AUX_IDENTITY, cpu->core);
-	READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
 
 	for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
 		if (cpu->core.family == tbl->id) {
@@ -133,7 +135,7 @@ static void read_arc_build_cfg_regs(void)
 	}
 
 	for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
-		if ((cpu->core.family & 0xF0) == tbl->id)
+		if ((cpu->core.family & 0xF4) == tbl->id)
 			break;
 	}
 	cpu->name = tbl->str;
@@ -192,6 +194,14 @@ static void read_arc_build_cfg_regs(void)
 		cpu->bpu.full = bpu.ft;
 		cpu->bpu.num_cache = 256 << bpu.bce;
 		cpu->bpu.num_pred = 2048 << bpu.pte;
+
+		if (cpu->core.family >= 0x54) {
+			unsigned int exec_ctrl;
+
+			READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+			cpu->extn.dual_iss_exist = 1;
+			cpu->extn.dual_iss_enb = exec_ctrl & 1;
+		}
 	}
 
 	READ_BCR(ARC_REG_AP_BCR, bcr);
@@ -205,18 +215,25 @@ static void read_arc_build_cfg_regs(void)
 
 	cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
 
+	READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
+
 	/* some hacks for lack of feature BCR info in old ARC700 cores */
 	if (is_isa_arcompact()) {
-		if (!cpu->isa.ver)	/* ISA BCR absent, use Kconfig info */
+		if (!isa.ver)	/* ISA BCR absent, use Kconfig info */
 			cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
-		else
-			cpu->isa.atomic = cpu->isa.atomic1;
+		else {
+			/* ARC700_BUILD only has 2 bits of isa info */
+			struct bcr_generic bcr = *(struct bcr_generic *)&isa;
+			cpu->isa.atomic = bcr.info & 1;
+		}
 
 		cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
 
 		/* there's no direct way to distinguish 750 vs. 770 */
 		if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
			cpu->name = "ARC750";
+	} else {
+		cpu->isa = isa;
 	}
 }
@@ -232,10 +249,11 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 		       "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
 		       core->family, core->cpu_id, core->chip_id);
 
-	n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
+	n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
 		       cpu_id, cpu->name, cpu->details,
 		       is_isa_arcompact() ? "ARCompact" : "ARCv2",
-		       IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
+		       IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
+		       IS_AVAIL3(cpu->extn.dual_iss_exist, cpu->extn.dual_iss_enb, " Dual-Issue"));
 
 	n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
 		       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),

@@ -111,6 +111,13 @@ static void __init axs10x_early_init(void)
 
 	axs10x_enable_gpio_intc_wire();
 
+	/*
+	 * Reset ethernet IP core.
+	 * TODO: get rid of this quirk after axs10x reset driver (or simple
+	 * reset driver) will be available in upstream.
+	 */
+	iowrite32((1 << 5), (void __iomem *) CREG_MB_SW_RESET);
+
 	scnprintf(mb, 32, "MainBoard v%d", mb_rev);
 	axs10x_print_board_ver(CREG_MB_VER, mb);
 }

@@ -7,3 +7,4 @@
 
 menuconfig ARC_SOC_HSDK
 	bool "ARC HS Development Kit SOC"
+	select CLK_HSDK

@@ -38,6 +38,42 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
 #define CREG_PAE		(CREG_BASE + 0x180)
 #define CREG_PAE_UPDATE		(CREG_BASE + 0x194)
 
+#define CREG_CORE_IF_CLK_DIV	(CREG_BASE + 0x4B8)
+#define CREG_CORE_IF_CLK_DIV_2	0x1
+#define CGU_BASE		ARC_PERIPHERAL_BASE
+#define CGU_PLL_STATUS		(ARC_PERIPHERAL_BASE + 0x4)
+#define CGU_PLL_CTRL		(ARC_PERIPHERAL_BASE + 0x0)
+#define CGU_PLL_STATUS_LOCK	BIT(0)
+#define CGU_PLL_STATUS_ERR	BIT(1)
+#define CGU_PLL_CTRL_1GHZ	0x3A10
+#define HSDK_PLL_LOCK_TIMEOUT	500
+
+#define HSDK_PLL_LOCKED() \
+	!!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK)
+
+#define HSDK_PLL_ERR() \
+	!!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR)
+
+static void __init hsdk_set_cpu_freq_1ghz(void)
+{
+	u32 timeout = HSDK_PLL_LOCK_TIMEOUT;
+
+	/*
+	 * As we set cpu clock which exceeds 500MHz, the divider for the interface
+	 * clock must be programmed to div-by-2.
+	 */
+	iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV);
+
+	/* Set cpu clock to 1GHz */
+	iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL);
+
+	while (!HSDK_PLL_LOCKED() && timeout--)
+		cpu_relax();
+
+	if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR())
+		pr_err("Failed to setup CPU frequency to 1GHz!");
+}
+
 static void __init hsdk_init_early(void)
 {
 	/*
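The lock-polling loop added above is the classic poll-with-timeout idiom. For reference, the kernel's <linux/iopoll.h> expresses the same thing declaratively; a hedged sketch of the equivalent follows (the microsecond budget is an assumption, since the original counts cpu_relax() iterations, and the helper name is made up for illustration):

    /* Alternative sketch using readl_poll_timeout() from <linux/iopoll.h>. */
    #include <linux/iopoll.h>

    static int hsdk_wait_pll_lock_sketch(void __iomem *status)
    {
    	u32 val;

    	/* Poll CGU_PLL_STATUS until the LOCK bit is set, ~500 us budget. */
    	return readl_poll_timeout(status, val,
    				  val & CGU_PLL_STATUS_LOCK, 10, 500);
    }
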
@@ -52,6 +88,12 @@ static void __init hsdk_init_early(void)
 
 	/* Really apply settings made above */
 	writel(1, (void __iomem *) CREG_PAE_UPDATE);
+
+	/*
+	 * Setup CPU frequency to 1GHz.
+	 * TODO: remove it after smart hsdk pll driver will be introduced.
+	 */
+	hsdk_set_cpu_freq_1ghz();
 }
 
 static const char *hsdk_compat[] __initconst = {

@@ -95,16 +95,19 @@
 #define KERNEL_END        _end
 
 /*
- * The size of the KASAN shadow region. This should be 1/8th of the
- * size of the entire kernel virtual address space.
+ * KASAN requires 1/8th of the kernel virtual address space for the shadow
+ * region. KASAN can bloat the stack significantly, so double the (minimum)
+ * stack size when KASAN is in use.
 */
 #ifdef CONFIG_KASAN
 #define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - 3))
+#define KASAN_THREAD_SHIFT	1
 #else
 #define KASAN_SHADOW_SIZE	(0)
+#define KASAN_THREAD_SHIFT	0
 #endif
 
-#define MIN_THREAD_SHIFT	14
+#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
 
 /*
 * VMAP'd stacks are allocated at page granularity, so we must ensure that such

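Spelled out, the arithmetic above means the minimum thread stack doubles under KASAN — a worked restatement of the hunk, nothing more (the actual THREAD_SHIFT may still be rounded up elsewhere):

    /* Worked example of the MIN_THREAD_SHIFT change (illustrative only). */
    #ifdef CONFIG_KASAN
    #define KASAN_THREAD_SHIFT	1	/* 1 << 15 = 32768 bytes (32 KiB) */
    #else
    #define KASAN_THREAD_SHIFT	0	/* 1 << 14 = 16384 bytes (16 KiB) */
    #endif
    #define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
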
@@ -649,4 +649,4 @@ static int __init armv8_deprecated_init(void)
 	return 0;
 }
 
-late_initcall(armv8_deprecated_init);
+core_initcall(armv8_deprecated_init);

@@ -1307,4 +1307,4 @@ static int __init enable_mrs_emulation(void)
 	return 0;
 }
 
-late_initcall(enable_mrs_emulation);
+core_initcall(enable_mrs_emulation);

@@ -444,4 +444,4 @@ static int __init fpsimd_init(void)
 
 	return 0;
 }
-late_initcall(fpsimd_init);
+core_initcall(fpsimd_init);

@@ -97,7 +97,7 @@ static void data_abort_decode(unsigned int esr)
 			 (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
 			 (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
 	} else {
-		pr_alert("  ISV = 0, ISS = 0x%08lu\n", esr & ESR_ELx_ISS_MASK);
+		pr_alert("  ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
 	}
 
 	pr_alert("  CM = %lu, WnR = %lu\n",

@@ -146,7 +146,7 @@ void machine_power_off(void)
 
 	/* prevent soft lockup/stalled CPU messages for endless loop. */
 	rcu_sysrq_start();
-	lockup_detector_suspend();
+	lockup_detector_soft_poweroff();
 	for (;;);
 }
 

@@ -102,10 +102,10 @@ static void cpufeatures_flush_tlb(void)
 	case PVR_POWER8:
 	case PVR_POWER8E:
 	case PVR_POWER8NVL:
-		__flush_tlb_power8(POWER8_TLB_SETS);
+		__flush_tlb_power8(TLB_INVAL_SCOPE_GLOBAL);
 		break;
 	case PVR_POWER9:
-		__flush_tlb_power9(POWER9_TLB_SETS_HASH);
+		__flush_tlb_power9(TLB_INVAL_SCOPE_GLOBAL);
 		break;
 	default:
 		pr_err("unknown CPU version for boot TLB flush\n");

@@ -734,7 +734,29 @@ EXC_REAL(program_check, 0x700, 0x100)
 EXC_VIRT(program_check, 0x4700, 0x100, 0x700)
 TRAMP_KVM(PACA_EXGEN, 0x700)
 EXC_COMMON_BEGIN(program_check_common)
-	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+	/*
+	 * It's possible to receive a TM Bad Thing type program check with
+	 * userspace register values (in particular r1), but with SRR1 reporting
+	 * that we came from the kernel. Normally that would confuse the bad
+	 * stack logic, and we would report a bad kernel stack pointer. Instead
+	 * we switch to the emergency stack if we're taking a TM Bad Thing from
+	 * the kernel.
+	 */
+	li	r10,MSR_PR		/* Build a mask of MSR_PR ..	*/
+	oris	r10,r10,0x200000@h	/* .. and SRR1_PROGTM		*/
+	and	r10,r10,r12		/* Mask SRR1 with that.		*/
+	srdi	r10,r10,8		/* Shift it so we can compare	*/
+	cmpldi	r10,(0x200000 >> 8)	/* .. with an immediate.	*/
+	bne 1f				/* If != go to normal path.	*/
+
+	/* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack	*/
+	andi.	r10,r12,MSR_PR;		/* Set CR0 correctly for label	*/
+					/* 3 in EXCEPTION_PROLOG_COMMON	*/
+	mr	r10,r1			/* Save r1			*/
+	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
+	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
+	b 3f				/* Jump into the macro !!	*/
+1:	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
 	bl	save_nvgprs
 	RECONCILE_IRQ_STATE(r10, r11)
 	addi	r3,r1,STACK_FRAME_OVERHEAD

@@ -624,5 +624,18 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
 
 long __machine_check_early_realmode_p9(struct pt_regs *regs)
 {
+	/*
+	 * On POWER9 DD2.1 and below, it's possible to get a machine check
+	 * caused by a paste instruction where only DSISR bit 25 is set. This
+	 * will result in the MCE handler seeing an unknown event and the kernel
+	 * crashing. An MCE that occurs like this is spurious, so we don't need
+	 * to do anything in terms of servicing it. If there is something that
+	 * needs to be serviced, the CPU will raise the MCE again with the
+	 * correct DSISR so that it can be serviced properly. So detect this
+	 * case and mark it as handled.
+	 */
+	if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
+		return 1;
+
 	return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
 }

@@ -904,9 +904,6 @@ void __init setup_arch(char **cmdline_p)
 #endif
 #endif
 
-#ifdef CONFIG_PPC_64K_PAGES
-	init_mm.context.pte_frag = NULL;
-#endif
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 	mm_iommu_init(&init_mm);
 #endif

@@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
 	if (MSR_TM_RESV(msr))
 		return -EINVAL;
 
-	/* pull in MSR TM from user context */
+	/* pull in MSR TS bits from user context */
 	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
 
+	/*
+	 * Ensure that TM is enabled in regs->msr before we leave the signal
+	 * handler. It could be the case that (a) user disabled the TM bit
+	 * through the manipulation of the MSR bits in uc_mcontext or (b) the
+	 * TM bit was disabled because a sufficient number of context switches
+	 * happened whilst in the signal handler and load_tm overflowed,
+	 * disabling the TM bit. In either case we can end up with an illegal
+	 * TM state leading to a TM Bad Thing when we return to userspace.
+	 */
+	regs->msr |= MSR_TM;
+
 	/* pull in MSR LE from user context */
 	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
 

@@ -310,9 +310,6 @@ static int start_wd_on_cpu(unsigned int cpu)
 	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
 		return 0;
 
-	if (watchdog_suspended)
-		return 0;
-
 	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
 		return 0;
 
@@ -358,36 +355,39 @@ static void watchdog_calc_timeouts(void)
 	wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
 }
 
-void watchdog_nmi_reconfigure(void)
+void watchdog_nmi_stop(void)
+{
+	int cpu;
+
+	for_each_cpu(cpu, &wd_cpus_enabled)
+		stop_wd_on_cpu(cpu);
+}
+
+void watchdog_nmi_start(void)
 {
 	int cpu;
 
 	watchdog_calc_timeouts();
 
-	for_each_cpu(cpu, &wd_cpus_enabled)
-		stop_wd_on_cpu(cpu);
-
 	for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
 		start_wd_on_cpu(cpu);
 }
 
 /*
- * This runs after lockup_detector_init() which sets up watchdog_cpumask.
+ * Invoked from core watchdog init.
 */
-static int __init powerpc_watchdog_init(void)
+int __init watchdog_nmi_probe(void)
 {
 	int err;
 
-	watchdog_calc_timeouts();
-	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online",
+	err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+					"powerpc/watchdog:online",
 					start_wd_on_cpu, stop_wd_on_cpu);
-	if (err < 0)
+	if (err < 0) {
 		pr_warn("Watchdog could not be initialized");
+		return err;
+	}
 	return 0;
 }
-arch_initcall(powerpc_watchdog_init);
 
 static void handle_backtrace_ipi(struct pt_regs *regs)
 {

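For orientation, the renamed hooks follow the generic lockup-detector interface: the core code is expected to bracket a reconfiguration with a stop/start pair and to call watchdog_nmi_probe() once at init. A rough sketch of that caller-side pattern, assumed from the hook names rather than quoted from kernel/watchdog.c:

    /* Assumed caller-side pattern for the arch hooks above (sketch only). */
    void watchdog_nmi_stop(void);
    void watchdog_nmi_start(void);

    static void lockup_detector_reconfigure_sketch(void)
    {
    	watchdog_nmi_stop();	/* quiesce the arch NMI watchdog on all CPUs */
    	/* ...recompute timeouts / update the watchdog cpumask here... */
    	watchdog_nmi_start();	/* re-arm with the new parameters */
    }
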
@@ -622,7 +622,7 @@ int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
 		return -EINVAL;
 	state = &sb->irq_state[idx];
 	arch_spin_lock(&sb->lock);
-	*server = state->guest_server;
+	*server = state->act_server;
 	*priority = state->guest_priority;
 	arch_spin_unlock(&sb->lock);
 
@@ -1331,7 +1331,7 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
 	xive->saved_src_count++;
 
 	/* Convert saved state into something compatible with xics */
-	val = state->guest_server;
+	val = state->act_server;
 	prio = state->saved_scan_prio;
 
 	if (prio == MASKED) {
@@ -1507,7 +1507,6 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
 	/* First convert prio and mark interrupt as untargetted */
 	act_prio = xive_prio_from_guest(guest_prio);
 	state->act_priority = MASKED;
-	state->guest_server = server;
 
 	/*
 	 * We need to drop the lock due to the mutex below. Hopefully

@@ -35,7 +35,6 @@ struct kvmppc_xive_irq_state {
 	struct xive_irq_data *pt_data;	/* XIVE Pass-through associated data */
 
 	/* Targetting as set by guest */
-	u32 guest_server;		/* Current guest selected target */
 	u8 guest_priority;		/* Guest set priority */
 	u8 saved_priority;		/* Saved priority when masking */
 

@@ -361,9 +361,9 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 			break;
 	}
 	wmb();
+	local_irq_restore(flags);
 	flush_tlb_kernel_range((unsigned long)page_address(start),
 			       (unsigned long)page_address(page));
-	local_irq_restore(flags);
 	return err;
 }
 

@@ -272,6 +272,14 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 static unsigned long pnv_memory_block_size(void)
 {
+	/*
+	 * We map the kernel linear region with 1GB large pages on radix. For
+	 * memory hot unplug to work our memory block size must be at least
+	 * this size.
+	 */
+	if (radix_enabled())
+		return 1UL * 1024 * 1024 * 1024;
+	else
 	return 256UL * 1024 * 1024;
 }
 #endif

@@ -1402,6 +1402,14 @@ void xive_teardown_cpu(void)
 
 	if (xive_ops->teardown_cpu)
 		xive_ops->teardown_cpu(cpu, xc);
+
+#ifdef CONFIG_SMP
+	/* Get rid of IPI */
+	xive_cleanup_cpu_ipi(cpu, xc);
+#endif
+
+	/* Disable and free the queues */
+	xive_cleanup_cpu_queues(cpu, xc);
 }
 
 void xive_kexec_teardown_cpu(int secondary)

@@ -431,7 +431,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
 
 static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
 {
+	if (!xc->hw_ipi)
+		return;
+
 	xive_irq_bitmap_free(xc->hw_ipi);
+	xc->hw_ipi = 0;
 }
 #endif /* CONFIG_SMP */
 

@@ -44,7 +44,6 @@ config SPARC
 	select ARCH_HAS_SG_CHAIN
 	select CPU_NO_EFFICIENT_FFS
 	select LOCKDEP_SMALL if LOCKDEP
-	select ARCH_WANT_RELAX_ORDER
 
 config SPARC32
 	def_bool !64BIT

@@ -4409,10 +4409,9 @@ static __init int fixup_ht_bug(void)
 		return 0;
 	}
 
-	if (lockup_detector_suspend() != 0) {
-		pr_debug("failed to disable PMU erratum BJ122, BV98, HSD29 workaround\n");
-		return 0;
-	}
+	cpus_read_lock();
+
+	hardlockup_detector_perf_stop();
 
 	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
 
@@ -4420,9 +4419,7 @@ static __init int fixup_ht_bug(void)
 	x86_pmu.commit_scheduling = NULL;
 	x86_pmu.stop_scheduling = NULL;
 
-	lockup_detector_resume();
+	hardlockup_detector_perf_restart();
 
-	cpus_read_lock();
-
 	for_each_online_cpu(c)
 		free_excl_cntrs(c);

@@ -88,7 +88,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 bool kvm_para_available(void);
 unsigned int kvm_arch_para_features(void);
 void __init kvm_guest_init(void);
-void kvm_async_pf_task_wait(u32 token);
+void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
 void kvm_async_pf_task_wake(u32 token);
 u32 kvm_read_and_reset_pf_reason(void);
 extern void kvm_disable_steal_time(void);
@@ -103,7 +103,7 @@ static inline void kvm_spinlock_init(void)
 
 #else /* CONFIG_KVM_GUEST */
 #define kvm_guest_init() do {} while (0)
-#define kvm_async_pf_task_wait(T) do {} while(0)
+#define kvm_async_pf_task_wait(T, I) do {} while(0)
 #define kvm_async_pf_task_wake(T) do {} while(0)
 
 static inline bool kvm_para_available(void)

@@ -117,7 +117,11 @@ static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
 	return NULL;
 }
 
-void kvm_async_pf_task_wait(u32 token)
+/*
+ * @interrupt_kernel: Is this called from a routine which interrupts the kernel
+ * (other than user space)?
+ */
+void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
 {
 	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
 	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
@@ -140,8 +144,10 @@ void kvm_async_pf_task_wait(u32 token)
 
 	n.token = token;
 	n.cpu = smp_processor_id();
-	n.halted = is_idle_task(current) || preempt_count() > 1 ||
-		   rcu_preempt_depth();
+	n.halted = is_idle_task(current) ||
+		   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
+		    ? preempt_count() > 1 || rcu_preempt_depth()
+		    : interrupt_kernel);
 	init_swait_queue_head(&n.wq);
 	hlist_add_head(&n.link, &b->list);
 	raw_spin_unlock(&b->lock);
@@ -269,7 +275,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
 		/* page is swapped out by the host. */
 		prev_state = exception_enter();
-		kvm_async_pf_task_wait((u32)read_cr2());
+		kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
 		exception_exit(prev_state);
 		break;
 	case KVM_PV_REASON_PAGE_READY:

@@ -23,6 +23,7 @@ config KVM
 	depends on HIGH_RES_TIMERS
 	# for TASKSTATS/TASK_DELAY_ACCT:
 	depends on NET && MULTIUSER
+	depends on X86_LOCAL_APIC
 	select PREEMPT_NOTIFIERS
 	select MMU_NOTIFIER
 	select ANON_INODES

@@ -425,8 +425,10 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
 	#op " %al \n\t" \
 	FOP_RET
 
-asm(".global kvm_fastop_exception \n"
-    "kvm_fastop_exception: xor %esi, %esi; ret");
+asm(".pushsection .fixup, \"ax\"\n"
+    ".global kvm_fastop_exception \n"
+    "kvm_fastop_exception: xor %esi, %esi; ret\n"
+    ".popsection");
 
 FOP_START(setcc)
 FOP_SETCC(seto)

@@ -3837,7 +3837,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
 		vcpu->arch.apf.host_apf_reason = 0;
 		local_irq_disable();
-		kvm_async_pf_task_wait(fault_address);
+		kvm_async_pf_task_wait(fault_address, 0);
 		local_irq_enable();
 		break;
 	case KVM_PV_REASON_PAGE_READY:

@@ -815,10 +815,14 @@ int blk_mq_debugfs_register(struct request_queue *q)
 		goto err;
 
 	/*
-	 * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir
+	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
 	 * didn't exist yet (because we don't know what to name the directory
 	 * until the queue is registered to a gendisk).
 	 */
+	if (q->elevator && !q->sched_debugfs_dir)
+		blk_mq_debugfs_register_sched(q);
+
+	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
 			goto err;

@@ -1911,11 +1911,11 @@ static void throtl_upgrade_state(struct throtl_data *td)
 
 		tg->disptime = jiffies - 1;
 		throtl_select_dispatch(sq);
-		throtl_schedule_next_dispatch(sq, false);
+		throtl_schedule_next_dispatch(sq, true);
 	}
 	rcu_read_unlock();
 	throtl_select_dispatch(&td->service_queue);
-	throtl_schedule_next_dispatch(&td->service_queue, false);
+	throtl_schedule_next_dispatch(&td->service_queue, true);
 	queue_work(kthrotld_workqueue, &td->dispatch_work);
 }
 

@@ -207,20 +207,34 @@ static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
 	struct bsg_job *job = blk_mq_rq_to_pdu(req);
 	struct scsi_request *sreq = &job->sreq;
 
-	memset(job, 0, sizeof(*job));
+	/* called right after the request is allocated for the request_queue */
 
-	scsi_req_init(sreq);
-	sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
-	sreq->sense = kzalloc(sreq->sense_len, gfp);
+	sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
 	if (!sreq->sense)
 		return -ENOMEM;
 
+	return 0;
+}
+
+static void bsg_initialize_rq(struct request *req)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(req);
+	struct scsi_request *sreq = &job->sreq;
+	void *sense = sreq->sense;
+
+	/* called right before the request is given to the request_queue user */
+
+	memset(job, 0, sizeof(*job));
+
+	scsi_req_init(sreq);
+
+	sreq->sense = sense;
+	sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
+
 	job->req = req;
-	job->reply = sreq->sense;
+	job->reply = sense;
 	job->reply_len = sreq->sense_len;
 	job->dd_data = job + 1;
 
-	return 0;
 }
 
 static void bsg_exit_rq(struct request_queue *q, struct request *req)
@@ -251,6 +265,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 	q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
 	q->init_rq_fn = bsg_init_rq;
 	q->exit_rq_fn = bsg_exit_rq;
+	q->initialize_rq_fn = bsg_initialize_rq;
 	q->request_fn = bsg_request_fn;
 
 	ret = blk_init_allocated_queue(q);

@@ -1178,12 +1178,44 @@ dev_put:
 	return ret;
 }
 
+static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
+{
+	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
+		struct acpi_iort_node *parent;
+		struct acpi_iort_id_mapping *map;
+		int i;
+
+		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
+				   iort_node->mapping_offset);
+
+		for (i = 0; i < iort_node->mapping_count; i++, map++) {
+			if (!map->output_reference)
+				continue;
+
+			parent = ACPI_ADD_PTR(struct acpi_iort_node,
+					iort_table,  map->output_reference);
+			/*
+			 * If we detect a RC->SMMU mapping, make sure
+			 * we enable ACS on the system.
+			 */
+			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
+				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
+				pci_request_acs();
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
 static void __init iort_init_platform_devices(void)
 {
 	struct acpi_iort_node *iort_node, *iort_end;
 	struct acpi_table_iort *iort;
 	struct fwnode_handle *fwnode;
 	int i, ret;
+	bool acs_enabled = false;
 
 	/*
 	 * iort_table and iort both point to the start of IORT table, but
@@ -1203,6 +1235,9 @@ static void __init iort_init_platform_devices(void)
 			return;
 		}
 
+		if (!acs_enabled)
+			acs_enabled = iort_enable_acs(iort_node);
+
 		if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
 			(iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
 

@@ -17,7 +17,7 @@ if BLK_DEV
 
 config BLK_DEV_NULL_BLK
 	tristate "Null test block driver"
-	depends on CONFIGFS_FS
+	select CONFIGFS_FS
 
 config BLK_DEV_FD
 	tristate "Normal floppy disk support"

@@ -820,9 +820,13 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 	 * appropriate.
 	 */
 	ret = nbd_handle_cmd(cmd, hctx->queue_num);
+	if (ret < 0)
+		ret = BLK_STS_IOERR;
+	else if (!ret)
+		ret = BLK_STS_OK;
 	complete(&cmd->send_complete);
 
-	return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
+	return ret;
 }
 
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,

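The point of the reordering above is that the errno-style result must be translated into a blk_status_t before complete() runs, so the completed command never observes a raw negative value. A hedged restatement of that mapping as a stand-alone helper (the helper name is illustrative; positive values are assumed to already be blk_status_t codes, mirroring the patch):

    /* Sketch: map nbd_handle_cmd()'s return value to a blk_status_t. */
    #include <linux/blk_types.h>

    static inline blk_status_t nbd_cmd_status_sketch(int ret)
    {
    	if (ret < 0)
    		return BLK_STS_IOERR;	/* submission failed */
    	if (!ret)
    		return BLK_STS_OK;	/* queued successfully */
    	return ret;			/* already a blk_status_t */
    }
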
@@ -105,6 +105,7 @@ err:
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(clk_bulk_prepare);
 
 #endif /* CONFIG_HAVE_CLK_PREPARE */
 

@@ -315,13 +315,13 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
 			RK2928_CLKGATE_CON(10), 8, GFLAGS),
 
 	GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0,
-			RK2928_CLKGATE_CON(10), 8, GFLAGS),
+			RK2928_CLKGATE_CON(10), 0, GFLAGS),
 	GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0,
-			RK2928_CLKGATE_CON(10), 8, GFLAGS),
+			RK2928_CLKGATE_CON(10), 1, GFLAGS),
 	GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0,
-			RK2928_CLKGATE_CON(10), 8, GFLAGS),
+			RK2928_CLKGATE_CON(10), 2, GFLAGS),
 	GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
-			RK2928_CLKGATE_CON(10), 8, GFLAGS),
+			RK2928_CLKGATE_CON(2), 15, GFLAGS),
 
 	COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
 			RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
@@ -541,7 +541,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
 	GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
 	GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS),
 
-	GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 2, GFLAGS),
+	GATE(0, "pclk_pmu", "pclk_pmu_pre", 0, RK2928_CLKGATE_CON(9), 2, GFLAGS),
 	GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS),
 
 	/* PD_MMC */
@@ -577,6 +577,8 @@ static const char *const rk3128_critical_clocks[] __initconst = {
 	"aclk_peri",
 	"hclk_peri",
 	"pclk_peri",
+	"pclk_pmu",
+	"sclk_timer5",
 };
 
 static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np)

@@ -294,6 +294,18 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = {
 #define PLL_ENABLED	(1 << 31)
 #define PLL_LOCKED	(1 << 29)

+static void exynos4_clk_enable_pll(u32 reg)
+{
+	u32 pll_con = readl(reg_base + reg);
+	pll_con |= PLL_ENABLED;
+	writel(pll_con, reg_base + reg);
+
+	while (!(pll_con & PLL_LOCKED)) {
+		cpu_relax();
+		pll_con = readl(reg_base + reg);
+	}
+}
+
 static void exynos4_clk_wait_for_pll(u32 reg)
 {
 	u32 pll_con;
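
The new exynos4_clk_enable_pll() above is the classic enable-then-poll idiom: set the enable bit, then spin until the hardware reports lock. A self-contained model of the same loop, with a plain variable standing in for the MMIO register and a counter simulating the hardware's lock delay (all names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PLL_ENABLED (1u << 31)
    #define PLL_LOCKED  (1u << 29)

    static uint32_t reg;
    static int reads;

    static uint32_t read_reg(void)
    {
            if (++reads > 3)        /* "hardware" locks after a few reads */
                    reg |= PLL_LOCKED;
            return reg;
    }

    int main(void)
    {
            uint32_t pll_con = read_reg();

            pll_con |= PLL_ENABLED;
            reg = pll_con;                  /* the writel() step */

            while (!(pll_con & PLL_LOCKED)) /* poll for the lock bit */
                    pll_con = read_reg();

            printf("locked after %d reads\n", reads);
            return 0;
    }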
@@ -315,6 +327,9 @@ static int exynos4_clk_suspend(void)
 	samsung_clk_save(reg_base, exynos4_save_pll,
 				ARRAY_SIZE(exynos4_clk_pll_regs));

+	exynos4_clk_enable_pll(EPLL_CON0);
+	exynos4_clk_enable_pll(VPLL_CON0);
+
 	if (exynos4_soc == EXYNOS4210) {
 		samsung_clk_save(reg_base, exynos4_save_soc,
 					ARRAY_SIZE(exynos4210_clk_save));
@@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
 			 connector->encoder->base.id,
 			 connector->encoder->name);

-	/* ELD Conn_Type */
-	connector->eld[5] &= ~(3 << 2);
-	if (intel_crtc_has_dp_encoder(crtc_state))
-		connector->eld[5] |= (1 << 2);
-
 	connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;

 	if (dev_priv->display.audio_codec_enable)
@@ -1163,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 	is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
 	is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);

+	if (port == PORT_A && is_dvi) {
+		DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+			      is_hdmi ? "/HDMI" : "");
+		is_dvi = false;
+		is_hdmi = false;
+	}
+
 	info->supports_dvi = is_dvi;
 	info->supports_hdmi = is_hdmi;
 	info->supports_dp = is_dp;
@@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)

 	mask = DC_STATE_DEBUG_MASK_MEMORY_UP;

-	if (IS_BROXTON(dev_priv))
+	if (IS_GEN9_LP(dev_priv))
 		mask |= DC_STATE_DEBUG_MASK_CORES;

 	/* The below bit doesn't need to be cleared ever afterwards */
@@ -1655,7 +1655,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 out:
 	if (ret && IS_GEN9_LP(dev_priv)) {
 		tmp = I915_READ(BXT_PHY_CTL(port));
-		if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
+		if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
+			    BXT_PHY_LANE_POWERDOWN_ACK |
 			    BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
 			DRM_ERROR("Port %c enabled but PHY powered down? "
 				  "(PHY_CTL %08x)\n", port_name(port), tmp);
@@ -12359,7 +12359,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct drm_crtc *crtc;
 	struct intel_crtc_state *intel_cstate;
-	bool hw_check = intel_state->modeset;
 	u64 put_domains[I915_MAX_PIPES] = {};
 	unsigned crtc_vblank_mask = 0;
 	int i;
@@ -12376,7 +12375,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)

 		if (needs_modeset(new_crtc_state) ||
 		    to_intel_crtc_state(new_crtc_state)->update_pipe) {
-			hw_check = true;

 			put_domains[to_intel_crtc(crtc)->pipe] =
 				modeset_get_crtc_power_domains(crtc,
@@ -208,12 +208,6 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
 	},
 };

-static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
-{
-	return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
-		BIT(phy_info->channel[DPIO_CH0].port);
-}
-
 static const struct bxt_ddi_phy_info *
 bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
 {
@@ -313,7 +307,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
 			    enum dpio_phy phy)
 {
 	const struct bxt_ddi_phy_info *phy_info;
-	enum port port;

 	phy_info = bxt_get_phy_info(dev_priv, phy);

@@ -335,19 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
 		return false;
 	}

-	for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
-		u32 tmp = I915_READ(BXT_PHY_CTL(port));
-
-		if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
-			DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
-					 "for port %c powered down "
-					 "(PHY_CTL %08x)\n",
-					 phy, port_name(port), tmp);
-
-			return false;
-		}
-	}
-
 	return true;
 }

@@ -30,6 +30,21 @@
 #include "intel_drv.h"
 #include "i915_drv.h"

+static void intel_connector_update_eld_conn_type(struct drm_connector *connector)
+{
+	u8 conn_type;
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+	    connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		conn_type = DRM_ELD_CONN_TYPE_DP;
+	} else {
+		conn_type = DRM_ELD_CONN_TYPE_HDMI;
+	}
+
+	connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK;
+	connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type;
+}
+
 /**
  * intel_connector_update_modes - update connector from edid
  * @connector: DRM connector device to use
@@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector,
 	ret = drm_add_edid_modes(connector, edid);
 	drm_edid_to_eld(connector, edid);

+	intel_connector_update_eld_conn_type(connector);
+
 	return ret;
 }

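
intel_connector_update_eld_conn_type() above is a masked read-modify-write on a single ELD byte: clear the connection-type field, then OR in the new value. The arithmetic in isolation; the mask and type values below are assumptions for the demo, not the DRM_ELD_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define CONN_TYPE_MASK (3u << 2)  /* assumed 2-bit field at bits 3:2 */
    #define CONN_TYPE_DP   (1u << 2)

    int main(void)
    {
            uint8_t eld_byte = 0xff;

            eld_byte &= ~CONN_TYPE_MASK;  /* clear the field */
            eld_byte |= CONN_TYPE_DP;     /* write the new type */
            printf("0x%02x\n", eld_byte); /* prints 0xf7 */
            return 0;
    }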
@@ -2782,6 +2782,9 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume

 	/* 6. Enable DBUF */
 	gen9_dbuf_enable(dev_priv);

+	if (resume && dev_priv->csr.dmc_payload)
+		intel_csr_load_program(dev_priv);
 }

 #undef CNL_PROCMON_IDX
@@ -302,26 +302,29 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
 	hdmi->mod_clk = devm_clk_get(dev, "mod");
 	if (IS_ERR(hdmi->mod_clk)) {
 		dev_err(dev, "Couldn't get the HDMI mod clock\n");
-		return PTR_ERR(hdmi->mod_clk);
+		ret = PTR_ERR(hdmi->mod_clk);
+		goto err_disable_bus_clk;
 	}
 	clk_prepare_enable(hdmi->mod_clk);

 	hdmi->pll0_clk = devm_clk_get(dev, "pll-0");
 	if (IS_ERR(hdmi->pll0_clk)) {
 		dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n");
-		return PTR_ERR(hdmi->pll0_clk);
+		ret = PTR_ERR(hdmi->pll0_clk);
+		goto err_disable_mod_clk;
 	}

 	hdmi->pll1_clk = devm_clk_get(dev, "pll-1");
 	if (IS_ERR(hdmi->pll1_clk)) {
 		dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n");
-		return PTR_ERR(hdmi->pll1_clk);
+		ret = PTR_ERR(hdmi->pll1_clk);
+		goto err_disable_mod_clk;
 	}

 	ret = sun4i_tmds_create(hdmi);
 	if (ret) {
 		dev_err(dev, "Couldn't create the TMDS clock\n");
-		return ret;
+		goto err_disable_mod_clk;
 	}

 	writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG);
@@ -362,7 +365,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
 	ret = sun4i_hdmi_i2c_create(dev, hdmi);
 	if (ret) {
 		dev_err(dev, "Couldn't create the HDMI I2C adapter\n");
-		return ret;
+		goto err_disable_mod_clk;
 	}

 	drm_encoder_helper_add(&hdmi->encoder,
@@ -422,6 +425,10 @@ err_cleanup_connector:
 	drm_encoder_cleanup(&hdmi->encoder);
 err_del_i2c_adapter:
 	i2c_del_adapter(hdmi->i2c);
+err_disable_mod_clk:
+	clk_disable_unprepare(hdmi->mod_clk);
+err_disable_bus_clk:
+	clk_disable_unprepare(hdmi->bus_clk);
 	return ret;
 }

@@ -434,6 +441,8 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
 	drm_connector_cleanup(&hdmi->connector);
 	drm_encoder_cleanup(&hdmi->encoder);
 	i2c_del_adapter(hdmi->i2c);
+	clk_disable_unprepare(hdmi->mod_clk);
+	clk_disable_unprepare(hdmi->bus_clk);
 }

 static const struct component_ops sun4i_hdmi_ops = {
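
The sun4i_hdmi hunks above convert bare early returns into the goto-unwind idiom: each resource acquired so far is released by falling through a ladder of labels, so no clock is left enabled on a failed bind. A compact stand-alone model of the same shape, with hypothetical acquire/release helpers:

    #include <stdio.h>

    static int acquire_a(void) { return 0; }    /* 0 = success */
    static int acquire_b(void) { return -1; }   /* simulate a failure */
    static void release_a(void) { puts("release a"); }

    static int bind(void)
    {
            int ret;

            ret = acquire_a();
            if (ret)
                    goto err;           /* nothing held yet */

            ret = acquire_b();
            if (ret)
                    goto err_release_a; /* only a is held */

            return 0;

    err_release_a:
            release_a();
    err:
            return ret;
    }

    int main(void) { return bind() ? 1 : 0; }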
@@ -630,7 +630,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
 			sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE,
 			GFP_KERNEL);
 	if (rc)
-		goto out_mbox_free;
+		return -ENOMEM;

 	INIT_WORK(&ctx->workq, xgene_hwmon_evt_work);

@@ -646,7 +646,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
 		if (IS_ERR(ctx->mbox_chan)) {
 			dev_err(&pdev->dev,
 				"SLIMpro mailbox channel request failed\n");
-			return -ENODEV;
+			rc = -ENODEV;
+			goto out_mbox_free;
 		}
 	} else {
 		struct acpi_pcct_hw_reduced *cppc_ss;
@@ -654,7 +655,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
 		if (device_property_read_u32(&pdev->dev, "pcc-channel",
 					     &ctx->mbox_idx)) {
 			dev_err(&pdev->dev, "no pcc-channel property\n");
-			return -ENODEV;
+			rc = -ENODEV;
+			goto out_mbox_free;
 		}

 		cl->rx_callback = xgene_hwmon_pcc_rx_cb;
@@ -662,7 +664,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
 		if (IS_ERR(ctx->mbox_chan)) {
 			dev_err(&pdev->dev,
 				"PPC channel request failed\n");
-			return -ENODEV;
+			rc = -ENODEV;
+			goto out_mbox_free;
 		}

 		/*
@@ -675,13 +678,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
 		if (!cppc_ss) {
 			dev_err(&pdev->dev, "PPC subspace not found\n");
 			rc = -ENODEV;
-			goto out_mbox_free;
+			goto out;
 		}

 		if (!ctx->mbox_chan->mbox->txdone_irq) {
 			dev_err(&pdev->dev, "PCC IRQ not supported\n");
 			rc = -ENODEV;
-			goto out_mbox_free;
+			goto out;
 		}

 		/*
@@ -696,14 +699,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
 		} else {
 			dev_err(&pdev->dev, "Failed to get PCC comm region\n");
 			rc = -ENODEV;
-			goto out_mbox_free;
+			goto out;
 		}

 		if (!ctx->pcc_comm_addr) {
 			dev_err(&pdev->dev,
 				"Failed to ioremap PCC comm region\n");
 			rc = -ENOMEM;
-			goto out_mbox_free;
+			goto out;
 		}

 		/*
@@ -131,6 +131,7 @@ config I2C_I801
 	    Gemini Lake (SOC)
 	    Cannon Lake-H (PCH)
 	    Cannon Lake-LP (PCH)
+	    Cedar Fork (PCH)

 	  This driver can also be built as a module. If so, the module
 	  will be called i2c-i801.
@@ -68,6 +68,7 @@
 * Gemini Lake (SOC)		0x31d4	32	hard	yes	yes	yes
 * Cannon Lake-H (PCH)		0xa323	32	hard	yes	yes	yes
 * Cannon Lake-LP (PCH)		0x9da3	32	hard	yes	yes	yes
+* Cedar Fork (PCH)		0x18df	32	hard	yes	yes	yes
 *
 * Features supported by this driver:
 * Software PEC				no
@@ -204,6 +205,7 @@

 /* Older devices have their ID defined in <linux/pci_ids.h> */
 #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS	0x0f12
+#define PCI_DEVICE_ID_INTEL_CDF_SMBUS		0x18df
 #define PCI_DEVICE_ID_INTEL_DNV_SMBUS		0x19df
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS	0x1c22
 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS	0x1d22
@@ -1025,6 +1027,7 @@ static const struct pci_device_id i801_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
@@ -1513,6 +1516,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS:
 	case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
 	case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
+	case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
 	case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
 	case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
 		priv->features |= FEATURE_I2C_BLOCK_READ;
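
Wiring a new chip into this driver is the three mechanical steps visible above: a device-ID define, an entry in the pci_device_id match table, and a case in the feature switch. A toy version of the table lookup itself; the struct and loop are simplified stand-ins for the PCI core's matching, not its real API:

    #include <stdint.h>
    #include <stdio.h>

    struct pci_id { uint16_t vendor, device; };

    static const struct pci_id ids[] = {
            { 0x8086, 0x19df },   /* DNV */
            { 0x8086, 0x18df },   /* the new Cedar Fork ID */
            { 0, 0 }              /* sentinel terminates the table */
    };

    static int match(uint16_t vendor, uint16_t device)
    {
            for (const struct pci_id *p = ids; p->vendor; p++)
                    if (p->vendor == vendor && p->device == device)
                            return 1;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", match(0x8086, 0x18df)); /* prints 1 */
            return 0;
    }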
@@ -627,6 +627,7 @@ static const struct dev_pm_ops sprd_i2c_pm_ops = {

 static const struct of_device_id sprd_i2c_of_match[] = {
 	{ .compatible = "sprd,sc9860-i2c", },
+	{},
 };

 static struct platform_driver sprd_i2c_driver = {
@@ -215,7 +215,7 @@ struct stm32f7_i2c_dev {
 	unsigned int msg_num;
 	unsigned int msg_id;
 	struct stm32f7_i2c_msg f7_msg;
-	struct stm32f7_i2c_setup *setup;
+	struct stm32f7_i2c_setup setup;
 	struct stm32f7_i2c_timings timing;
 };

@@ -265,7 +265,7 @@ static struct stm32f7_i2c_spec i2c_specs[] = {
 	},
 };

-struct stm32f7_i2c_setup stm32f7_setup = {
+static const struct stm32f7_i2c_setup stm32f7_setup = {
 	.rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
 	.fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
 	.dnf = STM32F7_I2C_DNF_DEFAULT,
@@ -537,7 +537,7 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
 	writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR);

 	/* Enable I2C */
-	if (i2c_dev->setup->analog_filter)
+	if (i2c_dev->setup.analog_filter)
 		stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
 				     STM32F7_I2C_CR1_ANFOFF);
 	else
@@ -887,22 +887,19 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
 	}

 	setup = of_device_get_match_data(&pdev->dev);
-	i2c_dev->setup->rise_time = setup->rise_time;
-	i2c_dev->setup->fall_time = setup->fall_time;
-	i2c_dev->setup->dnf = setup->dnf;
-	i2c_dev->setup->analog_filter = setup->analog_filter;
+	i2c_dev->setup = *setup;

 	ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",
 				       &rise_time);
 	if (!ret)
-		i2c_dev->setup->rise_time = rise_time;
+		i2c_dev->setup.rise_time = rise_time;

 	ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns",
 				       &fall_time);
 	if (!ret)
-		i2c_dev->setup->fall_time = fall_time;
+		i2c_dev->setup.fall_time = fall_time;

-	ret = stm32f7_i2c_setup_timing(i2c_dev, i2c_dev->setup);
+	ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
 	if (ret)
 		goto clk_free;

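
The stm32f7 hunks switch the per-device setup from a pointer into a shared template to a copy embedded by value, so device-tree overrides mutate only that instance and the now-const template stays pristine. The core of the idea in a few lines; field names follow the diff, the default values are assumed:

    #include <stdio.h>

    struct i2c_setup {
            unsigned int rise_time;
            unsigned int fall_time;
    };

    static const struct i2c_setup stm32f7_setup = {
            .rise_time = 25, .fall_time = 10,  /* assumed defaults */
    };

    struct i2c_dev {
            struct i2c_setup setup;   /* embedded by value, not a pointer */
    };

    int main(void)
    {
            struct i2c_dev dev;

            dev.setup = stm32f7_setup;  /* struct assignment copies everything */
            dev.setup.rise_time = 100;  /* per-device override */

            printf("dev=%u template=%u\n",
                   dev.setup.rise_time, stm32f7_setup.rise_time); /* 100 25 */
            return 0;
    }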
@@ -1451,6 +1451,7 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
 		if (hwif_init(hwif) == 0) {
 			printk(KERN_INFO "%s: failed to initialize IDE "
 					 "interface\n", hwif->name);
+			device_unregister(hwif->portdev);
 			device_unregister(&hwif->gendev);
 			ide_disable_port(hwif);
 			continue;
@@ -56,6 +56,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
 {
 	struct list_head *l;
 	struct pci_driver *d;
+	int ret;

 	list_for_each(l, &ide_pci_drivers) {
 		d = list_entry(l, struct pci_driver, node);
@@ -63,13 +64,17 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
 			const struct pci_device_id *id =
 				pci_match_id(d->id_table, dev);

-			if (id != NULL && d->probe(dev, id) >= 0) {
+			if (id != NULL) {
+				pci_assign_irq(dev);
+				ret = d->probe(dev, id);
+				if (ret >= 0) {
 				dev->driver = d;
 				pci_dev_get(dev);
 				return 1;
 			}
 		}
 	}
+	}
 	return 0;
 }

@@ -179,6 +179,7 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
 /**
  *	ide_pci_enable	-	do PCI enables
  *	@dev: PCI device
+ *	@bars: PCI BARs mask
  *	@d: IDE port info
  *
  *	Enable the IDE PCI device. We attempt to enable the device in full
@@ -189,9 +190,10 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
  *	Returns zero on success or an error code
  */

-static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
+static int ide_pci_enable(struct pci_dev *dev, int bars,
+			  const struct ide_port_info *d)
 {
-	int ret, bars;
+	int ret;

 	if (pci_enable_device(dev)) {
 		ret = pci_enable_device_io(dev);
@@ -216,18 +218,6 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
 		goto out;
 	}

-	if (d->host_flags & IDE_HFLAG_SINGLE)
-		bars = (1 << 2) - 1;
-	else
-		bars = (1 << 4) - 1;
-
-	if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
-		if (d->host_flags & IDE_HFLAG_CS5520)
-			bars |= (1 << 2);
-		else
-			bars |= (1 << 4);
-	}
-
 	ret = pci_request_selected_regions(dev, bars, d->name);
 	if (ret < 0)
 		printk(KERN_ERR "%s %s: can't reserve resources\n",
@@ -403,6 +393,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
 /**
  *	ide_setup_pci_controller	-	set up IDE PCI
  *	@dev: PCI device
+ *	@bars: PCI BARs mask
  *	@d: IDE port info
  *	@noisy: verbose flag
 *
@@ -411,7 +402,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
 *	and enables it if need be
 */

-static int ide_setup_pci_controller(struct pci_dev *dev,
+static int ide_setup_pci_controller(struct pci_dev *dev, int bars,
 				    const struct ide_port_info *d, int noisy)
 {
 	int ret;
@@ -420,7 +411,7 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
 	if (noisy)
 		ide_setup_pci_noise(dev, d);

-	ret = ide_pci_enable(dev, d);
+	ret = ide_pci_enable(dev, bars, d);
 	if (ret < 0)
 		goto out;

@@ -428,16 +419,20 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
 	if (ret < 0) {
 		printk(KERN_ERR "%s %s: error accessing PCI regs\n",
 		       d->name, pci_name(dev));
-		goto out;
+		goto out_free_bars;
 	}
 	if (!(pcicmd & PCI_COMMAND_IO)) {	/* is device disabled? */
 		ret = ide_pci_configure(dev, d);
 		if (ret < 0)
-			goto out;
+			goto out_free_bars;
 		printk(KERN_INFO "%s %s: device enabled (Linux)\n",
 		       d->name, pci_name(dev));
 	}

+	goto out;
+
+out_free_bars:
+	pci_release_selected_regions(dev, bars);
 out:
 	return ret;
 }
@@ -540,13 +535,28 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
 {
 	struct pci_dev *pdev[] = { dev1, dev2 };
 	struct ide_host *host;
-	int ret, i, n_ports = dev2 ? 4 : 2;
+	int ret, i, n_ports = dev2 ? 4 : 2, bars;
 	struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };

+	if (d->host_flags & IDE_HFLAG_SINGLE)
+		bars = (1 << 2) - 1;
+	else
+		bars = (1 << 4) - 1;
+
+	if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+		if (d->host_flags & IDE_HFLAG_CS5520)
+			bars |= (1 << 2);
+		else
+			bars |= (1 << 4);
+	}
+
 	for (i = 0; i < n_ports / 2; i++) {
-		ret = ide_setup_pci_controller(pdev[i], d, !i);
-		if (ret < 0)
+		ret = ide_setup_pci_controller(pdev[i], bars, d, !i);
+		if (ret < 0) {
+			if (i == 1)
+				pci_release_selected_regions(pdev[0], bars);
 			goto out;
+		}

 		ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
 	}
@@ -554,7 +564,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
 	host = ide_host_alloc(d, hws, n_ports);
 	if (host == NULL) {
 		ret = -ENOMEM;
-		goto out;
+		goto out_free_bars;
 	}

 	host->dev[0] = &dev1->dev;
@@ -576,7 +586,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
 	 * do_ide_setup_pci_device() on the first device!
 	 */
 	if (ret < 0)
-		goto out;
+		goto out_free_bars;

 	/* fixup IRQ */
 	if (ide_pci_is_in_compatibility_mode(pdev[i])) {
@@ -589,6 +599,13 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
 	ret = ide_host_register(host, d, hws);
 	if (ret)
 		ide_host_free(host);
+	else
+		goto out;
+
+out_free_bars:
+	i = n_ports / 2;
+	while (i--)
+		pci_release_selected_regions(pdev[i], bars);
 out:
 	return ret;
 }
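
The bars mask that moved into ide_pci_init_two() is plain bit arithmetic: (1 << n) - 1 selects the lowest n BARs, and the DMA BAR bit is OR'd on top. The same expressions evaluated in isolation:

    #include <stdio.h>

    int main(void)
    {
            int bars;
            int single = 0, no_dma = 0, cs5520 = 0;  /* pretend host flags */

            bars = single ? (1 << 2) - 1 : (1 << 4) - 1;  /* 0x3 or 0xf */
            if (!no_dma)
                    bars |= cs5520 ? (1 << 2) : (1 << 4); /* add the DMA BAR */

            printf("bars mask = 0x%x\n", bars);  /* 0x1f with these flags */
            return 0;
    }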
@@ -100,6 +100,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
 	if (ret)
 		goto pid_query_error;

+	nlmsg_end(skb, nlh);
+
 	pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
 		__func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);

@@ -170,6 +172,8 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 				&pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);
 	if (ret)
 		goto add_mapping_error;
+
+	nlmsg_end(skb, nlh);
 	nlmsg_request->req_buffer = pm_msg;

 	ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -246,6 +250,8 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
 				&pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);
 	if (ret)
 		goto query_mapping_error;
+
+	nlmsg_end(skb, nlh);
 	nlmsg_request->req_buffer = pm_msg;

 	ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -308,6 +314,8 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
 	if (ret)
 		goto remove_mapping_error;

+	nlmsg_end(skb, nlh);
+
 	ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
 	if (ret) {
 		skb = NULL; /* skb is freed in the netlink send-op handling */
@@ -597,6 +597,9 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
 				&mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
 	if (ret)
 		goto mapinfo_num_error;
+
+	nlmsg_end(skb, nlh);
+
 	ret = rdma_nl_unicast(skb, iwpm_pid);
 	if (ret) {
 		skb = NULL;
@@ -678,6 +681,8 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
 		if (ret)
 			goto send_mapping_info_unlock;

+		nlmsg_end(skb, nlh);
+
 		iwpm_print_sockaddr(&map_info->local_sockaddr,
 				    "send_mapping_info: Local sockaddr:");
 		iwpm_print_sockaddr(&map_info->mapped_sockaddr,
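
Every hunk above adds the same missing step: finalizing the message so the header's length field covers all the attributes appended after the header was put. A userspace sketch of what that finalization amounts to, using the uapi struct nlmsghdr; the flat buffer stands in for the skb and the payload is fake:

    #include <linux/netlink.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[256];
            struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
            char *tail = buf + NLMSG_HDRLEN;

            memset(buf, 0, sizeof(buf));
            memcpy(tail, "payload", 8);  /* pretend attribute data */
            tail += 8;

            /* the "end" step: message length = tail - start of header */
            nlh->nlmsg_len = (__u32)(tail - buf);

            printf("nlmsg_len = %u\n", nlh->nlmsg_len);
            return 0;
    }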
@@ -48,7 +48,7 @@
  * @wqe: cqp wqe for header
  * @header: header for the cqp wqe
  */
-static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
 {
 	wmb();            /* make sure WQE is populated before polarity is set */
 	set_64bit_val(wqe, 24, header);
@@ -59,6 +59,8 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
 						 struct i40iw_fast_reg_stag_info *info,
 						 bool post_sq);

+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);
+
 /* HMC/FPM functions */
 enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
 					    u8 hmc_fn_id);
@@ -123,12 +123,11 @@ static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
 	get_64bit_val(wqe, 24, &offset24);

 	offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
-	set_64bit_val(wqe, 24, offset24);

 	set_64bit_val(wqe, 0, buf->mem.pa);
 	set_64bit_val(wqe, 8,
 		      LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
-	set_64bit_val(wqe, 24, offset24);
+	i40iw_insert_wqe_hdr(wqe, offset24);
 }

 /**
@@ -409,9 +408,7 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
 	set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
 	set_64bit_val(wqe, 16, header[0]);

-	/* Ensure all data is written before writing valid bit */
-	wmb();
-	set_64bit_val(wqe, 24, header[1]);
+	i40iw_insert_wqe_hdr(wqe, header[1]);

 	i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
 	i40iw_qp_post_wr(&qp->qp_uk);
@@ -539,7 +536,7 @@ static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct
 		 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
 		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

-	set_64bit_val(wqe, 24, header);
+	i40iw_insert_wqe_hdr(wqe, header);

 	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
 	i40iw_sc_cqp_post_sq(cqp);
@@ -655,7 +652,7 @@ static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct
 		 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
 		 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
 		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
-	set_64bit_val(wqe, 24, header);
+	i40iw_insert_wqe_hdr(wqe, header);

 	i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
 			wqe, I40IW_CQP_WQE_SIZE * 8);
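
The point of routing every header store through i40iw_insert_wqe_hdr() is the barrier it carries: the payload writes must be globally visible before the valid/polarity word the hardware polls. In portable C11 the same shape is a release fence between the payload stores and the flag store; everything below is an illustrative model, not the driver's MMIO path:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t wqe[4];                 /* fake work-queue entry */
    static atomic_uint_fast64_t header;

    static void insert_wqe_hdr(uint64_t hdr)
    {
            /* order the payload stores before publishing the header */
            atomic_thread_fence(memory_order_release);
            atomic_store_explicit(&header, hdr, memory_order_relaxed);
    }

    int main(void)
    {
            wqe[0] = 0x1234;   /* payload stores */
            wqe[1] = 0x5678;
            insert_wqe_hdr(1); /* then the valid/polarity word */
            printf("hdr=%llu\n",
                   (unsigned long long)atomic_load(&header));
            return 0;
    }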
@@ -826,12 +826,14 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
 	attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
 	attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
 	attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+	attr->port_num = 1;
 	init_attr->event_handler = iwqp->ibqp.event_handler;
 	init_attr->qp_context = iwqp->ibqp.qp_context;
 	init_attr->send_cq = iwqp->ibqp.send_cq;
 	init_attr->recv_cq = iwqp->ibqp.recv_cq;
 	init_attr->srq = iwqp->ibqp.srq;
 	init_attr->cap = attr->cap;
+	init_attr->port_num = 1;
 	return 0;
 }

@@ -4174,9 +4174,9 @@ err_bfreg:
 err_uar_page:
 	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);

-err_cnt:
-	mlx5_ib_cleanup_cong_debugfs(dev);
 err_cong:
+	mlx5_ib_cleanup_cong_debugfs(dev);
+err_cnt:
 	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
 		mlx5_ib_dealloc_counters(dev);

@@ -387,7 +387,7 @@ struct qedr_qp {
 		u8 wqe_size;

 		u8 smac[ETH_ALEN];
-		u16 vlan_id;
+		u16 vlan;
 		int rc;
 	} *rqe_wr_id;

@@ -105,7 +105,7 @@ void qedr_ll2_complete_rx_packet(void *cxt,

 	qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
 		-EINVAL : 0;
-	qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
+	qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
 	/* note: length stands for data length i.e. GRH is excluded */
 	qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
 		data->length.data_length;
@@ -694,6 +694,7 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	struct qedr_cq *cq = get_qedr_cq(ibcq);
 	struct qedr_qp *qp = dev->gsi_qp;
 	unsigned long flags;
+	u16 vlan_id;
 	int i = 0;

 	spin_lock_irqsave(&cq->cq_lock, flags);
@@ -712,9 +713,14 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 		wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
 		ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
 		wc[i].wc_flags |= IB_WC_WITH_SMAC;
-		if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+
+		vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
+			  VLAN_VID_MASK;
+		if (vlan_id) {
 			wc[i].wc_flags |= IB_WC_WITH_VLAN;
-			wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+			wc[i].vlan_id = vlan_id;
+			wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
+				    VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 		}

 		qedr_inc_sw_cons(&qp->rq);
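
The qedr change stops treating the raw 16-bit tag as a VID and instead splits the 802.1Q TCI into its fields: VID in bits 11:0, priority in bits 15:13. The masks below match the standard layout (the same values <linux/if_vlan.h> defines):

    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_VID_MASK   0x0fff
    #define VLAN_PRIO_MASK  0xe000
    #define VLAN_PRIO_SHIFT 13

    int main(void)
    {
            uint16_t tci = 0xa064;  /* example tag from a received frame */

            uint16_t vid  = tci & VLAN_VID_MASK;
            uint16_t prio = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

            printf("vid=%u prio=%u\n", vid, prio);  /* vid=100 prio=5 */
            return 0;
    }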
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(closure_put);
 void __closure_wake_up(struct closure_waitlist *wait_list)
 {
 	struct llist_node *list;
-	struct closure *cl;
+	struct closure *cl, *t;
 	struct llist_node *reverse = NULL;

 	list = llist_del_all(&wait_list->list);
@@ -73,7 +73,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
 	reverse = llist_reverse_order(list);

 	/* Then do the wakeups */
-	llist_for_each_entry(cl, reverse, list) {
+	llist_for_each_entry_safe(cl, t, reverse, list) {
 		closure_set_waiting(cl, 0);
 		closure_sub(cl, CLOSURE_WAITING + 1);
 	}
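
The switch to llist_for_each_entry_safe() matters because the loop body may free or re-queue the closure, so the iterator has to fetch the next pointer before the body runs. The hazard and the fix in a minimal singly-linked demo:

    #include <stdlib.h>

    struct node { struct node *next; };

    int main(void)
    {
            struct node *head = NULL, *cur, *next;

            for (int i = 0; i < 3; i++) {   /* build a 3-node list */
                    struct node *n = malloc(sizeof(*n));
                    n->next = head;
                    head = n;
            }

            /* "safe" form: next is saved before the body frees cur */
            for (cur = head; cur; cur = next) {
                    next = cur->next;
                    free(cur);
            }
            return 0;
    }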
@@ -219,8 +219,17 @@ int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)

 	down_read(&mm->mmap_sem);

-	for (dar = addr; dar < addr + size; dar += page_size) {
-		if (!vma || dar < vma->vm_start || dar > vma->vm_end) {
+	vma = find_vma(mm, addr);
+	if (!vma) {
+		pr_err("Can't find vma for addr %016llx\n", addr);
+		rc = -EFAULT;
+		goto out;
+	}
+	/* get the size of the pages allocated */
+	page_size = vma_kernel_pagesize(vma);
+
+	for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) {
+		if (dar < vma->vm_start || dar >= vma->vm_end) {
 			vma = find_vma(mm, addr);
 			if (!vma) {
 				pr_err("Can't find vma for addr %016llx\n", addr);
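
The rewritten cxllib loop rounds the start address down to a page boundary and steps by the VMA's real page size, so huge-page mappings are walked on the right stride. The alignment arithmetic on its own (page_size must be a power of two):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t addr = 0x12345, size = 0x3000, page_size = 0x1000;

            for (uint64_t dar = addr & ~(page_size - 1);
                 dar < addr + size; dar += page_size)
                    printf("fault page at 0x%llx\n", (unsigned long long)dar);
            /* 0x12000, 0x13000, 0x14000, 0x15000 */
            return 0;
    }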
@@ -1634,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
 	}

 	mqrq->areq.mrq = &brq->mrq;
-
-	mmc_queue_bounce_pre(mqrq);
 }

 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1829,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 		brq = &mq_rq->brq;
 		old_req = mmc_queue_req_to_req(mq_rq);
 		type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
-		mmc_queue_bounce_post(mq_rq);

 		switch (status) {
 		case MMC_BLK_SUCCESS:
@@ -1286,6 +1286,23 @@ out_err:
 	return err;
 }

+static void mmc_select_driver_type(struct mmc_card *card)
+{
+	int card_drv_type, drive_strength, drv_type;
+
+	card_drv_type = card->ext_csd.raw_driver_strength |
+			mmc_driver_type_mask(0);
+
+	drive_strength = mmc_select_drive_strength(card,
+						   card->ext_csd.hs200_max_dtr,
+						   card_drv_type, &drv_type);
+
+	card->drive_strength = drive_strength;
+
+	if (drv_type)
+		mmc_set_driver_type(card->host, drv_type);
+}
+
 static int mmc_select_hs400es(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
@@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
 		goto out_err;
 	}

+	mmc_select_driver_type(card);
+
 	/* Switch card to HS400 */
 	val = EXT_CSD_TIMING_HS400 |
 	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
@@ -1374,23 +1393,6 @@ out_err:
 	return err;
 }

-static void mmc_select_driver_type(struct mmc_card *card)
-{
-	int card_drv_type, drive_strength, drv_type;
-
-	card_drv_type = card->ext_csd.raw_driver_strength |
-			mmc_driver_type_mask(0);
-
-	drive_strength = mmc_select_drive_strength(card,
-						   card->ext_csd.hs200_max_dtr,
-						   card_drv_type, &drv_type);
-
-	card->drive_strength = drive_strength;
-
-	if (drv_type)
-		mmc_set_driver_type(card->host, drv_type);
-}
-
 /*
  * For device supporting HS200 mode, the following sequence
  * should be done before executing the tuning process.
@@ -23,8 +23,6 @@
 #include "core.h"
 #include "card.h"

-#define MMC_QUEUE_BOUNCESZ	65536
-
 /*
  * Prepare a MMC request. This just filters out odd stuff.
  */
@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }

-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
-	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
-
-	if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
-		return 0;
-
-	if (bouncesz > host->max_req_size)
-		bouncesz = host->max_req_size;
-	if (bouncesz > host->max_seg_size)
-		bouncesz = host->max_seg_size;
-	if (bouncesz > host->max_blk_count * 512)
-		bouncesz = host->max_blk_count * 512;
-
-	if (bouncesz <= 512)
-		return 0;
-
-	return bouncesz;
-}
-
 /**
  * mmc_init_request() - initialize the MMC-specific per-request data
  * @q: the request queue
@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
 	struct mmc_card *card = mq->card;
 	struct mmc_host *host = card->host;

-	if (card->bouncesz) {
-		mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
-		if (!mq_rq->bounce_buf)
-			return -ENOMEM;
-		if (card->bouncesz > 512) {
-			mq_rq->sg = mmc_alloc_sg(1, gfp);
-			if (!mq_rq->sg)
-				return -ENOMEM;
-			mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
-							gfp);
-			if (!mq_rq->bounce_sg)
-				return -ENOMEM;
-		}
-	} else {
-		mq_rq->bounce_buf = NULL;
-		mq_rq->bounce_sg = NULL;
 	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
 	if (!mq_rq->sg)
 		return -ENOMEM;
-	}

 	return 0;
 }
@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
 	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

-	/* It is OK to kfree(NULL) so this will be smooth */
-	kfree(mq_rq->bounce_sg);
-	mq_rq->bounce_sg = NULL;
-
-	kfree(mq_rq->bounce_buf);
-	mq_rq->bounce_buf = NULL;
-
 	kfree(mq_rq->sg);
 	mq_rq->sg = NULL;
 }
@@ -242,12 +196,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

-	/*
-	 * mmc_init_request() depends on card->bouncesz so it must be calculated
-	 * before blk_init_allocated_queue() starts allocating requests.
-	 */
-	card->bouncesz = mmc_queue_calc_bouncesz(host);
-
 	mq->card = card;
 	mq->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!mq->queue)
@@ -271,17 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);

-	if (card->bouncesz) {
-		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
-		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
-		blk_queue_max_segment_size(mq->queue, card->bouncesz);
-	} else {
 	blk_queue_bounce_limit(mq->queue, limit);
 	blk_queue_max_hw_sectors(mq->queue,
 		min(host->max_blk_count, host->max_req_size / 512));
 	blk_queue_max_segments(mq->queue, host->max_segs);
 	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-	}

 	sema_init(&mq->thread_sem, 1);

@@ -370,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq)
 */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
-	unsigned int sg_len;
-	size_t buflen;
-	struct scatterlist *sg;
 	struct request *req = mmc_queue_req_to_req(mqrq);
-	int i;
-
-	if (!mqrq->bounce_buf)
 	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
-
-	sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
-
-	mqrq->bounce_sg_len = sg_len;
-
-	buflen = 0;
-	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
-		buflen += sg->length;
-
-	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
-
-	return 1;
-}
-
-/*
- * If writing, bounce the data to the buffer before the request
- * is sent to the host driver
- */
-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
-		return;
-
-	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-			  mqrq->bounce_buf, mqrq->sg[0].length);
-}
-
-/*
- * If reading, bounce the data from the buffer after the request
- * has been handled by the host driver
- */
-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
-		return;
-
-	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-			    mqrq->bounce_buf, mqrq->sg[0].length);
 }
@@ -49,9 +49,6 @@ enum mmc_drv_op {
 struct mmc_queue_req {
 	struct mmc_blk_request	brq;
 	struct scatterlist	*sg;
-	char			*bounce_buf;
-	struct scatterlist	*bounce_sg;
-	unsigned int		bounce_sg_len;
 	struct mmc_async_req	areq;
 	enum mmc_drv_op		drv_op;
 	int			drv_op_result;
@@ -81,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
 extern void mmc_cleanup_queue(struct mmc_queue *);
 extern void mmc_queue_suspend(struct mmc_queue *);
 extern void mmc_queue_resume(struct mmc_queue *);
-
 extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
 				     struct mmc_queue_req *);
-extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
-extern void mmc_queue_bounce_post(struct mmc_queue_req *);
-
 extern int mmc_access_rpmb(struct mmc_queue *);

@@ -1038,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
 	 */
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
 		     MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
-		     MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF;
+		     MMC_CAP_3_3V_DDR;

 	if (host->use_sg)
 		mmc->max_segs = 16;
@@ -531,8 +531,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
 	div->shift = __ffs(CLK_DIV_MASK);
 	div->width = __builtin_popcountl(CLK_DIV_MASK);
 	div->hw.init = &init;
-	div->flags = (CLK_DIVIDER_ONE_BASED |
-		      CLK_DIVIDER_ROUND_CLOSEST);
+	div->flags = CLK_DIVIDER_ONE_BASED;
 
 	clk = devm_clk_register(host->dev, &div->hw);
 	if (WARN_ON(IS_ERR(clk)))
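Dropping CLK_DIVIDER_ROUND_CLOSEST makes the common clock framework round the divisor up rather than to the nearest value, presumably so the card clock never comes out faster than the rate the MMC core requested. A standalone sketch of the difference for a one-based divider, with made-up rates:

	#include <stdio.h>

	/* One-based divider: f_out = f_parent / div. */
	static unsigned int div_closest(unsigned int parent, unsigned int rate)
	{
		return (parent + rate / 2) / rate;	/* nearest: may overclock */
	}

	static unsigned int div_up(unsigned int parent, unsigned int rate)
	{
		return (parent + rate - 1) / rate;	/* f_out never exceeds rate */
	}

	int main(void)
	{
		unsigned int parent = 1000000000, rate = 140000000;

		/* closest: div=7 -> 142857142 Hz, above the 140 MHz request */
		printf("closest: div=%u -> %u Hz\n",
		       div_closest(parent, rate), parent / div_closest(parent, rate));
		/* up:      div=8 -> 125000000 Hz, safely below the request */
		printf("up:      div=%u -> %u Hz\n",
		       div_up(parent, rate), parent / div_up(parent, rate));
		return 0;
	}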
@@ -717,6 +716,22 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
 static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
 	struct meson_host *host = mmc_priv(mmc);
+	int ret;
+
+	/*
+	 * If this is the initial tuning, try to get a sane Rx starting
+	 * phase before doing the actual tuning.
+	 */
+	if (!mmc->doing_retune) {
+		ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
+		if (ret)
+			return ret;
+	}
+
+	ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
+	if (ret)
+		return ret;
+
 	return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
 }
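meson_mmc_clk_phase_tuning() itself is outside this hunk; the usual technique is to sweep the clock phase, issue the standard tuning command at each step, and park the clock in the middle of the longest passing window. A generic sketch of that approach using the common clk and mmc APIs (the 30-degree step and the windowing logic are illustrative assumptions, not the driver's actual algorithm):

	#include <linux/clk.h>
	#include <linux/errno.h>
	#include <linux/mmc/host.h>

	/*
	 * Hypothetical phase sweep: try each phase, send the tuning command,
	 * and settle on the midpoint of the longest passing window.
	 * (Window wrap-around at 360 degrees is ignored for brevity.)
	 */
	static int example_phase_tuning(struct mmc_host *mmc, u32 opcode,
					struct clk *clk)
	{
		int phase, first = -1, len = 0, best_start = 0, best_len = 0;

		for (phase = 0; phase < 360; phase += 30) {
			clk_set_phase(clk, phase);
			if (!mmc_send_tuning(mmc, opcode, NULL)) {
				if (first < 0)
					first = phase;
				len++;
				if (len > best_len) {
					best_len = len;
					best_start = first;
				}
			} else {
				first = -1;
				len = 0;
			}
		}

		if (!best_len)
			return -EIO;

		/* Park in the middle of the largest working window */
		return clk_set_phase(clk, best_start + (best_len - 1) * 30 / 2);
	}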
@@ -746,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	case MMC_POWER_UP:
 		if (!IS_ERR(mmc->supply.vmmc))
 			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
+		/* Reset phases */
+		clk_set_phase(host->rx_clk, 0);
+		clk_set_phase(host->tx_clk, 270);
+
 		break;
 
 	case MMC_POWER_ON:
@@ -759,8 +779,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 			host->vqmmc_enabled = true;
 		}
 
-		/* Reset rx phase */
-		clk_set_phase(host->rx_clk, 0);
 		break;
 	}
 
@@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev)
 
 	pxamci_init_ocr(host);
 
-	/*
-	 * This architecture used to disable bounce buffers through its
-	 * defconfig, now it is done at runtime as a host property.
-	 */
-	mmc->caps = MMC_CAP_NO_BOUNCE_BUFF;
+	mmc->caps = 0;
 	host->cmdat = 0;
 	if (!cpu_is_pxa25x()) {
 		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
@@ -466,6 +466,7 @@ static int xenon_probe(struct platform_device *pdev)
 {
 	struct sdhci_pltfm_host *pltfm_host;
 	struct sdhci_host *host;
+	struct xenon_priv *priv;
 	int err;
 
 	host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata,
@@ -474,6 +475,7 @@ static int xenon_probe(struct platform_device *pdev)
 		return PTR_ERR(host);
 
 	pltfm_host = sdhci_priv(host);
+	priv = sdhci_pltfm_priv(pltfm_host);
 
 	/*
 	 * Link Xenon specific mmc_host_ops function,
@@ -491,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev)
 	if (err)
 		goto free_pltfm;
 
-	err = mmc_of_parse(host->mmc);
+	priv->axi_clk = devm_clk_get(&pdev->dev, "axi");
+	if (IS_ERR(priv->axi_clk)) {
+		err = PTR_ERR(priv->axi_clk);
+		if (err == -EPROBE_DEFER)
+			goto err_clk;
+	} else {
+		err = clk_prepare_enable(priv->axi_clk);
+		if (err)
+			goto err_clk;
+	}
+
+	err = mmc_of_parse(host->mmc);
 	if (err)
-		goto err_clk;
+		goto err_clk_axi;
 
 	sdhci_get_of_property(pdev);
 
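The AXI clock is treated as optional here: -EPROBE_DEFER is propagated so the probe can be retried once the clock provider appears, while any other lookup failure is tolerated because not every Xenon integration has a separate AXI clock (per the binding, it is mandatory only on AP806/CP110). On kernels that provide devm_clk_get_optional(), the same intent can be written more compactly; a sketch under that assumption, not what this tree does:

	priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi");
	if (IS_ERR(priv->axi_clk)) {
		/* Real errors and -EPROBE_DEFER; a missing clock yields NULL. */
		err = PTR_ERR(priv->axi_clk);
		goto err_clk;
	}

	/* clk_prepare_enable(NULL) is a no-op returning 0. */
	err = clk_prepare_enable(priv->axi_clk);
	if (err)
		goto err_clk;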
@@ -502,11 +515,11 @@ static int xenon_probe(struct platform_device *pdev)
 	/* Xenon specific dt parse */
 	err = xenon_probe_dt(pdev);
 	if (err)
-		goto err_clk;
+		goto err_clk_axi;
 
 	err = xenon_sdhc_prepare(host);
 	if (err)
-		goto err_clk;
+		goto err_clk_axi;
 
 	pm_runtime_get_noresume(&pdev->dev);
 	pm_runtime_set_active(&pdev->dev);
@@ -527,6 +540,8 @@ remove_sdhc:
 	pm_runtime_disable(&pdev->dev);
 	pm_runtime_put_noidle(&pdev->dev);
 	xenon_sdhc_unprepare(host);
+err_clk_axi:
+	clk_disable_unprepare(priv->axi_clk);
 err_clk:
 	clk_disable_unprepare(pltfm_host->clk);
 free_pltfm:
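The new err_clk_axi: label preserves the kernel's standard unwind idiom: resources are released in exactly the reverse order they were acquired, and a failure at step N jumps to the label that tears down steps N-1 back to 1. A self-contained toy version of the idiom (all names hypothetical):

	#include <stdio.h>

	/* Stubs standing in for real resource acquisition/release. */
	static int acquire_a(void) { return 0; }
	static int acquire_b(void) { return 0; }
	static int acquire_c(void) { return -1; }	/* pretend the last step fails */
	static void release_a(void) { puts("release a"); }
	static void release_b(void) { puts("release b"); }

	static int probe(void)
	{
		int err;

		err = acquire_a();
		if (err)
			return err;

		err = acquire_b();
		if (err)
			goto err_a;

		err = acquire_c();
		if (err)
			goto err_b;

		return 0;

	err_b:			/* tear down in reverse acquisition order */
		release_b();
	err_a:
		release_a();
		return err;
	}

	int main(void)
	{
		return probe() ? 1 : 0;	/* prints "release b" then "release a" */
	}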
@@ -538,6 +553,7 @@ static int xenon_remove(struct platform_device *pdev)
 {
 	struct sdhci_host *host = platform_get_drvdata(pdev);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
 
 	pm_runtime_get_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
@@ -546,7 +562,7 @@ static int xenon_remove(struct platform_device *pdev)
 	sdhci_remove_host(host, 0);
 
 	xenon_sdhc_unprepare(host);
-
+	clk_disable_unprepare(priv->axi_clk);
 	clk_disable_unprepare(pltfm_host->clk);
 
 	sdhci_pltfm_free(pdev);
@@ -83,6 +83,7 @@ struct xenon_priv {
 	unsigned char	bus_width;
 	unsigned char	timing;
 	unsigned int	clock;
+	struct clk	*axi_clk;
 
 	int		phy_type;
 	/*
@@ -566,8 +566,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 		return true;
 	default:
 		bpf_warn_invalid_xdp_action(action);
+		/* fall through */
 	case XDP_ABORTED:
 		trace_xdp_exception(nic->netdev, prog, action);
+		/* fall through */
 	case XDP_DROP:
 		/* Check if it's a recycled page, if not
 		 * unmap the DMA mapping.
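The /* fall through */ comments mark the absent break statements as intentional so GCC's -Wimplicit-fallthrough stays quiet. Later kernels spell this with the fallthrough pseudo-keyword from <linux/compiler_attributes.h>; the same switch would then read as sketched below (illustrative only, not this driver's code):

	switch (action) {
	default:
		bpf_warn_invalid_xdp_action(action);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(nic->netdev, prog, action);
		fallthrough;
	case XDP_DROP:
		/* unmap or recycle the page as in the code above */
		break;
	}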
@@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 **/
 static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
 {
-#ifndef CONFIG_SPARC
-	u32 regval;
-	u32 i;
-#endif
 	s32 ret_val;
 
 	ret_val = ixgbe_start_hw_generic(hw);
-
-#ifndef CONFIG_SPARC
-	/* Disable relaxed ordering */
-	for (i = 0; ((i < hw->mac.max_tx_queues) &&
-	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
-		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
-	}
-
-	for (i = 0; ((i < hw->mac.max_rx_queues) &&
-	     (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-	}
-#endif
 	if (ret_val)
 		return ret_val;
 
@@ -366,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
 	}
 	IXGBE_WRITE_FLUSH(hw);
 
-#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
-	/* Disable relaxed ordering */
-	for (i = 0; i < hw->mac.max_tx_queues; i++) {
-		u32 regval;
-
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
-		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
-	}
-
-	for (i = 0; i < hw->mac.max_rx_queues; i++) {
-		u32 regval;
-
-		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
-		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
-			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
-		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
-	}
-#endif
 	return 0;
 }
 
@@ -1049,7 +1049,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_ring *temp_ring;
-	int i, err = 0;
+	int i, j, err = 0;
 	u32 new_rx_count, new_tx_count;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -1086,8 +1086,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 	}
 
 	/* allocate temporary buffer to store rings in */
-	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
-	i = max_t(int, i, adapter->num_xdp_queues);
+	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
+		  adapter->num_rx_queues);
 	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
 
 	if (!temp_ring) {
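The sizing fix matters because the copy loops below place the Tx rings and the XDP rings back-to-back in temp_ring, so the scratch buffer must hold num_tx + num_xdp entries, not merely the largest single count. A standalone illustration with made-up queue counts:

	#include <stdio.h>

	#define max(a, b) ((a) > (b) ? (a) : (b))

	int main(void)
	{
		/* Hypothetical counts: 8 Tx, 4 XDP, 6 Rx queues. */
		int tx = 8, xdp = 4, rx = 6;

		int old_size = max(max(tx, rx), xdp);	/* 8 slots: too small      */
		int new_size = max(tx + xdp, rx);	/* 12 slots: Tx at 0..7,   */
							/* XDP rings at 8..11      */

		printf("old=%d new=%d\n", old_size, new_size);
		return 0;
	}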
@@ -1119,8 +1119,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			}
 		}
 
-		for (i = 0; i < adapter->num_xdp_queues; i++) {
-			memcpy(&temp_ring[i], adapter->xdp_ring[i],
+		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+			memcpy(&temp_ring[i], adapter->xdp_ring[j],
 			       sizeof(struct ixgbe_ring));
 
 			temp_ring[i].count = new_tx_count;
@@ -1140,10 +1140,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			memcpy(adapter->tx_ring[i], &temp_ring[i],
 			       sizeof(struct ixgbe_ring));
 		}
-		for (i = 0; i < adapter->num_xdp_queues; i++) {
-			ixgbe_free_tx_resources(adapter->xdp_ring[i]);
+		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+			ixgbe_free_tx_resources(adapter->xdp_ring[j]);
 
-			memcpy(adapter->xdp_ring[i], &temp_ring[i],
+			memcpy(adapter->xdp_ring[j], &temp_ring[i],
 			       sizeof(struct ixgbe_ring));
 		}
 
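The j counter exists because two index spaces are in play: i runs continuously over the scratch buffer while j restarts at zero for the xdp_ring array. In isolation the pattern looks like this (array and count names hypothetical):

	/* Copy two ring arrays back-to-back into one scratch buffer. */
	for (i = 0; i < ntx; i++)
		scratch[i] = tx_ring[i];	/* fills scratch[0..ntx-1] */
	for (j = 0; j < nxdp; j++, i++)
		scratch[i] = xdp_ring[j];	/* fills scratch[ntx..ntx+nxdp-1] */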
@@ -5014,7 +5014,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
 				IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
 		return;
 
-	vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask;
+	vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
 	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
 
 	if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
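The one-character change is a real bug fix: && is logical AND, which collapses its operands to 0 or 1, so the old code wrote 0 or 1 into VXLANCTRL rather than clearing only the masked port field. A standalone demonstration with made-up register values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int reg  = 0x12344789;	/* made-up VXLANCTRL contents */
		unsigned int mask = 0x0000ffff;	/* field to clear */

		unsigned int wrong = reg && ~mask;	/* logical AND: yields 1 */
		unsigned int right = reg & ~mask;	/* bitwise AND: 0x12340000 */

		printf("wrong=0x%08x right=0x%08x\n", wrong, right);
		return 0;
	}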
@@ -8665,6 +8665,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 		return ixgbe_ptp_set_ts_config(adapter, req);
 	case SIOCGHWTSTAMP:
 		return ixgbe_ptp_get_ts_config(adapter, req);
+	case SIOCGMIIPHY:
+		if (!adapter->hw.phy.ops.read_reg)
+			return -EOPNOTSUPP;
+		/* fall through */
 	default:
 		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
 	}
@@ -3640,20 +3640,6 @@ static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
 					 struct mlxsw_sp_fib *fib)
 {
-	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
-	struct mlxsw_sp_lpm_tree *lpm_tree;
-
-	/* Aggregate prefix lengths across all virtual routers to make
-	 * sure we only have used prefix lengths in the LPM tree.
-	 */
-	mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
-	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
-					 fib->proto);
-	if (IS_ERR(lpm_tree))
-		goto err_tree_get;
-	mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
-
-err_tree_get:
 	if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
 		return;
 	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);