Merge branches 'iommu/fixes', 'arm/rockchip', 'arm/renesas', 'arm/smmu', 'arm/core', 'x86/vt-d', 'x86/amd', 's390' and 'core' into next

495 changed files with 5576 additions and 3400 deletions


@@ -3811,6 +3811,13 @@
 expediting. Set to zero to disable automatic
 expediting.
+stack_guard_gap= [MM]
+override the default stack gap protection. The value
+is in page units and it defines how many pages prior
+to (for stacks growing down) resp. after (for stacks
+growing up) the main stack are reserved for no other
+mapping. Default value is 256 pages.
 stacktrace [FTRACE]
 Enabled the stack tracer on boot up.
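A quick worked example (not part of the patch text): with 4 KiB pages the default of 256 pages is a 1 MiB guard gap; booting with stack_guard_gap=1024 widens it to 4 MiB, and stack_guard_gap=1 shrinks it to a single guard page.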


@@ -62,10 +62,13 @@ stable kernels.
 | Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
 | Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
 | Cavium | ThunderX SMMUv2 | #27704 | N/A |
+| Cavium | ThunderX2 SMMUv3 | #74 | N/A |
+| Cavium | ThunderX2 SMMUv3 | #126 | N/A |
 | | | | |
 | Freescale/NXP | LS2080A/LS1043A | A-008585 | FSL_ERRATUM_A008585 |
 | | | | |
 | Hisilicon | Hip0{5,6,7} | #161010101 | HISILICON_ERRATUM_161010101 |
+| Hisilicon | Hip0{6,7} | #161010701 | N/A |
 | | | | |
 | Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
 | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 |


@@ -22,7 +22,8 @@ Required properties :
 - #clock-cells : must contain 1
 - #reset-cells : must contain 1
-For the PRCM CCUs on H3/A64, one more clock is needed:
+For the PRCM CCUs on H3/A64, two more clocks are needed:
+- "pll-periph": the SoC's peripheral PLL from the main CCU
 - "iosc": the SoC's internal frequency oscillator
 Example for generic CCU:
@@ -39,8 +40,8 @@ Example for PRCM CCU:
 r_ccu: clock@01f01400 {
 compatible = "allwinner,sun50i-a64-r-ccu";
 reg = <0x01f01400 0x100>;
-clocks = <&osc24M>, <&osc32k>, <&iosc>;
-clock-names = "hosc", "losc", "iosc";
+clocks = <&osc24M>, <&osc32k>, <&iosc>, <&ccu CLK_PLL_PERIPH0>;
+clock-names = "hosc", "losc", "iosc", "pll-periph";
 #clock-cells = <1>;
 #reset-cells = <1>;
 };


@@ -41,9 +41,9 @@ Required properties:
 Optional properties:
 In order to use the GPIO lines in PWM mode, some additional optional
-properties are required. Only Armada 370 and XP support these properties.
-- compatible: Must contain "marvell,armada-370-xp-gpio"
+properties are required.
+- compatible: Must contain "marvell,armada-370-gpio"
 - reg: an additional register set is needed, for the GPIO Blink
 Counter on/off registers.
@@ -71,7 +71,7 @@ Example:
 };
 gpio1: gpio@18140 {
-compatible = "marvell,armada-370-xp-gpio";
+compatible = "marvell,armada-370-gpio";
 reg = <0x18140 0x40>, <0x181c8 0x08>;
 reg-names = "gpio", "pwm";
 ngpios = <17>;


@@ -26,6 +26,12 @@ the PCIe specification.
 * "priq" - PRI Queue not empty
 * "cmdq-sync" - CMD_SYNC complete
 * "gerror" - Global Error activated
+* "combined" - The combined interrupt is optional,
+and should only be provided if the
+hardware supports just a single,
+combined interrupt line.
+If provided, then the combined interrupt
+will be used in preference to any others.
 - #iommu-cells : See the generic IOMMU binding described in
 devicetree/bindings/pci/pci-iommu.txt
@@ -49,6 +55,12 @@ the PCIe specification.
 - hisilicon,broken-prefetch-cmd
 : Avoid sending CMD_PREFETCH_* commands to the SMMU.
+- cavium,cn9900-broken-page1-regspace
+: Replaces all page 1 offsets used for EVTQ_PROD/CONS,
+PRIQ_PROD/CONS register access with page 0 offsets.
+Set for Cavium ThunderX2 silicon that doesn't support
+SMMU page1 register space.
 ** Example
 smmu@2b400000 {


@@ -31,7 +31,7 @@ Example:
 compatible = "st,stm32-timers";
 reg = <0x40010000 0x400>;
 clocks = <&rcc 0 160>;
-clock-names = "clk_int";
+clock-names = "int";
 pwm {
 compatible = "st,stm32-pwm";


@@ -34,7 +34,7 @@ Required properties:
 "brcm,bcm6328-switch"
 "brcm,bcm6368-switch" and the mandatory "brcm,bcm63xx-switch"
-See Documentation/devicetree/bindings/dsa/dsa.txt for a list of additional
+See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
 required and optional properties.
 Examples:


@@ -27,6 +27,7 @@ Optional properties:
 of the device. On many systems this is wired high so the device goes
 out of reset at power-on, but if it is under program control, this
 optional GPIO can wake up in response to it.
+- vdd33a-supply, vddvario-supply : 3.3V analog and IO logic power supplies
 Examples:


@@ -122,7 +122,7 @@ associated flow of the packet. The hash is either provided by hardware
 or will be computed in the stack. Capable hardware can pass the hash in
 the receive descriptor for the packet; this would usually be the same
 hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in
-skb->rx_hash and can be used elsewhere in the stack as a hash of the
+skb->hash and can be used elsewhere in the stack as a hash of the
 packets flow.
 Each receive hardware queue has an associated list of CPUs to which
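As an illustration of the skb->hash plumbing described above (a sketch only: the driver function is hypothetical, but skb_set_hash()/skb_get_hash() and the PKT_HASH_TYPE_* values are the real helpers from <linux/skbuff.h>):

    /* Driver RX path: record the hardware-computed RSS hash in the skb. */
    static void example_rx_record_hash(struct sk_buff *skb, u32 hw_hash, bool is_l4)
    {
            skb_set_hash(skb, hw_hash,
                         is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
    }

    /* Consumers (RPS, flow steering, ...) then read it back; if no hash was
     * recorded, skb_get_hash() computes a software flow hash on demand. */
    u32 hash = skb_get_hash(skb);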


@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 # *DOCUMENTATION*
@@ -1437,7 +1437,7 @@ help:
 @echo ' make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
 @echo ' make V=2 [targets] 2 => give reason for rebuild of target'
 @echo ' make O=dir [targets] Locate all output files in "dir", including .config'
-@echo ' make C=1 [targets] Check all c source with $$CHECK (sparse by default)'
+@echo ' make C=1 [targets] Check re-compiled c source with $$CHECK (sparse by default)'
 @echo ' make C=2 [targets] Force check of all c source with $$CHECK'
 @echo ' make RECORDMCOUNT_WARN=1 [targets] Warn about ignored mcount sections'
 @echo ' make W=n [targets] Enable extra gcc checks, n=1,2,3 where'


@@ -65,7 +65,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
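For context, vm_start_gap()/vm_end_gap(), which this hunk and the many similar ones below switch to, are the mm.h helpers introduced alongside the stack_guard_gap parameter. Roughly (a sketch, not part of this hunk; stack_guard_gap is kept in bytes, 256 pages by default):

    static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
    {
            unsigned long vm_start = vma->vm_start;

            /* A stack growing down also claims the guard gap below it. */
            if (vma->vm_flags & VM_GROWSDOWN) {
                    vm_start -= stack_guard_gap;
                    if (vm_start > vma->vm_start)   /* underflow */
                            vm_start = 0;
            }
            return vm_start;
    }

    static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
    {
            unsigned long vm_end = vma->vm_end;

            /* A stack growing up claims the guard gap above it. */
            if (vma->vm_flags & VM_GROWSUP) {
                    vm_end += stack_guard_gap;
                    if (vm_end < vma->vm_end)       /* overflow */
                            vm_end = -PAGE_SIZE;
            }
            return vm_end;
    }

So a mapping request that would land inside the guard gap of a neighbouring stack is rejected, which is exactly the extra condition these call sites now check.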


@@ -220,7 +220,7 @@
 mmc1_pins: pinmux_mmc1_pins {
 pinctrl-single,pins = <
-AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */
+AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE7) /* uart0_rtsn.gpio1_9 */
 >;
 };
@@ -280,10 +280,6 @@
 AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdReset - gpmc_ad13.gpio1_13 */
 AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7) /* nDispReset - gpmc_ad14.gpio1_14 */
 AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7) /* USB1_enPower - gpmc_a1.gpio1_17 */
-/* AVR Programming - SPI Bus (bit bang) - Screen and Keyboard */
-AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMOSI spi0_d0.gpio0_3 */
-AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMISO spi0_d1.gpio0_4 */
-AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattSCLK spi0_clk.gpio0_2 */
 /* PDI Bus - Battery system */
 AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7) /* nBattReset gpmc_a0.gpio1_16 */
 AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7) /* BattPDIData gpmc_ad15.gpio1_15 */
@@ -384,7 +380,7 @@
 pinctrl-names = "default";
 pinctrl-0 = <&mmc1_pins>;
 bus-width = <4>;
-cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+cd-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
 vmmc-supply = <&vmmcsd_fixed>;
 };


@@ -558,10 +558,11 @@
 };
 r_ccu: clock@1f01400 {
-compatible = "allwinner,sun50i-a64-r-ccu";
+compatible = "allwinner,sun8i-h3-r-ccu";
 reg = <0x01f01400 0x100>;
-clocks = <&osc24M>, <&osc32k>, <&iosc>;
-clock-names = "hosc", "losc", "iosc";
+clocks = <&osc24M>, <&osc32k>, <&iosc>,
+<&ccu 9>;
+clock-names = "hosc", "losc", "iosc", "pll-periph";
 #clock-cells = <1>;
 #reset-cells = <1>;
 };


@@ -90,7 +90,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
@@ -141,7 +141,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 addr = PAGE_ALIGN(addr);
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }


@@ -406,8 +406,9 @@
 r_ccu: clock@1f01400 {
 compatible = "allwinner,sun50i-a64-r-ccu";
 reg = <0x01f01400 0x100>;
-clocks = <&osc24M>, <&osc32k>, <&iosc>;
-clock-names = "hosc", "losc", "iosc";
+clocks = <&osc24M>, <&osc32k>, <&iosc>,
+<&ccu 11>;
+clock-names = "hosc", "losc", "iosc", "pll-periph";
 #clock-cells = <1>;
 #reset-cells = <1>;
 };


@@ -40,7 +40,7 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */
-#include "sunxi-h3-h5.dtsi"
+#include <arm/sunxi-h3-h5.dtsi>
 / {
 cpus {

@@ -1 +0,0 @@
-../../../../arm/boot/dts/sunxi-h3-h5.dtsi


@@ -221,10 +221,11 @@ void update_vsyscall(struct timekeeper *tk)
 /* tkr_mono.cycle_last == tkr_raw.cycle_last */
 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
 vdso_data->raw_time_sec = tk->raw_time.tv_sec;
-vdso_data->raw_time_nsec = tk->raw_time.tv_nsec;
+vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec <<
+tk->tkr_raw.shift) +
+tk->tkr_raw.xtime_nsec;
 vdso_data->xtime_clock_sec = tk->xtime_sec;
 vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
-/* tkr_raw.xtime_nsec == 0 */
 vdso_data->cs_mono_mult = tk->tkr_mono.mult;
 vdso_data->cs_raw_mult = tk->tkr_raw.mult;
 /* tkr_mono.shift == tkr_raw.shift */


@@ -256,7 +256,6 @@ monotonic_raw:
 seqcnt_check fail=monotonic_raw
 /* All computations are done with left-shifted nsecs. */
-lsl x14, x14, x12
 get_nsec_per_sec res=x9
 lsl x9, x9, x12


@@ -36,6 +36,7 @@ int bpf_jit_enable __read_mostly;
 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
+#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
 /* Map BPF registers to A64 registers */
 static const int bpf2a64[] = {
@@ -57,6 +58,7 @@ static const int bpf2a64[] = {
 /* temporary registers for internal BPF JIT */
 [TMP_REG_1] = A64_R(10),
 [TMP_REG_2] = A64_R(11),
+[TMP_REG_3] = A64_R(12),
 /* tail_call_cnt */
 [TCALL_CNT] = A64_R(26),
 /* temporary register for blinding constants */
@@ -319,6 +321,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 const u8 src = bpf2a64[insn->src_reg];
 const u8 tmp = bpf2a64[TMP_REG_1];
 const u8 tmp2 = bpf2a64[TMP_REG_2];
+const u8 tmp3 = bpf2a64[TMP_REG_3];
 const s16 off = insn->off;
 const s32 imm = insn->imm;
 const int i = insn - ctx->prog->insnsi;
@@ -689,10 +692,10 @@ emit_cond_jmp:
 emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
 emit(A64_LDXR(isdw, tmp2, tmp), ctx);
 emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
-emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx);
+emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
 jmp_offset = -3;
 check_imm19(jmp_offset);
-emit(A64_CBNZ(0, tmp2, jmp_offset), ctx);
+emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
 break;
 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */


@@ -75,7 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 addr = PAGE_ALIGN(addr);
 vma = find_vma(current->mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 goto success;
 }


@@ -128,19 +128,19 @@ quiet_cmd_cpp_its_S = ITS $@
 -DADDR_BITS=$(ADDR_BITS) \
 -DADDR_CELLS=$(itb_addr_cells)
-$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 $(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
-$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
-$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
-$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
-$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
 quiet_cmd_itb-image = ITB $@


@@ -35,7 +35,12 @@ extern pte_t *pkmap_page_table;
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define LAST_PKMAP 512
+#else
 #define LAST_PKMAP 1024
+#endif
 #define LAST_PKMAP_MASK (LAST_PKMAP-1)
 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))


@@ -43,7 +43,8 @@ typedef union mips_instruction kprobe_opcode_t;
 #define flush_insn_slot(p) \
 do { \
-flush_icache_range((unsigned long)p->addr, \
+if (p->addr) \
+flush_icache_range((unsigned long)p->addr, \
 (unsigned long)p->addr + \
 (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
 } while (0)


@@ -19,6 +19,10 @@
 #define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+#endif
 extern int temp_tlb_entry;
 /*
@@ -62,7 +66,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 #define VMALLOC_START MAP_BASE
-#define PKMAP_BASE (0xfe000000UL)
+#define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
+#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP)
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
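In other words (assuming 4 KiB pages): PKMAP_END rounds FIXADDR_START down to a LAST_PKMAP * PAGE_SIZE boundary (4 MiB with 1024 entries, 2 MiB in the new 512-entry CONFIG_PHYS_ADDR_T_64BIT case), and PKMAP_BASE sits exactly one pkmap window below it, so the kmap area now floats just under the fixmap region instead of being hard-wired to 0xfe000000.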


@@ -804,8 +804,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 break;
 }
 /* Compact branch: BNEZC || JIALC */
-if (insn.i_format.rs)
+if (!insn.i_format.rs) {
+/* JIALC: set $31/ra */
 regs->regs[31] = epc + 4;
+}
 regs->cp0_epc += 8;
 break;
 #endif


@@ -38,20 +38,6 @@ void arch_ftrace_update_code(int command)
 #endif
-/*
-* Check if the address is in kernel space
-*
-* Clone core_kernel_text() from kernel/extable.c, but doesn't call
-* init_kernel_text() for Ftrace doesn't trace functions in init sections.
-*/
-static inline int in_kernel_space(unsigned long ip)
-{
-if (ip >= (unsigned long)_stext &&
-ip <= (unsigned long)_etext)
-return 1;
-return 0;
-}
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
@@ -198,7 +184,7 @@ int ftrace_make_nop(struct module *mod,
 * If ip is in kernel space, no long call, otherwise, long call is
 * needed.
 */
-new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
 #ifdef CONFIG_64BIT
 return ftrace_modify_code(ip, new);
 #else
@@ -218,12 +204,12 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 unsigned int new;
 unsigned long ip = rec->ip;
-new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
 #ifdef CONFIG_64BIT
 return ftrace_modify_code(ip, new);
 #else
-return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
+return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
 INSN_NOP : insn_la_mcount[1]);
 #endif
 }
@@ -289,7 +275,7 @@ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
 * kernel, move after the instruction "move ra, at"(offset is 16)
 */
-ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
+ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);
 /*
 * search the text until finding the non-store instruction or "s{d,w}
@@ -394,7 +380,7 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 * entries configured through the tracing/set_graph_function interface.
 */
-insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
 trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 /* Only trace if the calling function expects to */


@@ -1597,7 +1597,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 break;
 case CPU_P5600:
 case CPU_P6600:
-case CPU_I6400:
 /* 8-bit event numbers */
 raw_id = config & 0x1ff;
 base_id = raw_id & 0xff;
@@ -1610,6 +1609,11 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 raw_event.range = P;
 #endif
 break;
+case CPU_I6400:
+/* 8-bit event numbers */
+base_id = config & 0xff;
+raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+break;
 case CPU_1004K:
 if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;


@@ -166,7 +166,11 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
 bool user, bool kernel)
 {
-int idx_user, idx_kernel;
+/*
+* Initialize idx_user and idx_kernel to workaround bogus
+* maybe-initialized warning when using GCC 6.
+*/
+int idx_user = 0, idx_kernel = 0;
 unsigned long flags, old_entryhi;
 local_irq_save(flags);


@@ -93,7 +93,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }


@@ -51,15 +51,15 @@ void __init pagetable_init(void)
 /*
 * Fixed mappings:
 */
-vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
 #ifdef CONFIG_HIGHMEM
 /*
 * Permanent kmaps:
 */
 vaddr = PKMAP_BASE;
-fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
 pgd = swapper_pg_dir + __pgd_offset(vaddr);
 pud = pud_offset(pgd, vaddr);


@@ -90,7 +90,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 struct mm_struct *mm = current->mm;
-struct vm_area_struct *vma;
+struct vm_area_struct *vma, *prev;
 unsigned long task_size = TASK_SIZE;
 int do_color_align, last_mmap;
 struct vm_unmapped_area_info info;
@@ -117,9 +117,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 else
 addr = PAGE_ALIGN(addr);
-vma = find_vma(mm, addr);
+vma = find_vma_prev(mm, addr, &prev);
 if (task_size - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)) &&
+(!prev || addr >= vm_end_gap(prev)))
 goto found_addr;
 }
@@ -143,7 +144,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 const unsigned long len, const unsigned long pgoff,
 const unsigned long flags)
 {
-struct vm_area_struct *vma;
+struct vm_area_struct *vma, *prev;
 struct mm_struct *mm = current->mm;
 unsigned long addr = addr0;
 int do_color_align, last_mmap;
@@ -177,9 +178,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
 else
 addr = PAGE_ALIGN(addr);
-vma = find_vma(mm, addr);
+vma = find_vma_prev(mm, addr, &prev);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)) &&
+(!prev || addr >= vm_end_gap(prev)))
 goto found_addr;
 }


@@ -104,7 +104,7 @@
 "1: "PPC_TLNEI" %4,0\n" \
 _EMIT_BUG_ENTRY \
 : : "i" (__FILE__), "i" (__LINE__), \
-"i" (BUGFLAG_TAINT(TAINT_WARN)), \
+"i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
 "i" (sizeof(struct bug_entry)), \
 "r" (__ret_warn_on)); \
 } \


@@ -103,6 +103,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
+extern int is_current_kprobe_addr(unsigned long addr);
 #ifdef CONFIG_KPROBES_ON_FTRACE
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
 struct kprobe_ctlblk *kcb);


@@ -94,11 +94,13 @@ struct xive_q {
 * store at 0 and some ESBs support doing a trigger via a
 * separate trigger page.
 */
-#define XIVE_ESB_GET 0x800
-#define XIVE_ESB_SET_PQ_00 0xc00
-#define XIVE_ESB_SET_PQ_01 0xd00
-#define XIVE_ESB_SET_PQ_10 0xe00
-#define XIVE_ESB_SET_PQ_11 0xf00
+#define XIVE_ESB_STORE_EOI 0x400 /* Store */
+#define XIVE_ESB_LOAD_EOI 0x000 /* Load */
+#define XIVE_ESB_GET 0x800 /* Load */
+#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */
 #define XIVE_ESB_VAL_P 0x2
 #define XIVE_ESB_VAL_Q 0x1
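For illustration only (mirroring the call sites changed later in this merge, e.g. xive_do_source_eoi(); the function names here are hypothetical): a "Store" offset is written to, while the "Load" offsets perform their side effect on an MMIO read and return the previous PQ state:

    /* EOI via the store-EOI page offset: the write itself does the EOI. */
    static void example_store_eoi(void __iomem *eoi_page)
    {
            out_be64(eoi_page + XIVE_ESB_STORE_EOI, 0);
    }

    /* Set PQ to 00 and fetch the old PQ bits with a single load. */
    static u64 example_set_pq_00(void __iomem *eoi_page)
    {
            return in_be64(eoi_page + XIVE_ESB_SET_PQ_00);
    }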


@@ -1411,10 +1411,8 @@ USE_TEXT_SECTION()
 .balign IFETCH_ALIGN_BYTES
 do_hash_page:
 #ifdef CONFIG_PPC_STD_MMU_64
-andis. r0,r4,0xa410 /* weird error? */
+andis. r0,r4,0xa450 /* weird error? */
 bne- handle_page_fault /* if not, try to insert a HPTE */
-andis. r0,r4,DSISR_DABRMATCH@h
-bne- handle_dabr_fault
 CURRENT_THREAD_INFO(r11, r1)
 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
@@ -1438,11 +1436,16 @@ do_hash_page:
 /* Error */
 blt- 13f
+/* Reload DSISR into r4 for the DABR check below */
+ld r4,_DSISR(r1)
 #endif /* CONFIG_PPC_STD_MMU_64 */
 /* Here we have a page fault that hash_page can't handle. */
 handle_page_fault:
-11: ld r4,_DAR(r1)
+11: andis. r0,r4,DSISR_DABRMATCH@h
+bne- handle_dabr_fault
+ld r4,_DAR(r1)
 ld r5,_DSISR(r1)
 addi r3,r1,STACK_FRAME_OVERHEAD
 bl do_page_fault


@@ -43,6 +43,12 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
+int is_current_kprobe_addr(unsigned long addr)
+{
+struct kprobe *p = kprobe_running();
+return (p && (unsigned long)p->addr == addr) ? 1 : 0;
+}
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
 return (addr >= (unsigned long)__kprobes_text_start &&
@@ -617,6 +623,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
 #endif
+/*
+* jprobes use jprobe_return() which skips the normal return
+* path of the function, and this messes up the accounting of the
+* function graph tracer.
+*
+* Pause function graph tracing while performing the jprobe function.
+*/
+pause_graph_tracing();
 return 1;
 }
 NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -642,6 +657,8 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 * saved regs...
 */
 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+/* It's OK to start function graph tracing again */
+unpause_graph_tracing();
 preempt_enable_no_resched();
 return 1;
 }


@@ -615,6 +615,24 @@ void __init exc_lvl_early_init(void)
 }
 #endif
+/*
+* Emergency stacks are used for a range of things, from asynchronous
+* NMIs (system reset, machine check) to synchronous, process context.
+* We set preempt_count to zero, even though that isn't necessarily correct. To
+* get the right value we'd need to copy it from the previous thread_info, but
+* doing that might fault causing more problems.
+* TODO: what to do with accounting?
+*/
+static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
+{
+ti->task = NULL;
+ti->cpu = cpu;
+ti->preempt_count = 0;
+ti->local_flags = 0;
+ti->flags = 0;
+klp_init_thread_info(ti);
+}
 /*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
@@ -633,24 +651,31 @@
 * Since we use these as temporary stacks during secondary CPU
 * bringup, we need to get at them in real mode. This means they
 * must also be within the RMO region.
+*
+* The IRQ stacks allocated elsewhere in this file are zeroed and
+* initialized in kernel/irq.c. These are initialized here in order
+* to have emergency stacks available as early as possible.
 */
 limit = min(safe_stack_limit(), ppc64_rma_size);
 for_each_possible_cpu(i) {
 struct thread_info *ti;
 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-klp_init_thread_info(ti);
+memset(ti, 0, THREAD_SIZE);
+emerg_stack_init_thread_info(ti, i);
 paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
 #ifdef CONFIG_PPC_BOOK3S_64
 /* emergency stack for NMI exception handling. */
 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-klp_init_thread_info(ti);
+memset(ti, 0, THREAD_SIZE);
+emerg_stack_init_thread_info(ti, i);
 paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 /* emergency stack for machine check exception handling. */
 ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
-klp_init_thread_info(ti);
+memset(ti, 0, THREAD_SIZE);
+emerg_stack_init_thread_info(ti, i);
 paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
 }


@@ -45,10 +45,14 @@ _GLOBAL(ftrace_caller)
 stdu r1,-SWITCH_FRAME_SIZE(r1)
 /* Save all gprs to pt_regs */
-SAVE_8GPRS(0,r1)
-SAVE_8GPRS(8,r1)
-SAVE_8GPRS(16,r1)
-SAVE_8GPRS(24,r1)
+SAVE_GPR(0, r1)
+SAVE_10GPRS(2, r1)
+SAVE_10GPRS(12, r1)
+SAVE_10GPRS(22, r1)
+/* Save previous stack pointer (r1) */
+addi r8, r1, SWITCH_FRAME_SIZE
+std r8, GPR1(r1)
 /* Load special regs for save below */
 mfmsr r8
@@ -95,18 +99,44 @@ ftrace_call:
 bl ftrace_stub
 nop
-/* Load ctr with the possibly modified NIP */
-ld r3, _NIP(r1)
-mtctr r3
+/* Load the possibly modified NIP */
+ld r15, _NIP(r1)
 #ifdef CONFIG_LIVEPATCH
-cmpd r14,r3 /* has NIP been altered? */
+cmpd r14, r15 /* has NIP been altered? */
 #endif
+#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
+/* NIP has not been altered, skip over further checks */
+beq 1f
+/* Check if there is an active kprobe on us */
+subi r3, r14, 4
+bl is_current_kprobe_addr
+nop
+/*
+* If r3 == 1, then this is a kprobe/jprobe.
+* else, this is livepatched function.
+*
+* The conditional branch for livepatch_handler below will use the
+* result of this comparison. For kprobe/jprobe, we just need to branch to
+* the new NIP, not call livepatch_handler. The branch below is bne, so we
+* want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
+* CR0[EQ] = (r3 == 1).
+*/
+cmpdi r3, 1
+1:
+#endif
+/* Load CTR with the possibly modified NIP */
+mtctr r15
 /* Restore gprs */
-REST_8GPRS(0,r1)
-REST_8GPRS(8,r1)
-REST_8GPRS(16,r1)
-REST_8GPRS(24,r1)
+REST_GPR(0,r1)
+REST_10GPRS(2,r1)
+REST_10GPRS(12,r1)
+REST_10GPRS(22,r1)
 /* Restore possibly modified LR */
 ld r0, _LINK(r1)
@@ -119,7 +149,10 @@ ftrace_call:
 addi r1, r1, SWITCH_FRAME_SIZE
 #ifdef CONFIG_LIVEPATCH
-/* Based on the cmpd above, if the NIP was altered handle livepatch */
+/*
+* Based on the cmpd or cmpdi above, if the NIP was altered and we're
+* not on a kprobe/jprobe, then handle livepatch.
+*/
 bne- livepatch_handler
 #endif


@@ -1486,6 +1486,14 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
 break;
 case KVM_REG_PPC_TB_OFFSET:
+/*
+* POWER9 DD1 has an erratum where writing TBU40 causes
+* the timebase to lose ticks. So we don't let the
+* timebase offset be changed on P9 DD1. (It is
+* initialized to zero.)
+*/
+if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+break;
 /* round up to multiple of 2^24 */
 vcpu->arch.vcore->tb_offset =
 ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -2907,12 +2915,36 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
 int r;
 int srcu_idx;
+unsigned long ebb_regs[3] = {}; /* shut up GCC */
+unsigned long user_tar = 0;
+unsigned int user_vrsave;
 if (!vcpu->arch.sane) {
 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 return -EINVAL;
 }
+/*
+* Don't allow entry with a suspended transaction, because
+* the guest entry/exit code will lose it.
+* If the guest has TM enabled, save away their TM-related SPRs
+* (they will get restored by the TM unavailable interrupt).
+*/
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+(current->thread.regs->msr & MSR_TM)) {
+if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+run->fail_entry.hardware_entry_failure_reason = 0;
+return -EINVAL;
+}
+current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+current->thread.regs->msr &= ~MSR_TM;
+}
+#endif
 kvmppc_core_prepare_to_enter(vcpu);
 /* No need to go into the guest when all we'll do is come back out */
@@ -2934,6 +2966,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 flush_all_to_thread(current);
+/* Save userspace EBB and other register values */
+if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ebb_regs[0] = mfspr(SPRN_EBBHR);
+ebb_regs[1] = mfspr(SPRN_EBBRR);
+ebb_regs[2] = mfspr(SPRN_BESCR);
+user_tar = mfspr(SPRN_TAR);
+}
+user_vrsave = mfspr(SPRN_VRSAVE);
 vcpu->arch.wqp = &vcpu->arch.vcore->wq;
 vcpu->arch.pgdir = current->mm->pgd;
 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2960,6 +3001,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 }
 } while (is_kvmppc_resume_guest(r));
+/* Restore userspace EBB and other register values */
+if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+mtspr(SPRN_EBBHR, ebb_regs[0]);
+mtspr(SPRN_EBBRR, ebb_regs[1]);
+mtspr(SPRN_BESCR, ebb_regs[2]);
+mtspr(SPRN_TAR, user_tar);
+mtspr(SPRN_FSCR, current->thread.fscr);
+}
+mtspr(SPRN_VRSAVE, user_vrsave);
 out:
 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
 atomic_dec(&vcpu->kvm->arch.vcpus_running);


@@ -121,10 +121,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 * Put whatever is in the decrementer into the
 * hypervisor decrementer.
 */
+BEGIN_FTR_SECTION
+ld r5, HSTATE_KVM_VCORE(r13)
+ld r6, VCORE_KVM(r5)
+ld r9, KVM_HOST_LPCR(r6)
+andis. r9, r9, LPCR_LD@h
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 mfspr r8,SPRN_DEC
 mftb r7
-mtspr SPRN_HDEC,r8
+BEGIN_FTR_SECTION
+/* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
+bne 32f
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 extsw r8,r8
+32: mtspr SPRN_HDEC,r8
 add r8,r8,r7
 std r8,HSTATE_DECEXP(r13)


@@ -32,12 +32,29 @@
 #include <asm/opal.h>
 #include <asm/xive-regs.h>
+/* Sign-extend HDEC if not on POWER9 */
+#define EXTEND_HDEC(reg) \
+BEGIN_FTR_SECTION; \
+extsw reg, reg; \
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 /* Values in HSTATE_NAPPING(r13) */
 #define NAPPING_CEDE 1
 #define NAPPING_NOVCPU 2
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS 144
+#define STACK_SLOT_TRAP (SFS-4)
+#define STACK_SLOT_TID (SFS-16)
+#define STACK_SLOT_PSSCR (SFS-24)
+#define STACK_SLOT_PID (SFS-32)
+#define STACK_SLOT_IAMR (SFS-40)
+#define STACK_SLOT_CIABR (SFS-48)
+#define STACK_SLOT_DAWR (SFS-56)
+#define STACK_SLOT_DAWRX (SFS-64)
 /*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
@@ -214,6 +231,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 kvmppc_primary_no_guest:
 /* We handle this much like a ceded vcpu */
 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+/* HDEC may be larger than DEC for arch >= v3.00, but since the */
+/* HDEC value came from DEC in the first place, it will fit */
 mfspr r3, SPRN_HDEC
 mtspr SPRN_DEC, r3
 /*
@@ -295,8 +314,9 @@ kvm_novcpu_wakeup:
 /* See if our timeslice has expired (HDEC is negative) */
 mfspr r0, SPRN_HDEC
+EXTEND_HDEC(r0)
 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-cmpwi r0, 0
+cmpdi r0, 0
 blt kvm_novcpu_exit
 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
@@ -319,10 +339,10 @@ kvm_novcpu_exit:
 bl kvmhv_accumulate_time
 #endif
 13: mr r3, r12
-stw r12, 112-4(r1)
+stw r12, STACK_SLOT_TRAP(r1)
 bl kvmhv_commence_exit
 nop
-lwz r12, 112-4(r1)
+lwz r12, STACK_SLOT_TRAP(r1)
 b kvmhv_switch_to_host
 /*
@@ -390,8 +410,8 @@ kvm_secondary_got_guest:
 lbz r4, HSTATE_PTID(r13)
 cmpwi r4, 0
 bne 63f
-lis r6, 0x7fff
-ori r6, r6, 0xffff
+LOAD_REG_ADDR(r6, decrementer_max)
+ld r6, 0(r6)
 mtspr SPRN_HDEC, r6
 /* and set per-LPAR registers, if doing dynamic micro-threading */
 ld r6, HSTATE_SPLIT_MODE(r13)
@@ -545,11 +565,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 * *
 *****************************************************************************/
-/* Stack frame offsets */
-#define STACK_SLOT_TID (112-16)
-#define STACK_SLOT_PSSCR (112-24)
-#define STACK_SLOT_PID (112-32)
 .global kvmppc_hv_entry
 kvmppc_hv_entry:
@@ -565,7 +580,7 @@ kvmppc_hv_entry:
 */
 mflr r0
 std r0, PPC_LR_STKOFF(r1)
-stdu r1, -112(r1)
+stdu r1, -SFS(r1)
 /* Save R1 in the PACA */
 std r1, HSTATE_HOST_R1(r13)
@@ -749,10 +764,20 @@ BEGIN_FTR_SECTION
 mfspr r5, SPRN_TIDR
 mfspr r6, SPRN_PSSCR
 mfspr r7, SPRN_PID
+mfspr r8, SPRN_IAMR
 std r5, STACK_SLOT_TID(r1)
 std r6, STACK_SLOT_PSSCR(r1)
 std r7, STACK_SLOT_PID(r1)
+std r8, STACK_SLOT_IAMR(r1)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+mfspr r5, SPRN_CIABR
+mfspr r6, SPRN_DAWR
+mfspr r7, SPRN_DAWRX
+std r5, STACK_SLOT_CIABR(r1)
+std r6, STACK_SLOT_DAWR(r1)
+std r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 BEGIN_FTR_SECTION
 /* Set partition DABR */
@@ -968,7 +993,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 /* Check if HDEC expires soon */
 mfspr r3, SPRN_HDEC
-cmpwi r3, 512 /* 1 microsecond */
+EXTEND_HDEC(r3)
+cmpdi r3, 512 /* 1 microsecond */
 blt hdec_soon
 #ifdef CONFIG_KVM_XICS
@@ -1505,11 +1531,10 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 * set by the guest could disrupt the host.
 */
 li r0, 0
-mtspr SPRN_IAMR, r0
-mtspr SPRN_CIABR, r0
-mtspr SPRN_DAWRX, r0
+mtspr SPRN_PSPB, r0
 mtspr SPRN_WORT, r0
 BEGIN_FTR_SECTION
+mtspr SPRN_IAMR, r0
 mtspr SPRN_TCSCR, r0
 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
 li r0, 1
@@ -1525,6 +1550,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 std r6,VCPU_UAMOR(r9)
 li r6,0
 mtspr SPRN_AMR,r6
+mtspr SPRN_UAMOR, r6
 /* Switch DSCR back to host value */
 mfspr r8, SPRN_DSCR
@@ -1669,13 +1695,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 ptesync
 /* Restore host values of some registers */
+BEGIN_FTR_SECTION
+ld r5, STACK_SLOT_CIABR(r1)
+ld r6, STACK_SLOT_DAWR(r1)
+ld r7, STACK_SLOT_DAWRX(r1)
+mtspr SPRN_CIABR, r5
+mtspr SPRN_DAWR, r6
+mtspr SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 BEGIN_FTR_SECTION
 ld r5, STACK_SLOT_TID(r1)
 ld r6, STACK_SLOT_PSSCR(r1)
 ld r7, STACK_SLOT_PID(r1)
+ld r8, STACK_SLOT_IAMR(r1)
 mtspr SPRN_TIDR, r5
 mtspr SPRN_PSSCR, r6
 mtspr SPRN_PID, r7
+mtspr SPRN_IAMR, r8
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 BEGIN_FTR_SECTION
 PPC_INVALIDATE_ERAT
@@ -1819,8 +1855,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
 li r0, KVM_GUEST_MODE_NONE
 stb r0, HSTATE_IN_GUEST(r13)
-ld r0, 112+PPC_LR_STKOFF(r1)
-addi r1, r1, 112
+ld r0, SFS+PPC_LR_STKOFF(r1)
+addi r1, r1, SFS
 mtlr r0
 blr
@@ -2366,12 +2402,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
 mfspr r3, SPRN_DEC
 mfspr r4, SPRN_HDEC
 mftb r5
-cmpw r3, r4
+extsw r3, r3
+EXTEND_HDEC(r4)
+cmpd r3, r4
 ble 67f
 mtspr SPRN_DEC, r4
 67:
 /* save expiry time of guest decrementer */
-extsw r3, r3
 add r3, r3, r5
 ld r4, HSTATE_KVM_VCPU(r13)
 ld r5, HSTATE_KVM_VCORE(r13)


@@ -69,7 +69,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 {
 /* If the XIVE supports the new "store EOI facility, use it */
 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-__x_writeq(0, __x_eoi_page(xd));
+__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
 else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 opal_int_eoi(hw_irq);
 } else {
@@ -89,7 +89,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 * properly.
 */
 if (xd->flags & XIVE_IRQ_FLAG_LSI)
-__x_readq(__x_eoi_page(xd));
+__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
 else {
 eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);


@@ -68,7 +68,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 addr = ALIGN(addr, huge_page_size(h));
 vma = find_vma(mm, addr);
 if (mm->task_size - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
 /*


@@ -112,7 +112,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 addr = PAGE_ALIGN(addr);
 vma = find_vma(mm, addr);
 if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
@@ -157,7 +157,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 addr = PAGE_ALIGN(addr);
 vma = find_vma(mm, addr);
 if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }


@@ -99,7 +99,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 if ((mm->task_size - len) < addr)
 return 0;
 vma = find_vma(mm, addr);
-return (!vma || (addr + len) <= vma->vm_start);
+return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)


@@ -101,5 +101,6 @@ void perf_get_regs_user(struct perf_regs *regs_user,
 struct pt_regs *regs_user_copy)
 {
 regs_user->regs = task_pt_regs(current);
-regs_user->abi = perf_reg_abi(current);
+regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
+PERF_SAMPLE_REGS_ABI_NONE;
 }


@@ -75,7 +75,8 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
if (WARN_ON(!gpdev)) if (WARN_ON(!gpdev))
return NULL; return NULL;
if (WARN_ON(!gpdev->dev.of_node)) /* Not all PCI devices have device-tree nodes */
if (!gpdev->dev.of_node)
return NULL; return NULL;
/* Get assoicated PCI device */ /* Get assoicated PCI device */
@@ -448,7 +449,7 @@ static int mmio_launch_invalidate(struct npu *npu, unsigned long launch,
return mmio_atsd_reg; return mmio_atsd_reg;
} }
static int mmio_invalidate_pid(struct npu *npu, unsigned long pid) static int mmio_invalidate_pid(struct npu *npu, unsigned long pid, bool flush)
{ {
unsigned long launch; unsigned long launch;
@@ -464,12 +465,15 @@ static int mmio_invalidate_pid(struct npu *npu, unsigned long pid)
/* PID */ /* PID */
launch |= pid << PPC_BITLSHIFT(38); launch |= pid << PPC_BITLSHIFT(38);
/* No flush */
launch |= !flush << PPC_BITLSHIFT(39);
/* Invalidating the entire process doesn't use a va */ /* Invalidating the entire process doesn't use a va */
return mmio_launch_invalidate(npu, launch, 0); return mmio_launch_invalidate(npu, launch, 0);
} }
static int mmio_invalidate_va(struct npu *npu, unsigned long va, static int mmio_invalidate_va(struct npu *npu, unsigned long va,
unsigned long pid) unsigned long pid, bool flush)
{ {
unsigned long launch; unsigned long launch;
@@ -485,26 +489,60 @@ static int mmio_invalidate_va(struct npu *npu, unsigned long va,
/* PID */ /* PID */
launch |= pid << PPC_BITLSHIFT(38); launch |= pid << PPC_BITLSHIFT(38);
/* No flush */
launch |= !flush << PPC_BITLSHIFT(39);
return mmio_launch_invalidate(npu, launch, va); return mmio_launch_invalidate(npu, launch, va);
} }
#define mn_to_npu_context(x) container_of(x, struct npu_context, mn) #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
struct mmio_atsd_reg {
struct npu *npu;
int reg;
};
static void mmio_invalidate_wait(
struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS], bool flush)
{
struct npu *npu;
int i, reg;
/* Wait for all invalidations to complete */
for (i = 0; i <= max_npu2_index; i++) {
if (mmio_atsd_reg[i].reg < 0)
continue;
/* Wait for completion */
npu = mmio_atsd_reg[i].npu;
reg = mmio_atsd_reg[i].reg;
while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
cpu_relax();
put_mmio_atsd_reg(npu, reg);
/*
* The GPU requires two flush ATSDs to ensure all entries have
* been flushed. We use PID 0 as it will never be used for a
* process on the GPU.
*/
if (flush)
mmio_invalidate_pid(npu, 0, true);
}
}
/* /*
* Invalidate either a single address or an entire PID depending on * Invalidate either a single address or an entire PID depending on
* the value of va. * the value of va.
*/ */
static void mmio_invalidate(struct npu_context *npu_context, int va, static void mmio_invalidate(struct npu_context *npu_context, int va,
unsigned long address) unsigned long address, bool flush)
{ {
int i, j, reg; int i, j;
struct npu *npu; struct npu *npu;
struct pnv_phb *nphb; struct pnv_phb *nphb;
struct pci_dev *npdev; struct pci_dev *npdev;
struct { struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
struct npu *npu;
int reg;
} mmio_atsd_reg[NV_MAX_NPUS];
unsigned long pid = npu_context->mm->context.id; unsigned long pid = npu_context->mm->context.id;
/* /*
@@ -524,10 +562,11 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
if (va) if (va)
mmio_atsd_reg[i].reg = mmio_atsd_reg[i].reg =
mmio_invalidate_va(npu, address, pid); mmio_invalidate_va(npu, address, pid,
flush);
else else
mmio_atsd_reg[i].reg = mmio_atsd_reg[i].reg =
mmio_invalidate_pid(npu, pid); mmio_invalidate_pid(npu, pid, flush);
/* /*
* The NPU hardware forwards the shootdown to all GPUs * The NPU hardware forwards the shootdown to all GPUs
@@ -543,18 +582,10 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
*/ */
flush_tlb_mm(npu_context->mm); flush_tlb_mm(npu_context->mm);
/* Wait for all invalidations to complete */ mmio_invalidate_wait(mmio_atsd_reg, flush);
for (i = 0; i <= max_npu2_index; i++) { if (flush)
if (mmio_atsd_reg[i].reg < 0) /* Wait for the flush to complete */
continue; mmio_invalidate_wait(mmio_atsd_reg, false);
/* Wait for completion */
npu = mmio_atsd_reg[i].npu;
reg = mmio_atsd_reg[i].reg;
while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
cpu_relax();
put_mmio_atsd_reg(npu, reg);
}
} }
static void pnv_npu2_mn_release(struct mmu_notifier *mn, static void pnv_npu2_mn_release(struct mmu_notifier *mn,
@@ -570,7 +601,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
* There should be no more translation requests for this PID, but we * There should be no more translation requests for this PID, but we
* need to ensure any entries for it are removed from the TLB. * need to ensure any entries for it are removed from the TLB.
*/ */
mmio_invalidate(npu_context, 0, 0); mmio_invalidate(npu_context, 0, 0, true);
} }
static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn, static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -580,7 +611,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
{ {
struct npu_context *npu_context = mn_to_npu_context(mn); struct npu_context *npu_context = mn_to_npu_context(mn);
mmio_invalidate(npu_context, 1, address); mmio_invalidate(npu_context, 1, address, true);
} }
static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn, static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
@@ -589,7 +620,7 @@ static void pnv_npu2_mn_invalidate_page(struct mmu_notifier *mn,
{ {
struct npu_context *npu_context = mn_to_npu_context(mn); struct npu_context *npu_context = mn_to_npu_context(mn);
mmio_invalidate(npu_context, 1, address); mmio_invalidate(npu_context, 1, address, true);
} }
static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn, static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -599,8 +630,11 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
struct npu_context *npu_context = mn_to_npu_context(mn); struct npu_context *npu_context = mn_to_npu_context(mn);
unsigned long address; unsigned long address;
for (address = start; address <= end; address += PAGE_SIZE)
mmio_invalidate(npu_context, 1, address);
for (address = start; address < end; address += PAGE_SIZE)
mmio_invalidate(npu_context, 1, address, false);
/* Do the flush only on the final address == end */
mmio_invalidate(npu_context, 1, address, true);
} }
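
In short, the range notifier now queues one non-flushing ATSD per page and lets a single flushing invalidate (plus the PID-0 flush ATSDs issued inside mmio_invalidate_wait()) complete the whole batch. A condensed sketch of that flow, using the same calls as the hunks above (the wrapper name is invented, locking and error handling omitted):

/* sketch only: batch the per-page invalidates, flush once at the end */
static void invalidate_range_sketch(struct npu_context *npu_context,
                                    unsigned long start, unsigned long end)
{
        unsigned long address;

        for (address = start; address < end; address += PAGE_SIZE)
                mmio_invalidate(npu_context, 1, address, false); /* no flush */

        /* final flushing invalidate covers everything queued above */
        mmio_invalidate(npu_context, 1, address, true);
}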
static const struct mmu_notifier_ops nv_nmmu_notifier_ops = { static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -650,8 +684,11 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
/* No nvlink associated with this GPU device */ /* No nvlink associated with this GPU device */
return ERR_PTR(-ENODEV); return ERR_PTR(-ENODEV);
if (!mm) {
/* kernel thread contexts are not supported */
if (!mm || mm->context.id == 0) {
/*
* Kernel thread contexts are not supported and context id 0 is
* reserved on the GPU.
*/
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }

View File

@@ -297,7 +297,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{ {
/* If the XIVE supports the new "store EOI facility, use it */ /* If the XIVE supports the new "store EOI facility, use it */
if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
out_be64(xd->eoi_mmio, 0); out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
/* /*
* The FW told us to call it. This happens for some * The FW told us to call it. This happens for some

View File

@@ -30,6 +30,7 @@ CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_SYSCALL=y CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_WBT_SQ=y
CONFIG_PARTITION_ADVANCED=y CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y CONFIG_BSD_DISKLABEL=y
@@ -90,6 +94,8 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m CONFIG_NET_KEY=m
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_INET=y CONFIG_INET=y
CONFIG_IP_MULTICAST=y CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_ADVANCED_ROUTER=y
@@ -359,6 +365,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y CONFIG_DNS_RESOLVER=y
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m CONFIG_NET_PKTGEN=m
@@ -367,16 +374,19 @@ CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0 CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y CONFIG_CONNECTOR=y
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_OSD=m CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768 CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_CDROM_PKTCDVD=m CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_ATA_OVER_ETH=m
CONFIG_VIRTIO_BLK=y CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_RBD=m
CONFIG_ENCLOSURE_SERVICES=m CONFIG_ENCLOSURE_SERVICES=m
CONFIG_GENWQE=m
CONFIG_RAID_ATTRS=m CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SD=y
@@ -442,6 +452,8 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NATSEMI is not set
CONFIG_PPP=m CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m CONFIG_PPP_BSDCOMP=m
@@ -452,7 +464,6 @@ CONFIG_PPTP=m
CONFIG_PPPOL2TP=m CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m CONFIG_PPP_SYNC_TTY=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set # CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set # CONFIG_SERIO is not set
@@ -471,6 +482,7 @@ CONFIG_DIAG288_WATCHDOG=m
CONFIG_INFINIBAND=m CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_POSIX_ACL=y
@@ -487,12 +499,18 @@ CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y CONFIG_XFS_RT=y
CONFIG_XFS_DEBUG=y CONFIG_XFS_DEBUG=y
CONFIG_GFS2_FS=m CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
CONFIG_NILFS2_FS=m CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FANOTIFY=y CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QUOTA_DEBUG=y
CONFIG_QFMT_V1=m CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m CONFIG_QFMT_V2=m
CONFIG_AUTOFS4_FS=m CONFIG_AUTOFS4_FS=m
@@ -558,6 +576,7 @@ CONFIG_HEADERS_CHECK=y
CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y CONFIG_DEBUG_PAGEALLOC=y
CONFIG_DEBUG_RODATA_TEST=y
CONFIG_DEBUG_OBJECTS=y CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_SELFTEST=y CONFIG_DEBUG_OBJECTS_SELFTEST=y
CONFIG_DEBUG_OBJECTS_FREE=y CONFIG_DEBUG_OBJECTS_FREE=y
@@ -580,7 +599,6 @@ CONFIG_DETECT_HUNG_TASK=y
CONFIG_WQ_WATCHDOG=y CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_ON_OOPS=y CONFIG_PANIC_ON_OOPS=y
CONFIG_DEBUG_TIMEKEEPING=y CONFIG_DEBUG_TIMEKEEPING=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
CONFIG_PROVE_LOCKING=y CONFIG_PROVE_LOCKING=y
@@ -595,6 +613,7 @@ CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300 CONFIG_RCU_CPU_STALL_TIMEOUT=300
CONFIG_NOTIFIER_ERROR_INJECTION=m CONFIG_NOTIFIER_ERROR_INJECTION=m
CONFIG_PM_NOTIFIER_ERROR_INJECT=m CONFIG_PM_NOTIFIER_ERROR_INJECT=m
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y CONFIG_FAULT_INJECTION=y
CONFIG_FAILSLAB=y CONFIG_FAILSLAB=y
CONFIG_FAIL_PAGE_ALLOC=y CONFIG_FAIL_PAGE_ALLOC=y
@@ -616,13 +635,12 @@ CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y CONFIG_TEST_LIST_SORT=y
CONFIG_TEST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y CONFIG_KPROBES_SANITY_TEST=y
CONFIG_RBTREE_TEST=y CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_STRING_HELPERS=y
CONFIG_TEST_KSTRTOX=y
CONFIG_DMA_API_DEBUG=y CONFIG_DMA_API_DEBUG=y
CONFIG_TEST_BPF=m CONFIG_TEST_BPF=m
CONFIG_BUG_ON_DATA_CORRUPTION=y CONFIG_BUG_ON_DATA_CORRUPTION=y
@@ -630,6 +648,7 @@ CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_NETWORK=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -640,7 +659,9 @@ CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_GCM=m
@@ -648,6 +669,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m CONFIG_CRYPTO_CRC32=m
@@ -657,8 +679,10 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAMELLIA=m
@@ -674,6 +698,7 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_842=m CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_RNG=m
@@ -685,6 +710,7 @@ CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y CONFIG_ASYMMETRIC_KEY_TYPE=y
@@ -692,6 +718,7 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m CONFIG_CRC7=m
CONFIG_CRC8=m CONFIG_CRC8=m
CONFIG_RANDOM32_SELFTEST=y
CONFIG_CORDIC=m CONFIG_CORDIC=m
CONFIG_CMM=m CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y CONFIG_APPLDATA_BASE=y

View File

@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_SYSCALL=y CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set # CONFIG_COMPAT_BRK is not set
@@ -46,7 +47,10 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_WBT_SQ=y
CONFIG_PARTITION_ADVANCED=y CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y CONFIG_BSD_DISKLABEL=y
@@ -88,6 +92,8 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m CONFIG_NET_KEY=m
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_INET=y CONFIG_INET=y
CONFIG_IP_MULTICAST=y CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_ADVANCED_ROUTER=y
@@ -356,6 +362,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y CONFIG_DNS_RESOLVER=y
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m CONFIG_NET_PKTGEN=m
@@ -364,16 +371,18 @@ CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0 CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y CONFIG_CONNECTOR=y
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_OSD=m CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768 CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_CDROM_PKTCDVD=m CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_ATA_OVER_ETH=m
CONFIG_VIRTIO_BLK=y CONFIG_VIRTIO_BLK=y
CONFIG_ENCLOSURE_SERVICES=m CONFIG_ENCLOSURE_SERVICES=m
CONFIG_GENWQE=m
CONFIG_RAID_ATTRS=m CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SD=y
@@ -439,6 +448,8 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NATSEMI is not set
CONFIG_PPP=m CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m CONFIG_PPP_BSDCOMP=m
@@ -449,7 +460,6 @@ CONFIG_PPTP=m
CONFIG_PPPOL2TP=m CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m CONFIG_PPP_SYNC_TTY=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set # CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set # CONFIG_SERIO is not set
@@ -468,6 +478,7 @@ CONFIG_DIAG288_WATCHDOG=m
CONFIG_INFINIBAND=m CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_POSIX_ACL=y
@@ -483,11 +494,15 @@ CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y CONFIG_XFS_RT=y
CONFIG_GFS2_FS=m CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FANOTIFY=y CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V1=m CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m CONFIG_QFMT_V2=m
@@ -553,7 +568,6 @@ CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_PANIC_ON_OOPS=y CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y CONFIG_LATENCYTOP=y
@@ -576,6 +590,7 @@ CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_NETWORK=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -599,6 +614,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m CONFIG_CRYPTO_CRC32=m
@@ -611,6 +627,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAMELLIA=m
@@ -626,16 +643,19 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_842=m CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_SHA1_S390=m CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m CONFIG_CRC7=m

View File

@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_SYSCALL=y CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_WBT_SQ=y
CONFIG_PARTITION_ADVANCED=y CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y CONFIG_BSD_DISKLABEL=y
@@ -86,6 +90,8 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m CONFIG_NET_KEY=m
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_INET=y CONFIG_INET=y
CONFIG_IP_MULTICAST=y CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_ADVANCED_ROUTER=y
@@ -354,6 +360,7 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y CONFIG_DNS_RESOLVER=y
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m CONFIG_NET_PKTGEN=m
@@ -362,16 +369,18 @@ CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0 CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y CONFIG_CONNECTOR=y
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_OSD=m CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768 CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_CDROM_PKTCDVD=m CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_ATA_OVER_ETH=m
CONFIG_VIRTIO_BLK=y CONFIG_VIRTIO_BLK=y
CONFIG_ENCLOSURE_SERVICES=m CONFIG_ENCLOSURE_SERVICES=m
CONFIG_GENWQE=m
CONFIG_RAID_ATTRS=m CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SD=y
@@ -437,6 +446,8 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_INTEL is not set # CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set # CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NATSEMI is not set
CONFIG_PPP=m CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m CONFIG_PPP_BSDCOMP=m
@@ -447,7 +458,6 @@ CONFIG_PPTP=m
CONFIG_PPPOL2TP=m CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m CONFIG_PPP_SYNC_TTY=m
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set # CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set # CONFIG_SERIO is not set
@@ -466,6 +476,7 @@ CONFIG_DIAG288_WATCHDOG=m
CONFIG_INFINIBAND=m CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
CONFIG_VIRTIO_BALLOON=m CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_POSIX_ACL=y
@@ -481,11 +492,15 @@ CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y CONFIG_XFS_RT=y
CONFIG_GFS2_FS=m CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FANOTIFY=y CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V1=m CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m CONFIG_QFMT_V2=m
@@ -551,7 +566,6 @@ CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_PANIC_ON_OOPS=y CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y CONFIG_LATENCYTOP=y
@@ -574,6 +588,7 @@ CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_NETWORK=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -597,6 +612,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m CONFIG_CRYPTO_CRC32=m
@@ -609,6 +625,7 @@ CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAMELLIA=m
@@ -624,6 +641,7 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_842=m CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_USER_API_HASH=m CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_RNG=m
@@ -635,6 +653,7 @@ CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m CONFIG_CRC7=m

View File

@@ -12,8 +12,10 @@ CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=2 CONFIG_NR_CPUS=2
# CONFIG_HOTPLUG_CPU is not set # CONFIG_HOTPLUG_CPU is not set
CONFIG_HZ_100=y CONFIG_HZ_100=y
# CONFIG_ARCH_RANDOM is not set
# CONFIG_COMPACTION is not set # CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set # CONFIG_MIGRATION is not set
# CONFIG_BOUNCE is not set
# CONFIG_CHECK_STACK is not set # CONFIG_CHECK_STACK is not set
# CONFIG_CHSC_SCH is not set # CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set # CONFIG_SCM_BUS is not set
@@ -36,11 +38,11 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_FC_ATTRS=y
CONFIG_ZFCP=y CONFIG_ZFCP=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
# CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set # CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set # CONFIG_SERIO is not set
# CONFIG_HVC_IUCV is not set # CONFIG_HVC_IUCV is not set
# CONFIG_HW_RANDOM_S390 is not set
CONFIG_RAW_DRIVER=y CONFIG_RAW_DRIVER=y
# CONFIG_SCLP_ASYNC is not set # CONFIG_SCLP_ASYNC is not set
# CONFIG_HMC_DRV is not set # CONFIG_HMC_DRV is not set
@@ -54,9 +56,9 @@ CONFIG_RAW_DRIVER=y
# CONFIG_INOTIFY_USER is not set # CONFIG_INOTIFY_USER is not set
CONFIG_CONFIGFS_FS=y CONFIG_CONFIGFS_FS=y
# CONFIG_MISC_FILESYSTEMS is not set # CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_PRINTK_TIME=y CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_KERNEL=y
CONFIG_PANIC_ON_OOPS=y CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set # CONFIG_SCHED_DEBUG is not set

View File

@@ -28,6 +28,7 @@ CONFIG_NAMESPACES=y
CONFIG_USER_NS=y CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_SYSCALL=y CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set # CONFIG_COMPAT_BRK is not set
@@ -108,7 +109,6 @@ CONFIG_ZFCP=y
CONFIG_SCSI_VIRTIO=y CONFIG_SCSI_VIRTIO=y
CONFIG_MD=y CONFIG_MD=y
CONFIG_MD_LINEAR=m CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=m
CONFIG_MD_MULTIPATH=m CONFIG_MD_MULTIPATH=m
CONFIG_BLK_DEV_DM=y CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=m CONFIG_DM_CRYPT=m
@@ -131,6 +131,7 @@ CONFIG_TUN=m
CONFIG_VIRTIO_NET=y CONFIG_VIRTIO_NET=y
# CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_INPUT is not set # CONFIG_INPUT is not set
# CONFIG_SERIO is not set # CONFIG_SERIO is not set
CONFIG_DEVKMEM=y CONFIG_DEVKMEM=y
@@ -162,7 +163,6 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y CONFIG_DEBUG_PAGEALLOC=y
CONFIG_DETECT_HUNG_TASK=y CONFIG_DETECT_HUNG_TASK=y
CONFIG_PANIC_ON_OOPS=y CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_PROVE_LOCKING=y CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y CONFIG_LOCK_STAT=y
@@ -172,14 +172,12 @@ CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_SG=y CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y CONFIG_DEBUG_NOTIFIERS=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_RCU_TRACE=y
CONFIG_LATENCYTOP=y CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y CONFIG_FTRACE_SYSCALLS=y
CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
CONFIG_STACK_TRACER=y CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENTS=y
CONFIG_FUNCTION_PROFILER=y CONFIG_FUNCTION_PROFILER=y
CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y CONFIG_KPROBES_SANITY_TEST=y
@@ -190,7 +188,6 @@ CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CTS=m CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m CONFIG_CRYPTO_XTS=m
@@ -230,6 +227,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_ZCRYPT=m CONFIG_ZCRYPT=m
CONFIG_PKEY=m CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m CONFIG_CRYPTO_SHA512_S390=m

View File

@@ -231,12 +231,17 @@ ENTRY(sie64a)
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
.Lsie_done: .Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception) # some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
# instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie # See also .Lcleanup_sie
.Lrewind_pad:
nop 0
.Lrewind_pad6:
nopr 7
.Lrewind_pad4:
nopr 7
.Lrewind_pad2:
nopr 7
.globl sie_exit .globl sie_exit
sie_exit: sie_exit:
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
@@ -249,7 +254,9 @@ sie_exit:
stg %r14,__SF_EMPTY+16(%r15) # set exit reason code stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
j sie_exit j sie_exit
EX_TABLE(.Lrewind_pad,.Lsie_fault)
EX_TABLE(.Lrewind_pad6,.Lsie_fault)
EX_TABLE(.Lrewind_pad4,.Lsie_fault)
EX_TABLE(.Lrewind_pad2,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault) EX_TABLE(sie_exit,.Lsie_fault)
EXPORT_SYMBOL(sie64a) EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit) EXPORT_SYMBOL(sie_exit)

View File

@@ -977,11 +977,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
ptr = asce.origin * 4096; ptr = asce.origin * 4096;
if (asce.r) { if (asce.r) {
*fake = 1; *fake = 1;
ptr = 0;
asce.dt = ASCE_TYPE_REGION1; asce.dt = ASCE_TYPE_REGION1;
} }
switch (asce.dt) { switch (asce.dt) {
case ASCE_TYPE_REGION1: case ASCE_TYPE_REGION1:
if (vaddr.rfx01 > asce.tl && !asce.r) if (vaddr.rfx01 > asce.tl && !*fake)
return PGM_REGION_FIRST_TRANS; return PGM_REGION_FIRST_TRANS;
break; break;
case ASCE_TYPE_REGION2: case ASCE_TYPE_REGION2:
@@ -1009,8 +1010,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
union region1_table_entry rfte; union region1_table_entry rfte;
if (*fake) { if (*fake) {
/* offset in 16EB guest memory block */ ptr += (unsigned long) vaddr.rfx << 53;
ptr = ptr + ((unsigned long) vaddr.rsx << 53UL);
rfte.val = ptr; rfte.val = ptr;
goto shadow_r2t; goto shadow_r2t;
} }
@@ -1036,8 +1036,7 @@ shadow_r2t:
union region2_table_entry rste; union region2_table_entry rste;
if (*fake) { if (*fake) {
/* offset in 8PB guest memory block */ ptr += (unsigned long) vaddr.rsx << 42;
ptr = ptr + ((unsigned long) vaddr.rtx << 42UL);
rste.val = ptr; rste.val = ptr;
goto shadow_r3t; goto shadow_r3t;
} }
@@ -1064,8 +1063,7 @@ shadow_r3t:
union region3_table_entry rtte; union region3_table_entry rtte;
if (*fake) { if (*fake) {
/* offset in 4TB guest memory block */ ptr += (unsigned long) vaddr.rtx << 31;
ptr = ptr + ((unsigned long) vaddr.sx << 31UL);
rtte.val = ptr; rtte.val = ptr;
goto shadow_sgt; goto shadow_sgt;
} }
@@ -1101,8 +1099,7 @@ shadow_sgt:
union segment_table_entry ste; union segment_table_entry ste;
if (*fake) { if (*fake) {
/* offset in 2G guest memory block */ ptr += (unsigned long) vaddr.sx << 20;
ptr = ptr + ((unsigned long) vaddr.sx << 20UL);
ste.val = ptr; ste.val = ptr;
goto shadow_pgt; goto shadow_pgt;
} }

View File

@@ -101,7 +101,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = PAGE_ALIGN(addr); addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vma->vm_start)) (!vma || addr + len <= vm_start_gap(vma)))
goto check_asce_limit; goto check_asce_limit;
} }
@@ -151,7 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = PAGE_ALIGN(addr); addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vma->vm_start)) (!vma || addr + len <= vm_start_gap(vma)))
goto check_asce_limit; goto check_asce_limit;
} }
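
These call sites (and the matching ones further down for sparc, x86 and the hugetlb paths) only make sense together with the new vm_start_gap() helper from the stack guard gap series. Roughly, it looks like the following paraphrased sketch; treat the details as approximate rather than the exact mm/ code:

/* sketch: for a downward-growing stack VMA, pretend it already starts
 * stack_guard_gap bytes lower, so new mappings cannot land in the gap */
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;
		if (vm_start > vma->vm_start)	/* underflow check */
			vm_start = 0;
	}
	return vm_start;
}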

View File

@@ -64,7 +64,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start)) (!vma || addr + len <= vm_start_gap(vma)))
return addr; return addr;
} }
@@ -114,7 +114,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start)) (!vma || addr + len <= vm_start_gap(vma)))
return addr; return addr;
} }

View File

@@ -120,7 +120,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
if (task_size - len >= addr && if (task_size - len >= addr &&
(!vma || addr + len <= vma->vm_start)) (!vma || addr + len <= vm_start_gap(vma)))
return addr; return addr;
} }
@@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
if (task_size - len >= addr && if (task_size - len >= addr &&
(!vma || addr + len <= vma->vm_start)) (!vma || addr + len <= vm_start_gap(vma)))
return addr; return addr;
} }

View File

@@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h)); addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
if (task_size - len >= addr && if (task_size - len >= addr &&
(!vma || addr + len <= vma->vm_start)) (!vma || addr + len <= vm_start_gap(vma)))
return addr; return addr;
} }
if (mm->get_unmapped_area == arch_get_unmapped_area) if (mm->get_unmapped_area == arch_get_unmapped_area)

View File

@@ -233,7 +233,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h)); addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start)) (!vma || addr + len <= vm_start_gap(vma)))
return addr; return addr;
} }
if (current->mm->get_unmapped_area == arch_get_unmapped_area) if (current->mm->get_unmapped_area == arch_get_unmapped_area)

View File

@@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids
[ C(DTLB) ] = { [ C(DTLB) ] = {
[ C(OP_READ) ] = { [ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */
[ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */
}, },
[ C(OP_WRITE) ] = { [ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */
[ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */ [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */
}, },
[ C(OP_PREFETCH) ] = { [ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0x0, [ C(RESULT_ACCESS) ] = 0x0,

View File

@@ -29,6 +29,7 @@ struct pt_regs;
} while (0) } while (0)
extern int fixup_exception(struct pt_regs *regs, int trapnr); extern int fixup_exception(struct pt_regs *regs, int trapnr);
extern int fixup_bug(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip); extern bool ex_has_fault_handler(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr); extern void early_fixup_exception(struct pt_regs *regs, int trapnr);

View File

@@ -296,6 +296,7 @@ struct x86_emulate_ctxt {
bool perm_ok; /* do not check permissions if true */ bool perm_ok; /* do not check permissions if true */
bool ud; /* inject an #UD if host doesn't support insn */ bool ud; /* inject an #UD if host doesn't support insn */
bool tf; /* TF value before instruction (after for syscall/sysret) */
bool have_exception; bool have_exception;
struct x86_exception exception; struct x86_exception exception;

View File

@@ -2,8 +2,7 @@
#define _ASM_X86_MSHYPER_H #define _ASM_X86_MSHYPER_H
#include <linux/types.h> #include <linux/types.h>
#include <linux/interrupt.h> #include <linux/atomic.h>
#include <linux/clocksource.h>
#include <asm/hyperv.h> #include <asm/hyperv.h>
/* /*

View File

@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
addr = PAGE_ALIGN(addr); addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
if (end - len >= addr && if (end - len >= addr &&
(!vma || addr + len <= vma->vm_start)) (!vma || addr + len <= vm_start_gap(vma)))
return addr; return addr;
} }
@@ -187,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
addr = PAGE_ALIGN(addr); addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start)) (!vma || addr + len <= vm_start_gap(vma)))
return addr; return addr;
} }

View File

@@ -182,7 +182,7 @@ int is_valid_bugaddr(unsigned long addr)
return ud == INSN_UD0 || ud == INSN_UD2; return ud == INSN_UD0 || ud == INSN_UD2;
} }
static int fixup_bug(struct pt_regs *regs, int trapnr) int fixup_bug(struct pt_regs *regs, int trapnr)
{ {
if (trapnr != X86_TRAP_UD) if (trapnr != X86_TRAP_UD)
return 0; return 0;

View File

@@ -2742,6 +2742,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF); ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
} }
ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
return X86EMUL_CONTINUE; return X86EMUL_CONTINUE;
} }

View File

@@ -5313,6 +5313,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
ctxt->eflags = kvm_get_rflags(vcpu); ctxt->eflags = kvm_get_rflags(vcpu);
ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
ctxt->eip = kvm_rip_read(vcpu); ctxt->eip = kvm_rip_read(vcpu);
ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
(ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
@@ -5528,36 +5530,25 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
return dr6; return dr6;
} }
static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
{ {
struct kvm_run *kvm_run = vcpu->run; struct kvm_run *kvm_run = vcpu->run;
/*
* rflags is the old, "raw" value of the flags. The new value has
* not been saved yet.
*
* This is correct even for TF set by the guest, because "the
* processor will not generate this exception after the instruction
* that sets the TF flag".
*/
if (unlikely(rflags & X86_EFLAGS_TF)) {
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
DR6_RTM;
kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
kvm_run->debug.arch.exception = DB_VECTOR;
kvm_run->exit_reason = KVM_EXIT_DEBUG;
*r = EMULATE_USER_EXIT;
} else {
/*
* "Certain debug exceptions may clear bit 0-3. The
* remaining contents of the DR6 register are never
* cleared by the processor".
*/
vcpu->arch.dr6 &= ~15;
vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
kvm_queue_exception(vcpu, DB_VECTOR);
}
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
kvm_run->debug.arch.exception = DB_VECTOR;
kvm_run->exit_reason = KVM_EXIT_DEBUG;
*r = EMULATE_USER_EXIT;
} else {
/*
* "Certain debug exceptions may clear bit 0-3. The
* remaining contents of the DR6 register are never
* cleared by the processor".
*/
vcpu->arch.dr6 &= ~15;
vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
kvm_queue_exception(vcpu, DB_VECTOR);
} }
} }
@@ -5567,7 +5558,17 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
int r = EMULATE_DONE; int r = EMULATE_DONE;
kvm_x86_ops->skip_emulated_instruction(vcpu); kvm_x86_ops->skip_emulated_instruction(vcpu);
kvm_vcpu_check_singlestep(vcpu, rflags, &r);
/*
* rflags is the old, "raw" value of the flags. The new value has
* not been saved yet.
*
* This is correct even for TF set by the guest, because "the
* processor will not generate this exception after the instruction
* that sets the TF flag".
*/
if (unlikely(rflags & X86_EFLAGS_TF))
kvm_vcpu_do_singlestep(vcpu, &r);
return r == EMULATE_DONE; return r == EMULATE_DONE;
} }
EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction); EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
@@ -5726,8 +5727,9 @@ restart:
toggle_interruptibility(vcpu, ctxt->interruptibility); toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false; vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip); kvm_rip_write(vcpu, ctxt->eip);
if (r == EMULATE_DONE)
kvm_vcpu_check_singlestep(vcpu, rflags, &r);
if (r == EMULATE_DONE &&
(ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
kvm_vcpu_do_singlestep(vcpu, &r);
if (!ctxt->have_exception || if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP) exception_type(ctxt->exception.vector) == EXCPT_TRAP)
__kvm_set_rflags(vcpu, ctxt->eflags); __kvm_set_rflags(vcpu, ctxt->eflags);
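
The key invariant across these hunks is that TF is sampled into ctxt->tf when emulation starts, before the emulated instruction can rewrite EFLAGS. A standalone illustration of that capture-then-test pattern (not KVM code, all names invented):

#include <stdbool.h>
#include <stdint.h>

#define FLAG_TF (1u << 8)

struct emu_ctxt {
	uint32_t eflags;	/* may be rewritten during emulation */
	bool tf;		/* TF as it was before the instruction ran */
};

static void begin_emulation(struct emu_ctxt *ctxt, uint32_t guest_eflags)
{
	ctxt->eflags = guest_eflags;
	ctxt->tf = (guest_eflags & FLAG_TF) != 0;
}

static bool singlestep_due(const struct emu_ctxt *ctxt, bool guest_debug_ss)
{
	/* decide on the captured value, not on the possibly-updated eflags */
	return ctxt->tf || guest_debug_ss;
}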

View File

@@ -162,6 +162,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
if (fixup_exception(regs, trapnr)) if (fixup_exception(regs, trapnr))
return; return;
if (fixup_bug(regs, trapnr))
return;
fail: fail:
early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n", early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
(unsigned)trapnr, (unsigned long)regs->cs, regs->ip, (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
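
With fixup_bug() made non-static, the early exception path can now recover from the UD opcodes that BUG()/WARN() sites emit (per the is_valid_bugaddr() check shown earlier). A condensed sketch of the resulting order; the real function handles more cases:

/* sketch only */
void early_fixup_exception_order(struct pt_regs *regs, int trapnr)
{
	if (fixup_exception(regs, trapnr))	/* exception table match */
		return;
	if (fixup_bug(regs, trapnr))		/* UD0/UD2 from a BUG/WARN site */
		return;
	/* otherwise fall through to the early panic path shown above */
}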

View File

@@ -148,7 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
addr = ALIGN(addr, huge_page_size(h)); addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr); vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr && if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start)) (!vma || addr + len <= vm_start_gap(vma)))
return addr; return addr;
} }
if (mm->get_unmapped_area == arch_get_unmapped_area) if (mm->get_unmapped_area == arch_get_unmapped_area)

View File

@@ -161,16 +161,16 @@ static int page_size_mask;
static void __init probe_page_size_mask(void) static void __init probe_page_size_mask(void)
{ {
#if !defined(CONFIG_KMEMCHECK)
/* /*
* For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
* use small pages. * use small pages.
* This will simplify cpa(), which otherwise needs to support splitting * This will simplify cpa(), which otherwise needs to support splitting
* large pages into small in interrupt context, etc. * large pages into small in interrupt context, etc.
*/ */
if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled()) if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
page_size_mask |= 1 << PG_LEVEL_2M; page_size_mask |= 1 << PG_LEVEL_2M;
#endif else
direct_gbpages = 0;
/* Enable PSE if available */ /* Enable PSE if available */
if (boot_cpu_has(X86_FEATURE_PSE)) if (boot_cpu_has(X86_FEATURE_PSE))

View File

@@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { }
# define PLATFORM_NR_IRQS 0 # define PLATFORM_NR_IRQS 0
#endif #endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS) #define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
#if VARIANT_NR_IRQS == 0 #if VARIANT_NR_IRQS == 0
static inline void variant_init_irq(void) { } static inline void variant_init_irq(void) { }

View File

@@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{ {
int irq = irq_find_mapping(NULL, hwirq); int irq = irq_find_mapping(NULL, hwirq);
if (hwirq >= NR_IRQS) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
__func__, hwirq);
}
#ifdef CONFIG_DEBUG_STACKOVERFLOW #ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */ /* Debugging check for stack overflow: is there less than 1KB free? */
{ {

View File

@@ -593,8 +593,7 @@ c_show(struct seq_file *f, void *slot)
(ccount_freq/10000) % 100, (ccount_freq/10000) % 100,
loops_per_jiffy/(500000/HZ), loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100); (loops_per_jiffy/(5000/HZ)) % 100);
seq_puts(f, "flags\t\t: "
seq_printf(f,"flags\t\t: "
#if XCHAL_HAVE_NMI #if XCHAL_HAVE_NMI
"nmi " "nmi "
#endif #endif

View File

@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
/* At this point: (!vmm || addr < vmm->vm_end). */ /* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr) if (TASK_SIZE - len < addr)
return -ENOMEM; return -ENOMEM;
if (!vmm || addr + len <= vmm->vm_start) if (!vmm || addr + len <= vm_start_gap(vmm))
return addr; return addr;
addr = vmm->vm_end; addr = vmm->vm_end;
if (flags & MAP_SHARED) if (flags & MAP_SHARED)

View File

@@ -118,7 +118,7 @@ SECTIONS
SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4) SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4)
SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48) SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20)
SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
#endif #endif
@@ -306,13 +306,13 @@ SECTIONS
.UserExceptionVector.literal) .UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal, SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal, .DoubleExceptionVector.literal,
DOUBLEEXC_VECTOR_VADDR - 48, DOUBLEEXC_VECTOR_VADDR - 20,
SIZEOF(.UserExceptionVector.text), SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text) .UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text, SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text, .DoubleExceptionVector.text,
DOUBLEEXC_VECTOR_VADDR, DOUBLEEXC_VECTOR_VADDR,
48, 20,
.DoubleExceptionVector.literal) .DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;

View File

@@ -317,8 +317,7 @@ static int __init simdisk_init(void)
if (simdisk_count > MAX_SIMDISK_COUNT) if (simdisk_count > MAX_SIMDISK_COUNT)
simdisk_count = MAX_SIMDISK_COUNT; simdisk_count = MAX_SIMDISK_COUNT;
sddev = kmalloc(simdisk_count * sizeof(struct simdisk), sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL);
GFP_KERNEL);
if (sddev == NULL) if (sddev == NULL)
goto out_unregister; goto out_unregister;

View File

@@ -24,16 +24,18 @@
/* Interrupt configuration. */ /* Interrupt configuration. */
#define PLATFORM_NR_IRQS 10 #define PLATFORM_NR_IRQS 0
/* Default assignment of LX60 devices to external interrupts. */ /* Default assignment of LX60 devices to external interrupts. */
#ifdef CONFIG_XTENSA_MX #ifdef CONFIG_XTENSA_MX
#define DUART16552_INTNUM XCHAL_EXTINT3_NUM #define DUART16552_INTNUM XCHAL_EXTINT3_NUM
#define OETH_IRQ XCHAL_EXTINT4_NUM #define OETH_IRQ XCHAL_EXTINT4_NUM
#define C67X00_IRQ XCHAL_EXTINT8_NUM
#else #else
#define DUART16552_INTNUM XCHAL_EXTINT0_NUM #define DUART16552_INTNUM XCHAL_EXTINT0_NUM
#define OETH_IRQ XCHAL_EXTINT1_NUM #define OETH_IRQ XCHAL_EXTINT1_NUM
#define C67X00_IRQ XCHAL_EXTINT5_NUM
#endif #endif
/* /*
@@ -63,5 +65,5 @@
#define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000) #define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000)
#define C67X00_SIZE 0x10 #define C67X00_SIZE 0x10
#define C67X00_IRQ 5
#endif /* __XTENSA_XTAVNET_HARDWARE_H */ #endif /* __XTENSA_XTAVNET_HARDWARE_H */

View File

@@ -175,8 +175,8 @@ static struct resource ethoc_res[] = {
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[2] = { /* IRQ number */ [2] = { /* IRQ number */
.start = OETH_IRQ, .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
.end = OETH_IRQ, .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
}, },
}; };
@@ -213,8 +213,8 @@ static struct resource c67x00_res[] = {
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { /* IRQ number */ [1] = { /* IRQ number */
.start = C67X00_IRQ, .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
.end = C67X00_IRQ, .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
}, },
}; };
@@ -247,7 +247,7 @@ static struct resource serial_resource = {
static struct plat_serial8250_port serial_platform_data[] = { static struct plat_serial8250_port serial_platform_data[] = {
[0] = { [0] = {
.mapbase = DUART16552_PADDR, .mapbase = DUART16552_PADDR,
.irq = DUART16552_INTNUM, .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP, UPF_IOREMAP,
.iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32, .iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,
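
Every on-board device IRQ now goes through XTENSA_PIC_LINUX_IRQ(), defined in the irq.h hunk above as hwirq + 1, which is also why NR_IRQS grew by one: Linux IRQ 0 is generally treated as "no interrupt" and is kept unused. For example, with the non-MX definitions above:

/* OETH_IRQ is XCHAL_EXTINT1_NUM here, so the Ethernet device is now
 * requested as Linux IRQ (XCHAL_EXTINT1_NUM + 1), not the raw hw number */
.start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
.end   = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),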

View File

@@ -68,6 +68,45 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
__blk_mq_sched_assign_ioc(q, rq, bio, ioc); __blk_mq_sched_assign_ioc(q, rq, bio, ioc);
} }
/*
* Mark a hardware queue as needing a restart. For shared queues, maintain
* a count of how many hardware queues are marked for restart.
*/
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
return;
if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
struct request_queue *q = hctx->queue;
if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
atomic_inc(&q->shared_hctx_restart);
} else
set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
return false;
if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
struct request_queue *q = hctx->queue;
if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
atomic_dec(&q->shared_hctx_restart);
} else
clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
if (blk_mq_hctx_has_pending(hctx)) {
blk_mq_run_hw_queue(hctx, true);
return true;
}
return false;
}
struct request *blk_mq_sched_get_request(struct request_queue *q, struct request *blk_mq_sched_get_request(struct request_queue *q,
struct bio *bio, struct bio *bio,
unsigned int op, unsigned int op,
@@ -266,18 +305,6 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
return true; return true;
} }
static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
if (blk_mq_hctx_has_pending(hctx)) {
blk_mq_run_hw_queue(hctx, true);
return true;
}
}
return false;
}
/** /**
* list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
* @pos: loop cursor. * @pos: loop cursor.
@@ -309,6 +336,13 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
unsigned int i, j; unsigned int i, j;
if (set->flags & BLK_MQ_F_TAG_SHARED) { if (set->flags & BLK_MQ_F_TAG_SHARED) {
/*
* If this is 0, then we know that no hardware queues
* have RESTART marked. We're done.
*/
if (!atomic_read(&queue->shared_hctx_restart))
return;
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu_rr(q, queue, &set->tag_list, list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
tag_set_list) { tag_set_list) {
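
The point of shared_hctx_restart is to make the common case of blk_mq_sched_restart() O(1): the counter is only adjusted when a RESTART bit actually changes state, so a zero counter proves that no hardware queue in the shared set is marked and the round-robin scan can be skipped. A standalone illustration of the same bookkeeping (plain C11, not the block-layer code):

#include <stdatomic.h>
#include <stdbool.h>

struct hctx { atomic_bool restart; };
static atomic_int shared_restart_count;

static void mark_restart(struct hctx *h)
{
	/* only a false -> true transition bumps the shared counter */
	if (!atomic_exchange(&h->restart, true))
		atomic_fetch_add(&shared_restart_count, 1);
}

static bool clear_restart(struct hctx *h)
{
	if (!atomic_exchange(&h->restart, false))
		return false;
	atomic_fetch_sub(&shared_restart_count, 1);
	return true;
}

static bool any_restart_pending(void)
{
	/* fast path: nothing marked anywhere, skip the expensive scan */
	return atomic_load(&shared_restart_count) != 0;
}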

View File

@@ -115,15 +115,6 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
return false; return false;
} }
/*
* Mark a hardware queue as needing a restart.
*/
static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx) static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{ {
return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state); return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

View File

@@ -2103,20 +2103,30 @@ static void blk_mq_map_swqueue(struct request_queue *q,
} }
} }
/*
* Caller needs to ensure that we're either frozen/quiesced, or that
* the queue isn't live yet.
*/
static void queue_set_hctx_shared(struct request_queue *q, bool shared) static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{ {
struct blk_mq_hw_ctx *hctx; struct blk_mq_hw_ctx *hctx;
int i; int i;
queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx(q, hctx, i) {
if (shared) if (shared) {
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
atomic_inc(&q->shared_hctx_restart);
hctx->flags |= BLK_MQ_F_TAG_SHARED; hctx->flags |= BLK_MQ_F_TAG_SHARED;
else } else {
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
atomic_dec(&q->shared_hctx_restart);
hctx->flags &= ~BLK_MQ_F_TAG_SHARED; hctx->flags &= ~BLK_MQ_F_TAG_SHARED;
}
} }
} }
static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared) static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set,
bool shared)
{ {
struct request_queue *q; struct request_queue *q;

View File

@@ -777,24 +777,25 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
} }
/** /**
* blk_release_queue: - release a &struct request_queue when it is no longer needed
* @kobj: the kobj belonging to the request queue to be released
*
* Description:
* blk_release_queue is the pair to blk_init_queue() or
* blk_queue_make_request(). It should be called when a request queue is
* being released; typically when a block device is being de-registered.
* Currently, its primary task it to free all the &struct request
* structures that were allocated to the queue and the queue itself.
*
* Note:
* The low level driver must have finished any outstanding requests first
* via blk_cleanup_queue().
**/
static void blk_release_queue(struct kobject *kobj)
{
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
* __blk_release_queue - release a request queue when it is no longer needed
* @work: pointer to the release_work member of the request queue to be released
*
* Description:
* blk_release_queue is the counterpart of blk_init_queue(). It should be
* called when a request queue is being released; typically when a block
* device is being de-registered. Its primary task it to free the queue
* itself.
*
* Notes:
* The low level driver must have finished any outstanding requests first
* via blk_cleanup_queue().
*
* Although blk_release_queue() may be called with preemption disabled,
* __blk_release_queue() may sleep.
*/
static void __blk_release_queue(struct work_struct *work)
{
struct request_queue *q = container_of(work, typeof(*q), release_work);
if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
blk_stat_remove_callback(q, q->poll_cb); blk_stat_remove_callback(q, q->poll_cb);
@@ -834,6 +835,15 @@ static void blk_release_queue(struct kobject *kobj)
call_rcu(&q->rcu_head, blk_free_queue_rcu); call_rcu(&q->rcu_head, blk_free_queue_rcu);
} }
static void blk_release_queue(struct kobject *kobj)
{
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
INIT_WORK(&q->release_work, __blk_release_queue);
schedule_work(&q->release_work);
}
static const struct sysfs_ops queue_sysfs_ops = { static const struct sysfs_ops queue_sysfs_ops = {
.show = queue_attr_show, .show = queue_attr_show,
.store = queue_attr_store, .store = queue_attr_store,
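
The hunk above splits the release callback in two because the kobject release may run with preemption disabled while the real teardown may sleep; the sleeping part is pushed onto a workqueue. Below is a rough userspace analogue of that handoff, using a detached thread in place of schedule_work(); the struct and function names are made up for the sketch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct request_queue_model {
	char name[16];
};

/* Sleeping part of the teardown, run in worker context. */
static void *deferred_release(void *arg)
{
	struct request_queue_model *q = arg;

	usleep(1000);                   /* stands in for work that may sleep */
	printf("released %s\n", q->name);
	free(q);
	return NULL;
}

/* Non-sleeping trigger: only queues the real work and returns. */
static void release_queue(struct request_queue_model *q)
{
	pthread_t tid;

	pthread_create(&tid, NULL, deferred_release, q);
	pthread_detach(tid);
}

int main(void)
{
	struct request_queue_model *q = calloc(1, sizeof(*q));

	snprintf(q->name, sizeof(q->name), "q0");
	release_queue(q);
	sleep(1);                       /* let the worker finish before exit */
	return 0;
}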


@@ -416,9 +416,18 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc,
 		}
 	}
 
-	table_desc->validation_count++;
-	if (table_desc->validation_count == 0) {
-		table_desc->validation_count--;
+	if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+		table_desc->validation_count++;
+
+		/*
+		 * Detect validation_count overflows to ensure that the warning
+		 * message will only be printed once.
+		 */
+		if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+			ACPI_WARNING((AE_INFO,
+				      "Table %p, Validation count overflows\n",
+				      table_desc));
+		}
 	}
 
 	*out_table = table_desc->pointer;
@@ -445,13 +454,20 @@ void acpi_tb_put_table(struct acpi_table_desc *table_desc)
 
 	ACPI_FUNCTION_TRACE(acpi_tb_put_table);
 
-	if (table_desc->validation_count == 0) {
-		ACPI_WARNING((AE_INFO,
-			      "Table %p, Validation count is zero before decrement\n",
-			      table_desc));
-		return_VOID;
+	if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+		table_desc->validation_count--;
+
+		/*
+		 * Detect validation_count underflows to ensure that the warning
+		 * message will only be printed once.
+		 */
+		if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+			ACPI_WARNING((AE_INFO,
+				      "Table %p, Validation count underflows\n",
+				      table_desc));
+			return_VOID;
+		}
 	}
 
-	table_desc->validation_count--;
-
 	if (table_desc->validation_count == 0) {
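
Both hunks above replace wrap-around-prone ++/-- with a saturating counter: once the count hits a ceiling it stays pinned, and the warning fires only on the first crossing. A compact illustration of the same saturation logic follows; the constant and names are invented for the example and are not the ACPICA values.

#include <stdint.h>
#include <stdio.h>

#define MAX_VALIDATIONS UINT16_MAX    /* example ceiling */

static uint16_t count;

static void get_ref(void)
{
	if (count < MAX_VALIDATIONS) {
		count++;
		if (count >= MAX_VALIDATIONS)
			fprintf(stderr, "validation count overflows\n");
	}
	/* at the ceiling: leave the counter pinned instead of wrapping */
}

static void put_ref(void)
{
	if (count < MAX_VALIDATIONS) {
		count--;
		if (count >= MAX_VALIDATIONS)    /* wrapped below zero */
			fprintf(stderr, "validation count underflows\n");
	}
}

int main(void)
{
	get_ref();
	put_ref();
	put_ref();        /* underflow: warning printed, counter pinned */
	put_ref();        /* pinned at the ceiling, no second warning */
	printf("count=%u\n", count);
	return 0;
}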


@@ -474,15 +474,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 			return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
 		}
 
-		/*
-		 * The end_tag opcode must be followed by a zero byte.
-		 * Although this byte is technically defined to be a checksum,
-		 * in practice, all ASL compilers set this byte to zero.
-		 */
-		if (*(aml + 1) != 0) {
-			return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-		}
-
 		/* Return the pointer to the end_tag if requested */
 
 		if (!user_function) {


@@ -31,6 +31,11 @@
 #define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
 				(1 << ACPI_IORT_NODE_SMMU_V3))
 
+/* Until ACPICA headers cover IORT rev. C */
+#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX
+#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX		0x2
+#endif
+
 struct iort_its_msi_chip {
 	struct list_head	list;
 	struct fwnode_handle	*fw_node;
@@ -834,6 +839,36 @@ static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
 	return num_res;
 }
 
+static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
+{
+	/*
+	 * Cavium ThunderX2 implementation doesn't not support unique
+	 * irq line. Use single irq line for all the SMMUv3 interrupts.
+	 */
+	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
+		return false;
+
+	/*
+	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
+	 * SPI numbers here.
+	 */
+	return smmu->event_gsiv == smmu->pri_gsiv &&
+	       smmu->event_gsiv == smmu->gerr_gsiv &&
+	       smmu->event_gsiv == smmu->sync_gsiv;
+}
+
+static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
+{
+	/*
+	 * Override the size, for Cavium ThunderX2 implementation
+	 * which doesn't support the page 1 SMMU register space.
+	 */
+	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
+		return SZ_64K;
+
+	return SZ_128K;
+}
+
 static void __init arm_smmu_v3_init_resources(struct resource *res,
 					      struct acpi_iort_node *node)
 {
@@ -844,30 +879,38 @@ static void __init arm_smmu_v3_init_resources(struct resource *res,
 	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
 
 	res[num_res].start = smmu->base_address;
-	res[num_res].end = smmu->base_address + SZ_128K - 1;
+	res[num_res].end = smmu->base_address +
+				arm_smmu_v3_resource_size(smmu) - 1;
 	res[num_res].flags = IORESOURCE_MEM;
 	num_res++;
+	if (arm_smmu_v3_is_combined_irq(smmu)) {
+		if (smmu->event_gsiv)
+			acpi_iort_register_irq(smmu->event_gsiv, "combined",
+					       ACPI_EDGE_SENSITIVE,
+					       &res[num_res++]);
+	} else {
 
-	if (smmu->event_gsiv)
-		acpi_iort_register_irq(smmu->event_gsiv, "eventq",
-				       ACPI_EDGE_SENSITIVE,
-				       &res[num_res++]);
-
-	if (smmu->pri_gsiv)
-		acpi_iort_register_irq(smmu->pri_gsiv, "priq",
-				       ACPI_EDGE_SENSITIVE,
-				       &res[num_res++]);
-
-	if (smmu->gerr_gsiv)
-		acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
-				       ACPI_EDGE_SENSITIVE,
-				       &res[num_res++]);
-
-	if (smmu->sync_gsiv)
-		acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
-				       ACPI_EDGE_SENSITIVE,
-				       &res[num_res++]);
+		if (smmu->event_gsiv)
+			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
+					       ACPI_EDGE_SENSITIVE,
+					       &res[num_res++]);
+
+		if (smmu->pri_gsiv)
+			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
+					       ACPI_EDGE_SENSITIVE,
+					       &res[num_res++]);
+
+		if (smmu->gerr_gsiv)
+			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
+					       ACPI_EDGE_SENSITIVE,
+					       &res[num_res++]);
+
+		if (smmu->sync_gsiv)
+			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
+					       ACPI_EDGE_SENSITIVE,
+					       &res[num_res++]);
+	}
 }
 
 static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
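
The pattern in the hunks above is a model-keyed quirk: the firmware-reported SMMU model selects a smaller MMIO window, and when all interrupts share one SPI a single "combined" IRQ is registered instead of four. A small sketch of that kind of model-based dispatch follows; the struct layout, model IDs and sizes are illustrative only, not the IORT definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_GENERIC	0x0
#define MODEL_CN99XX	0x2             /* example model ID */

struct smmu_desc {
	uint32_t model;
	uint32_t event_irq, pri_irq, gerr_irq, sync_irq;
};

/* One shared SPI for everything? Then register a single combined IRQ. */
static bool uses_combined_irq(const struct smmu_desc *s)
{
	if (s->model != MODEL_CN99XX)
		return false;
	return s->event_irq == s->pri_irq &&
	       s->event_irq == s->gerr_irq &&
	       s->event_irq == s->sync_irq;
}

/* Quirky parts get a smaller register window. */
static unsigned long resource_size(const struct smmu_desc *s)
{
	return s->model == MODEL_CN99XX ? 0x10000UL : 0x20000UL;
}

int main(void)
{
	struct smmu_desc s = { MODEL_CN99XX, 100, 100, 100, 100 };

	printf("combined=%d size=0x%lx\n", uses_combined_irq(&s),
	       resource_size(&s));
	return 0;
}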


@@ -1428,6 +1428,37 @@ static void acpi_init_coherency(struct acpi_device *adev)
 	adev->flags.coherent_dma = cca;
 }
 
+static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
+{
+	bool *is_spi_i2c_slave_p = data;
+
+	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+		return 1;
+
+	/*
+	 * devices that are connected to UART still need to be enumerated to
+	 * platform bus
+	 */
+	if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
+		*is_spi_i2c_slave_p = true;
+
+	/* no need to do more checking */
+	return -1;
+}
+
+static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
+{
+	struct list_head resource_list;
+	bool is_spi_i2c_slave = false;
+
+	INIT_LIST_HEAD(&resource_list);
+	acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
+			       &is_spi_i2c_slave);
+	acpi_dev_free_resource_list(&resource_list);
+
+	return is_spi_i2c_slave;
+}
+
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 			     int type, unsigned long long sta)
 {
@@ -1443,6 +1474,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 	acpi_bus_get_flags(device);
 	device->flags.match_driver = false;
 	device->flags.initialized = true;
+	device->flags.spi_i2c_slave = acpi_is_spi_i2c_slave(device);
 	acpi_device_clear_enumerated(device);
 	device_initialize(&device->dev);
 	dev_set_uevent_suppress(&device->dev, true);
@@ -1727,38 +1759,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
 	return AE_OK;
 }
 
-static int acpi_check_spi_i2c_slave(struct acpi_resource *ares, void *data)
-{
-	bool *is_spi_i2c_slave_p = data;
-
-	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
-		return 1;
-
-	/*
-	 * devices that are connected to UART still need to be enumerated to
-	 * platform bus
-	 */
-	if (ares->data.common_serial_bus.type != ACPI_RESOURCE_SERIAL_TYPE_UART)
-		*is_spi_i2c_slave_p = true;
-
-	/* no need to do more checking */
-	return -1;
-}
-
 static void acpi_default_enumeration(struct acpi_device *device)
 {
-	struct list_head resource_list;
-	bool is_spi_i2c_slave = false;
-
 	/*
 	 * Do not enumerate SPI/I2C slaves as they will be enumerated by their
 	 * respective parents.
 	 */
-	INIT_LIST_HEAD(&resource_list);
-	acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
-			       &is_spi_i2c_slave);
-	acpi_dev_free_resource_list(&resource_list);
-	if (!is_spi_i2c_slave) {
+	if (!device->flags.spi_i2c_slave) {
 		acpi_create_platform_device(device, NULL);
 		acpi_device_set_enumerated(device);
 	} else {
@@ -1854,7 +1861,7 @@ static void acpi_bus_attach(struct acpi_device *device)
 		return;
 
 	device->flags.match_driver = true;
-	if (ret > 0) {
+	if (ret > 0 && !device->flags.spi_i2c_slave) {
 		acpi_device_set_enumerated(device);
 		goto ok;
 	}
@@ -1863,10 +1870,10 @@ static void acpi_bus_attach(struct acpi_device *device)
 	if (ret < 0)
 		return;
 
-	if (device->pnp.type.platform_id)
-		acpi_default_enumeration(device);
-	else
+	if (!device->pnp.type.platform_id && !device->flags.spi_i2c_slave)
 		acpi_device_set_enumerated(device);
+	else
+		acpi_default_enumeration(device);
 
  ok:
 	list_for_each_entry(child, &device->children, node)
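
The scan.c hunks move the "is this an SPI/I2C slave?" resource walk to device-object initialization and cache the answer in a flag, so later enumeration decisions test one bit instead of re-walking the resources. A toy version of the cache-once, test-many pattern is below; the enums and struct are invented for the sketch.

#include <stdbool.h>
#include <stdio.h>

enum res_type { RES_SERIAL_BUS, RES_OTHER };
enum serial_type { SERIAL_I2C, SERIAL_SPI, SERIAL_UART };

struct resource_entry { enum res_type type; enum serial_type bus; };

struct device_model {
	bool spi_i2c_slave;             /* cached once at init time */
	struct resource_entry res[2];
	int nres;
};

/* Walk the resources once and remember the answer. */
static void init_device(struct device_model *d)
{
	for (int i = 0; i < d->nres; i++)
		if (d->res[i].type == RES_SERIAL_BUS &&
		    d->res[i].bus != SERIAL_UART) {
			d->spi_i2c_slave = true;
			break;
		}
}

/* Enumeration just tests the cached flag. */
static void enumerate(struct device_model *d)
{
	if (!d->spi_i2c_slave)
		printf("create platform device\n");
	else
		printf("leave it to the SPI/I2C parent\n");
}

int main(void)
{
	struct device_model d = { .res = { { RES_SERIAL_BUS, SERIAL_I2C } },
				  .nres = 1 };

	init_device(&d);
	enumerate(&d);
	return 0;
}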


@@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
 	unsigned long timeout;
 	int ret;
 
-	xen_blkif_get(blkif);
-
 	set_freezable();
 	while (!kthread_should_stop()) {
 		if (try_to_freeze())
@@ -665,7 +663,6 @@ purge_gnt_list:
 	print_stats(ring);
 
 	ring->xenblkd = NULL;
-	xen_blkif_put(blkif);
 
 	return 0;
 }
@@ -1436,34 +1433,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 static void make_response(struct xen_blkif_ring *ring, u64 id,
 			  unsigned short op, int st)
 {
-	struct blkif_response  resp;
+	struct blkif_response *resp;
 	unsigned long     flags;
 	union blkif_back_rings *blk_rings;
 	int notify;
 
-	resp.id        = id;
-	resp.operation = op;
-	resp.status    = st;
-
 	spin_lock_irqsave(&ring->blk_ring_lock, flags);
 	blk_rings = &ring->blk_rings;
 	/* Place on the response ring for the relevant domain. */
 	switch (ring->blkif->blk_protocol) {
 	case BLKIF_PROTOCOL_NATIVE:
-		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->native,
+					 blk_rings->native.rsp_prod_pvt);
 		break;
 	case BLKIF_PROTOCOL_X86_32:
-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
					 blk_rings->x86_32.rsp_prod_pvt);
 		break;
 	case BLKIF_PROTOCOL_X86_64:
-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
-		       &resp, sizeof(resp));
+		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
+					 blk_rings->x86_64.rsp_prod_pvt);
 		break;
 	default:
 		BUG();
 	}
+
+	resp->id        = id;
+	resp->operation = op;
+	resp->status    = st;
+
 	blk_rings->common.rsp_prod_pvt++;
 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
 	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
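
The make_response() rewrite above fills the response directly in the shared-ring slot instead of building it on the stack and memcpy()ing it over, which avoids the copy and keeps stack contents (including struct padding) out of memory shared with the frontend. Here is a simplified model of filling a ring slot in place; the ring layout and field names are invented for the example.

#include <stdint.h>
#include <stdio.h>

struct response { uint64_t id; uint8_t op; int16_t status; };

struct ring {
	struct response slot[8];
	unsigned int prod;              /* producer index */
};

/* Locate the next slot and fill it in place, field by field. */
static void push_response(struct ring *r, uint64_t id, uint8_t op, int16_t st)
{
	struct response *resp = &r->slot[r->prod % 8];

	resp->id = id;                  /* no stack copy of the whole struct */
	resp->op = op;
	resp->status = st;
	r->prod++;
}

int main(void)
{
	static struct ring r;           /* zeroed, like a freshly granted page */

	push_response(&r, 42, 1, 0);
	printf("prod=%u id=%llu\n", r.prod,
	       (unsigned long long)r.slot[0].id);
	return 0;
}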


@@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues;
 struct blkif_common_request {
 	char dummy;
 };
-struct blkif_common_response {
-	char dummy;
-};
+
+/* i386 protocol version */
 
 struct blkif_x86_32_request_rw {
 	uint8_t        nr_segments;  /* number of segments */
@@ -129,14 +128,6 @@ struct blkif_x86_32_request {
 	} u;
 } __attribute__((__packed__));
 
-/* i386 protocol version */
-#pragma pack(push, 4)
-struct blkif_x86_32_response {
-	uint64_t        id;              /* copied from request */
-	uint8_t         operation;       /* copied from request */
-	int16_t         status;          /* BLKIF_RSP_??? */
-};
-#pragma pack(pop)
 
 /* x86_64 protocol version */
 
 struct blkif_x86_64_request_rw {
@@ -193,18 +184,12 @@ struct blkif_x86_64_request {
 	} u;
 } __attribute__((__packed__));
 
-struct blkif_x86_64_response {
-	uint64_t       __attribute__((__aligned__(8))) id;
-	uint8_t         operation;       /* copied from request */
-	int16_t         status;          /* BLKIF_RSP_??? */
-};
-
 DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
-		  struct blkif_common_response);
+		  struct blkif_response);
 DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
-		  struct blkif_x86_32_response);
+		  struct blkif_response __packed);
 DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
-		  struct blkif_x86_64_response);
+		  struct blkif_response);
 
 union blkif_back_rings {
 	struct blkif_back_ring        native;
@@ -281,6 +266,7 @@ struct xen_blkif_ring {
 	wait_queue_head_t	wq;
 	atomic_t		inflight;
+	bool			active;
 	/* One thread per blkif ring. */
 	struct task_struct	*xenblkd;
 	unsigned int		waiting_reqs;


@@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
 		init_waitqueue_head(&ring->shutdown_wq);
 		ring->blkif = blkif;
 		ring->st_print = jiffies;
-		xen_blkif_get(blkif);
+		ring->active = true;
 	}
 
 	return 0;
@@ -249,10 +249,12 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 		struct xen_blkif_ring *ring = &blkif->rings[r];
 		unsigned int i = 0;
 
+		if (!ring->active)
+			continue;
+
 		if (ring->xenblkd) {
 			kthread_stop(ring->xenblkd);
 			wake_up(&ring->shutdown_wq);
-			ring->xenblkd = NULL;
 		}
 
 		/* The above kthread_stop() guarantees that at this point we
@@ -296,7 +298,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 		BUG_ON(ring->free_pages_num != 0);
 		BUG_ON(ring->persistent_gnt_c != 0);
 		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-		xen_blkif_put(blkif);
+		ring->active = false;
 	}
 	blkif->nr_ring_pages = 0;
 	/*
@@ -312,9 +314,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
 static void xen_blkif_free(struct xen_blkif *blkif)
 {
-	xen_blkif_disconnect(blkif);
+	WARN_ON(xen_blkif_disconnect(blkif));
 	xen_vbd_free(&blkif->vbd);
+	kfree(blkif->be->mode);
+	kfree(blkif->be);
 
 	/* Make sure everything is drained before shutting down */
 	kmem_cache_free(xen_blkif_cachep, blkif);
@@ -511,8 +514,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
 		xen_blkif_put(be->blkif);
 	}
 
-	kfree(be->mode);
-	kfree(be);
 
 	return 0;
 }


@@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
 		p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
 		cp++; crng_init_cnt++; len--;
 	}
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
 		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
 	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	return 1;
 }
@@ -841,6 +841,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	}
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
+	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng == &primary_crng && crng_init < 2) {
 		invalidate_batched_entropy();
 		crng_init = 2;
@@ -848,7 +849,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: crng init done\n");
 	}
-	spin_unlock_irqrestore(&primary_crng.lock, flags);
 }
 
 static inline void crng_wait_ready(void)
@@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock = crng_init < 2;
-	unsigned long flags;
+	bool use_lock = READ_ONCE(crng_init) < 2;
+	unsigned long flags = 0;
 	struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock = crng_init < 2;
-	unsigned long flags;
+	bool use_lock = READ_ONCE(crng_init) < 2;
+	unsigned long flags = 0;
 	struct batched_entropy *batch;
 
 	if (arch_get_random_int(&ret))
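
Two things happen in the random.c hunks: the primary_crng lock is dropped before the crng_init-dependent work so two locks are never taken in opposite orders, and the unlocked readers snapshot crng_init through READ_ONCE() (with flags pre-initialised) so the decision to lock and the decision to unlock always agree. A minimal userspace illustration of that single-snapshot conditional-locking pattern follows; it uses C11 atomics and pthreads, not the kernel primitives, and the names are invented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int ready;                        /* 0 until init completes */
static pthread_mutex_t slow_lock = PTHREAD_MUTEX_INITIALIZER;

static int get_value(void)
{
	/*
	 * Read the flag exactly once; locking and unlocking must use the
	 * same snapshot, or we could unlock a mutex we never took.
	 */
	const int need_lock =
		atomic_load_explicit(&ready, memory_order_relaxed) == 0;
	int v;

	if (need_lock)
		pthread_mutex_lock(&slow_lock);
	v = 42;                                 /* stand-in for batched output */
	if (need_lock)
		pthread_mutex_unlock(&slow_lock);
	return v;
}

int main(void)
{
	printf("%d\n", get_value());
	atomic_store(&ready, 1);
	printf("%d\n", get_value());
	return 0;
}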


@@ -14,6 +14,7 @@ config COMMON_CLK_MESON8B
 config COMMON_CLK_GXBB
 	bool
 	depends on COMMON_CLK_AMLOGIC
+	select RESET_CONTROLLER
 	help
 	  Support for the clock controller on AmLogic S905 devices, aka gxbb.
 	  Say Y if you want peripherals and CPU frequency scaling to work.


@@ -156,6 +156,7 @@ config SUN8I_R_CCU
 	bool "Support for Allwinner SoCs' PRCM CCUs"
 	select SUNXI_CCU_DIV
 	select SUNXI_CCU_GATE
+	select SUNXI_CCU_MP
 	default MACH_SUN8I || (ARCH_SUNXI && ARM64)
 
 endif


@@ -31,7 +31,9 @@
 #define CLK_PLL_VIDEO0_2X	8
 #define CLK_PLL_VE		9
 #define CLK_PLL_DDR0		10
-#define CLK_PLL_PERIPH0		11
+
+/* PLL_PERIPH0 exported for PRCM */
+
 #define CLK_PLL_PERIPH0_2X	12
 #define CLK_PLL_PERIPH1		13
 #define CLK_PLL_PERIPH1_2X	14


@@ -243,7 +243,7 @@ static SUNXI_CCU_GATE(ahb_ss_clk,	"ahb-ss",	"ahb",
 static SUNXI_CCU_GATE(ahb_dma_clk,	"ahb-dma",	"ahb",
 		      0x060, BIT(6), 0);
 static SUNXI_CCU_GATE(ahb_bist_clk,	"ahb-bist",	"ahb",
-		      0x060, BIT(6), 0);
+		      0x060, BIT(7), 0);
 static SUNXI_CCU_GATE(ahb_mmc0_clk,	"ahb-mmc0",	"ahb",
 		      0x060, BIT(8), 0);
 static SUNXI_CCU_GATE(ahb_mmc1_clk,	"ahb-mmc1",	"ahb",


@@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents,
 				 0x12c, 0, 4, 24, 3, BIT(31),
 				 CLK_SET_RATE_PARENT);
 static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents,
-				 0x12c, 0, 4, 24, 3, BIT(31),
+				 0x130, 0, 4, 24, 3, BIT(31),
 				 CLK_SET_RATE_PARENT);
 
 static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1",


@@ -29,7 +29,9 @@
 #define CLK_PLL_VIDEO		6
 #define CLK_PLL_VE		7
 #define CLK_PLL_DDR		8
-#define CLK_PLL_PERIPH0		9
+
+/* PLL_PERIPH0 exported for PRCM */
+
 #define CLK_PLL_PERIPH0_2X	10
 #define CLK_PLL_GPU		11
 #define CLK_PLL_PERIPH1		12


@@ -537,7 +537,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = {
 	[RST_BUS_EMAC]		=  { 0x2c0, BIT(17) },
 	[RST_BUS_HSTIMER]	=  { 0x2c0, BIT(19) },
 	[RST_BUS_SPI0]		=  { 0x2c0, BIT(20) },
-	[RST_BUS_OTG]		=  { 0x2c0, BIT(23) },
+	[RST_BUS_OTG]		=  { 0x2c0, BIT(24) },
 	[RST_BUS_EHCI0]		=  { 0x2c0, BIT(26) },
 	[RST_BUS_OHCI0]		=  { 0x2c0, BIT(29) },


@@ -1209,9 +1209,9 @@ arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
 		return 0;
 	}
 
-	rate = readl_relaxed(frame + CNTFRQ);
+	rate = readl_relaxed(base + CNTFRQ);
 
-	iounmap(frame);
+	iounmap(base);
 
 	return rate;
 }

Some files were not shown because too many files have changed in this diff.