Merge 4.8-rc5 into usb-testing

We want the USB fixes in here for testing, and to resolve merge issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date:   2016-09-05 08:07:58 +02:00
568 changed files with 5007 additions and 2819 deletions


@@ -158,6 +158,8 @@ Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
+Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>


@@ -1,7 +1,7 @@
 # Note: This documents additional properties of any device beyond what
 # is documented in Documentation/sysfs-rules.txt
-What:		/sys/devices/*/of_path
+What:		/sys/devices/*/of_node
 Date:		February 2015
 Contact:	Device Tree mailing list <devicetree@vger.kernel.org>
 Description:


@@ -94,14 +94,11 @@ has a requirements for a minimum number of vectors the driver can pass a
 min_vecs argument set to this limit, and the PCI core will return -ENOSPC
 if it can't meet the minimum number of vectors.
 
-The flags argument should normally be set to 0, but can be used to pass the
-PCI_IRQ_NOMSI and PCI_IRQ_NOMSIX flag in case a device claims to support
-MSI or MSI-X, but the support is broken, or to pass PCI_IRQ_NOLEGACY in
-case the device does not support legacy interrupt lines.
-
-By default this function will spread the interrupts around the available
-CPUs, but this feature can be disabled by passing the PCI_IRQ_NOAFFINITY
-flag.
+The flags argument is used to specify which type of interrupt can be used
+by the device and the driver (PCI_IRQ_LEGACY, PCI_IRQ_MSI, PCI_IRQ_MSIX).
+A convenient short-hand (PCI_IRQ_ALL_TYPES) is also available to ask for
+any possible kind of interrupt.  If the PCI_IRQ_AFFINITY flag is set,
+pci_alloc_irq_vectors() will spread the interrupts around the available CPUs.
 
 To get the Linux IRQ numbers passed to request_irq() and free_irq() and the
 vectors, use the following function:
@@ -131,7 +128,7 @@ larger than the number supported by the device it will automatically be
 capped to the supported limit, so there is no need to query the number of
 vectors supported beforehand:
 
-	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, 0);
+	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_ALL_TYPES)
 	if (nvec < 0)
 		goto out_err;
@@ -140,7 +137,7 @@ interrupts it can request a particular number of interrupts by passing that
 number to pci_alloc_irq_vectors() function as both 'min_vecs' and
 'max_vecs' parameters:
 
-	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, 0);
+	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_ALL_TYPES);
 	if (ret < 0)
 		goto out_err;
@@ -148,15 +145,14 @@ The most notorious example of the request type described above is enabling
 the single MSI mode for a device.  It could be done by passing two 1s as
 'min_vecs' and 'max_vecs':
 
-	ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
 	if (ret < 0)
 		goto out_err;
 
 Some devices might not support using legacy line interrupts, in which case
-the PCI_IRQ_NOLEGACY flag can be used to fail the request if the platform
-can't provide MSI or MSI-X interrupts:
+the driver can specify that only MSI or MSI-X is acceptable:
 
-	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_NOLEGACY);
+	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI | PCI_IRQ_MSIX);
 	if (nvec < 0)
 		goto out_err;
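
For orientation, here is a minimal sketch (not from this patch) of a probe
routine using the reworked interface documented above; the my_-prefixed names
are hypothetical:

	#include <linux/interrupt.h>
	#include <linux/pci.h>

	static irqreturn_t my_irq_handler(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int nvec, ret;

		/* Any interrupt type will do; request between 1 and 8 vectors. */
		nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
		if (nvec < 0)
			return nvec;

		/* pci_irq_vector() maps a vector index to a Linux IRQ number. */
		ret = request_irq(pci_irq_vector(pdev, 0), my_irq_handler, 0,
				  "my_device", pdev);
		if (ret)
			pci_free_irq_vectors(pdev);
		return ret;
	}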


@@ -124,7 +124,6 @@ initialization with a pointer to a structure describing the driver
 The ID table is an array of struct pci_device_id entries ending with an
 all-zero entry.  Definitions with static const are generally preferred.
-Use of the deprecated macro DEFINE_PCI_DEVICE_TABLE should be avoided.
 
 Each entry consists of:


@@ -53,6 +53,7 @@ stable kernels.
 | ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075    |
 | ARM            | Cortex-A57      | #852523         | N/A                     |
 | ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220    |
+| ARM            | Cortex-A72      | #853709         | N/A                     |
 | ARM            | MMU-500         | #841119,#826419 | N/A                     |
 |                |                 |                 |                         |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375    |


@@ -16,6 +16,11 @@ Required properties:
 - vref-supply: The regulator supply ADC reference voltage.
 - #io-channel-cells: Should be 1, see ../iio-bindings.txt
 
+Optional properties:
+- resets: Must contain an entry for each entry in reset-names if need support
+  this option. See ../reset/reset.txt for details.
+- reset-names: Must include the name "saradc-apb".
+
 Example:
 	saradc: saradc@2006c000 {
 		compatible = "rockchip,saradc";
@@ -23,6 +28,8 @@ Example:
 		interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
 		clock-names = "saradc", "apb_pclk";
+		resets = <&cru SRST_SARADC>;
+		reset-names = "saradc-apb";
 		#io-channel-cells = <1>;
 		vref-supply = <&vcc18>;
 	};


@@ -42,9 +42,6 @@ Optional properties:
 - auto-flow-control: one way to enable automatic flow control support. The
   driver is allowed to detect support for the capability even without this
   property.
-- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for RTS/CTS/DTR/DSR/RI/DCD
-  line respectively. It will use specified GPIO instead of the peripheral
-  function pin for the UART feature. If unsure, don't specify this property.
 
 Note:
 * fsl,ns16550:
@@ -66,19 +63,3 @@ Example:
 		interrupts = <10>;
 		reg-shift = <2>;
 	};
-
-Example for OMAP UART using GPIO-based modem control signals:
-
-	uart4: serial@49042000 {
-		compatible = "ti,omap3-uart";
-		reg = <0x49042000 0x400>;
-		interrupts = <80>;
-		ti,hwmods = "uart4";
-		clock-frequency = <48000000>;
-		cts-gpios = <&gpio3 5 GPIO_ACTIVE_LOW>;
-		rts-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
-		dtr-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
-		dsr-gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
-		dcd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
-		rng-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
-	};


@@ -8,8 +8,6 @@ Required properties:
 - interrupts: Interrupt number for McPDM
 - interrupt-parent: The parent interrupt controller
 - ti,hwmods: Name of the hwmod associated to the McPDM
-- clocks: phandle for the pdmclk provider, likely <&twl6040>
-- clock-names: Must be "pdmclk"
 
 Example:
 
@@ -21,11 +19,3 @@ mcpdm: mcpdm@40132000 {
 	interrupt-parent = <&gic>;
 	ti,hwmods = "mcpdm";
 };
-
-In board DTS file the pdmclk needs to be added:
-
-&mcpdm {
-	clocks  = <&twl6040>;
-	clock-names = "pdmclk";
-	status = "okay";
-};


@@ -183,12 +183,10 @@ The copy_up operation essentially creates a new, identical file and
 moves it over to the old name.  The new file may be on a different
 filesystem, so both st_dev and st_ino of the file may change.
 
-Any open files referring to this inode will access the old data and
-metadata.  Similarly any file locks obtained before copy_up will not
-apply to the copied up file.
+Any open files referring to this inode will access the old data.
 
-On a file opened with O_RDONLY fchmod(2), fchown(2), futimesat(2) and
-fsetxattr(2) will fail with EROFS.
+Any file locks (and leases) obtained before copy_up will not apply
+to the copied up file.
 
 If a file with multiple hard links is copied up, then this will
 "break" the link.  Changes will not be propagated to other names


@@ -3032,6 +3032,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			PAGE_SIZE is used as alignment.
 			PCI-PCI bridge can be specified, if resource
 			windows need to be expanded.
+			To specify the alignment for several
+			instances of a device, the PCI vendor,
+			device, subvendor, and subdevice may be
+			specified, e.g., 4096@pci:8086:9c22:103c:198f
 	ecrc=		Enable/disable PCIe ECRC (transaction layer
 			end-to-end CRC checking).
 			bios: Use BIOS/firmware settings. This is the


@@ -587,26 +587,6 @@ of DSA, would be the its port-based VLAN, used by the associated bridge device.
 TODO
 ====
 
-The platform device problem
----------------------------
-DSA is currently implemented as a platform device driver which is far from ideal
-as was discussed in this thread:
-
-http://permalink.gmane.org/gmane.linux.network/329848
-
-This basically prevents the device driver model to be properly used and applied,
-and support non-MDIO, non-MMIO Ethernet connected switches.
-
-Another problem with the platform device driver approach is that it prevents the
-use of a modular switch drivers build due to a circular dependency, illustrated
-here:
-
-http://comments.gmane.org/gmane.linux.network/345803
-
-Attempts of reworking this has been done here:
-
-https://lwn.net/Articles/643149/
-
 Making SWITCHDEV and DSA converge towards an unified codebase
 -------------------------------------------------------------


@@ -167,6 +167,8 @@ signal will be rolled back anyway.
 For signals taken in non-TM or suspended mode, we use the
 normal/non-checkpointed stack pointer.
 
+Any transaction initiated inside a sighandler and suspended on return
+from the sighandler to the kernel will get reclaimed and discarded.
+
 Failure cause codes used by kernel
 ==================================


@@ -80,6 +80,10 @@ functionality of their platform when planning to use this driver:
 III. Module parameters
 
+- 'dma_timeout' - DMA transfer completion timeout (in msec, default value 3000).
+        This parameter set a maximum completion wait time for SYNC mode DMA
+        transfer requests and for RIO_WAIT_FOR_ASYNC ioctl requests.
+
 - 'dbg_level' - This parameter allows to control amount of debug information
         generated by this device driver. This parameter is formed by set of
         bit masks that correspond to the specific functional blocks.


@@ -798,6 +798,7 @@ M: Laura Abbott <labbott@redhat.com>
 M:	Sumit Semwal <sumit.semwal@linaro.org>
 L:	devel@driverdev.osuosl.org
 S:	Supported
+F:	Documentation/devicetree/bindings/staging/ion/
 F:	drivers/staging/android/ion
 F:	drivers/staging/android/uapi/ion.h
 F:	drivers/staging/android/uapi/ion_test.h
@@ -881,6 +882,15 @@ S:	Supported
 F:	drivers/gpu/drm/arc/
 F:	Documentation/devicetree/bindings/display/snps,arcpgu.txt
 
+ARM ARCHITECTED TIMER DRIVER
+M:	Mark Rutland <mark.rutland@arm.com>
+M:	Marc Zyngier <marc.zyngier@arm.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/include/asm/arch_timer.h
+F:	arch/arm64/include/asm/arch_timer.h
+F:	drivers/clocksource/arm_arch_timer.c
+
 ARM HDLCD DRM DRIVER
 M:	Liviu Dudau <liviu.dudau@arm.com>
 S:	Supported
@@ -3238,7 +3248,7 @@ F:	kernel/cpuset.c
 CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:	Johannes Weiner <hannes@cmpxchg.org>
 M:	Michal Hocko <mhocko@kernel.org>
-M:	Vladimir Davydov <vdavydov@virtuozzo.com>
+M:	Vladimir Davydov <vdavydov.dev@gmail.com>
 L:	cgroups@vger.kernel.org
 L:	linux-mm@kvack.org
 S:	Maintained
@@ -7661,7 +7671,7 @@ L:	linux-rdma@vger.kernel.org
 S:	Supported
 W:	https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
 Q:	http://patchwork.kernel.org/project/linux-rdma/list/
-F:	drivers/infiniband/hw/rxe/
+F:	drivers/infiniband/sw/rxe/
 F:	include/uapi/rdma/rdma_user_rxe.h
 
 MEMBARRIER SUPPORT
@@ -11223,12 +11233,8 @@ S:	Odd Fixes
 F:	drivers/staging/vt665?/
 
 STAGING - WILC1000 WIFI DRIVER
-M:	Johnny Kim <johnny.kim@atmel.com>
-M:	Austin Shin <austin.shin@atmel.com>
-M:	Chris Park <chris.park@atmel.com>
-M:	Tony Cho <tony.cho@atmel.com>
-M:	Glen Lee <glen.lee@atmel.com>
-M:	Leo Kim <leo.kim@atmel.com>
+M:	Aditya Shankar <aditya.shankar@microchip.com>
+M:	Ganesh Krishna <ganesh.krishna@microchip.com>
 L:	linux-wireless@vger.kernel.org
 S:	Supported
 F:	drivers/staging/wilc1000/


@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*


@@ -142,7 +142,7 @@
 #ifdef CONFIG_ARC_CURR_IN_REG
 	; Retrieve orig r25 and save it with rest of callee_regs
-	ld.as	r12, [r12, PT_user_r25]
+	ld	r12, [r12, PT_user_r25]
 	PUSH	r12
 #else
 	PUSH	r25
@@ -198,7 +198,7 @@
 	; SP is back to start of pt_regs
 #ifdef CONFIG_ARC_CURR_IN_REG
-	st.as	r12, [sp, PT_user_r25]
+	st	r12, [sp, PT_user_r25]
 #endif
 .endm


@@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void)
 .endm
 
 .macro IRQ_ENABLE  scratch
+	TRACE_ASM_IRQ_ENABLE
 	lr	\scratch, [status32]
 	or	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
 	flag	\scratch
-	TRACE_ASM_IRQ_ENABLE
 .endm
 
 #endif	/* __ASSEMBLY__ */


@@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 #define pte_page(pte)		pfn_to_page(pte_pfn(pte))
 #define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
-#define pfn_pte(pfn, prot)	(__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
 #define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)


@@ -13,8 +13,15 @@
 /* Machine specific ELF Hdr flags */
 #define EF_ARC_OSABI_MSK	0x00000f00
-#define EF_ARC_OSABI_ORIG	0x00000000   /* MUST be zero for back-compat */
-#define EF_ARC_OSABI_CURRENT	0x00000300   /* v3 (no legacy syscalls) */
+
+#define EF_ARC_OSABI_V3		0x00000300   /* v3 (no legacy syscalls) */
+#define EF_ARC_OSABI_V4		0x00000400   /* v4 (64bit data any reg align) */
+
+#if __GNUC__ < 6
+#define EF_ARC_OSABI_CURRENT	EF_ARC_OSABI_V3
+#else
+#define EF_ARC_OSABI_CURRENT	EF_ARC_OSABI_V4
+#endif
 
 typedef unsigned long elf_greg_t;
 typedef unsigned long elf_fpregset_t;


@@ -28,6 +28,7 @@ extern void __muldf3(void);
 extern void __divdf3(void);
 extern void __floatunsidf(void);
 extern void __floatunsisf(void);
+extern void __udivdi3(void);
 
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__ashrdi3);
@@ -45,6 +46,7 @@ EXPORT_SYMBOL(__muldf3);
 EXPORT_SYMBOL(__divdf3);
 EXPORT_SYMBOL(__floatunsidf);
 EXPORT_SYMBOL(__floatunsisf);
+EXPORT_SYMBOL(__udivdi3);
 
 /* ARC optimised assembler routines */
 EXPORT_SYMBOL(memset);


@@ -199,7 +199,7 @@ int elf_check_arch(const struct elf32_hdr *x)
 	}
 
 	eflags = x->e_flags;
-	if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
+	if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
 		pr_err("ABI mismatch - you need newer toolchain\n");
 		force_sigsegv(SIGSEGV, current);
 		return 0;


@@ -291,8 +291,10 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 			       cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
 			       cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
 
-	n += scnprintf(buf + n, len - n,
-		       "OS ABI [v3]\t: no-legacy-syscalls\n");
+	n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
+		       EF_ARC_OSABI_CURRENT >> 8,
+		       EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
+		       "no-legacy-syscalls" : "64-bit data any register aligned");
 
 	return buf;
 }


@@ -921,6 +921,15 @@ void arc_cache_init(void)
 	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
 
+	/*
+	 * Only master CPU needs to execute rest of function:
+	 *  - Assume SMP so all cores will have same cache config so
+	 *    any geomtry checks will be same for all
+	 *  - IOC setup / dma callbacks only need to be setup once
+	 */
+	if (cpu)
+		return;
+
 	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
 		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;


@@ -61,6 +61,7 @@ void *kmap(struct page *page)
 	return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);
 
 void *kmap_atomic(struct page *page)
 {


@@ -197,6 +197,8 @@
clock-names = "saradc", "apb_pclk"; clock-names = "saradc", "apb_pclk";
interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>; interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
#io-channel-cells = <1>; #io-channel-cells = <1>;
resets = <&cru SRST_SARADC>;
reset-names = "saradc-apb";
status = "disabled"; status = "disabled";
}; };


@@ -279,6 +279,8 @@
 			#io-channel-cells = <1>;
 			clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
 			clock-names = "saradc", "apb_pclk";
+			resets = <&cru SRST_SARADC>;
+			reset-names = "saradc-apb";
 			status = "disabled";
 		};


@@ -399,6 +399,8 @@
 			#io-channel-cells = <1>;
 			clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
 			clock-names = "saradc", "apb_pclk";
+			resets = <&cru SRST_SARADC>;
+			reset-names = "saradc-apb";
 			status = "disabled";
 		};


@@ -1309,7 +1309,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	smp_rmb();
 
 	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
-	if (is_error_pfn(pfn))
+	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;
 
 	if (kvm_is_device_pfn(pfn)) {


@@ -83,7 +83,8 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-	.flags	= SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
+	.flags	= SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+		  SMC91X_USE_DMA | SMC91X_NOWAIT,
 };
 
 static struct platform_device smc91x_device = {


@@ -120,7 +120,8 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata xcep_smc91x_info = {
-	.flags	= SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA,
+	.flags	= SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+		  SMC91X_NOWAIT | SMC91X_USE_DMA,
 };
 
 static struct platform_device smc91x_device = {


@@ -93,7 +93,8 @@ static struct smsc911x_platform_config smsc911x_config = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+		 SMC91X_NOWAIT,
 };
 
 static struct platform_device realview_eth_device = {


@@ -45,7 +45,7 @@ static struct resource smc91x_resources[] = {
 };
 
 static struct smc91x_platdata smc91x_platdata = {
-	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+	.flags = SMC91X_USE_16BIT | SMC91X_USE_8BIT | SMC91X_NOWAIT,
 };
 
 static struct platform_device smc91x_device = {


@@ -50,7 +50,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 static struct vcpu_info __percpu *xen_vcpu_info;
 
 /* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
 
 /* These are unused until we support booting "pre-ballooned" */


@@ -270,6 +270,8 @@
 			#io-channel-cells = <1>;
 			clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
 			clock-names = "saradc", "apb_pclk";
+			resets = <&cru SRST_SARADC>;
+			reset-names = "saradc-apb";
 			status = "disabled";
 		};


@@ -757,6 +757,9 @@ ENTRY(__enable_mmu)
 	isb
 	bl	__create_page_tables		// recreate kernel mapping
 
+	tlbi	vmalle1				// Remove any stale TLB entries
+	dsb	nsh
+
 	msr	sctlr_el1, x19			// re-enable the MMU
 	isb
 	ic	iallu				// flush instructions fetched


@@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 	/*
 	 * We must restore the 32-bit state before the sysregs, thanks
-	 * to Cortex-A57 erratum #852523.
+	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
 	 */
 	__sysreg32_restore_state(vcpu);
 	__sysreg_restore_guest_state(guest_ctxt);


@@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
  *
- * We could trap ID_DFR0 and tell the guest we don't support performance
- * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
- * NAKed, so it will read the PMCR anyway.
- *
- * Therefore we tell the guest we have 0 counters.  Unfortunately, we
- * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
- * all PM registers, which doesn't crash the guest kernel at least.
- *
  * Debug handling: We do trap most, if not all debug related system
  * registers. The implementation is good enough to ensure that a guest
  * can use these with minimal performance degradation. The drawback is
@@ -1360,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = {
 	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
 
 	/* ICC_SRE */
-	{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
+	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
 
 	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },


@@ -100,7 +100,16 @@ ENTRY(cpu_do_resume)
 	msr	tcr_el1, x8
 	msr	vbar_el1, x9
+
+	/*
+	 * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
+	 * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
+	 * exception. Mask them until local_dbg_restore() in cpu_suspend()
+	 * resets them.
+	 */
+	disable_dbg
 	msr	mdscr_el1, x10
+
 	msr	sctlr_el1, x12
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1


@@ -146,7 +146,8 @@ static struct platform_device hitachi_fb_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+		 SMC91X_NOWAIT,
 	.leda = RPC_LED_100_10,
 	.ledb = RPC_LED_TX_RX,
 };


@@ -134,7 +134,8 @@ static struct platform_device net2272_bfin_device = {
 #include <linux/smc91x.h>
 
 static struct smc91x_platdata smc91x_info = {
-	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+	.flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT |
+		 SMC91X_NOWAIT,
 	.leda = RPC_LED_100_10,
 	.ledb = RPC_LED_TX_RX,
 };


@@ -164,7 +164,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
  */
 static inline unsigned long ___pa(unsigned long x)
 {
-	if (config_enabled(CONFIG_64BIT)) {
+	if (IS_ENABLED(CONFIG_64BIT)) {
 		/*
 		 * For MIPS64 the virtual address may either be in one of
 		 * the compatibility segements ckseg0 or ckseg1, or it may
@@ -173,7 +173,7 @@ static inline unsigned long ___pa(unsigned long x)
 		return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
 	}
 
-	if (!config_enabled(CONFIG_EVA)) {
+	if (!IS_ENABLED(CONFIG_EVA)) {
 		/*
 		 * We're using the standard MIPS32 legacy memory map, ie.
 		 * the address x is going to be in kseg0 or kseg1. We can


@@ -40,7 +40,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	pfn = gfn_to_pfn(kvm, gfn);
 
-	if (is_error_pfn(pfn)) {
+	if (is_error_noslot_pfn(pfn)) {
 		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
 		err = -EFAULT;
 		goto out;


@@ -1,6 +1,5 @@
 config PARISC
 	def_bool y
-	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select HAVE_IDE
 	select HAVE_OPROFILE
select HAVE_OPROFILE select HAVE_OPROFILE


@@ -245,7 +245,6 @@ CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_BLOCK_EXT_DEVT=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_KEYS=y
 # CONFIG_CRYPTO_HW is not set
 CONFIG_FONTS=y


@@ -291,7 +291,6 @@ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_PCBC=m


@@ -208,13 +208,13 @@ unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned lo
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
-extern void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-        __compiletime_error("copy_from_user() buffer size is not provably correct")
-#else
-        __compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
 
 static inline unsigned long __must_check copy_from_user(void *to,
                                           const void __user *from,
@@ -223,10 +223,12 @@ static inline unsigned long __must_check copy_from_user(void *to,
         int sz = __compiletime_object_size(to);
         int ret = -EFAULT;
 
-        if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
+        if (likely(sz == -1 || sz >= n))
                 ret = __copy_from_user(to, from, n);
-        else
-                copy_from_user_overflow();
+        else if (!__builtin_constant_p(n))
+                copy_user_overflow(sz, n);
+        else
+                __bad_copy_user();
 
         return ret;
 }


@@ -3,6 +3,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
+#include <asm/cpu_has_feature.h>
 
 /*
  * Mapping of threads to cores


@@ -21,7 +21,7 @@
 #ifndef __ASM_PPC64_HMI_H__
 #define __ASM_PPC64_HMI_H__
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 
 #define	CORE_TB_RESYNC_REQ_BIT		63
 #define MAX_SUBCORE_PER_CORE		4


@@ -183,11 +183,6 @@ struct paca_struct {
 	 */
 	u16 in_mce;
 	u8 hmi_event_available;		 /* HMI event is available */
-	/*
-	 * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
-	 * more details
-	 */
-	struct sibling_subcore_state *sibling_subcore_state;
 #endif
 
 	/* Stuff for accurate time accounting */
@@ -202,6 +197,13 @@ struct paca_struct {
 	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 #endif
 	struct kvmppc_host_state kvm_hstate;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	/*
+	 * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
+	 * more details
+	 */
+	struct sibling_subcore_state *sibling_subcore_state;
+#endif
 #endif
 };


@@ -301,6 +301,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 /* Allocate & free a PCI host bridge structure */
 extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
 extern void pcibios_free_controller(struct pci_controller *phb);
+extern void pcibios_free_controller_deferred(struct pci_host_bridge *bridge);
 
 #ifdef CONFIG_PCI
 extern int pcibios_vaddr_is_ioport(void __iomem *address);


@@ -41,7 +41,7 @@ obj-$(CONFIG_VDSO32) += vdso32/
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_ppc970.o cpu_setup_pa6t.o
 obj-$(CONFIG_PPC_BOOK3S_64)	+= cpu_setup_power.o
-obj-$(CONFIG_PPC_BOOK3S_64)	+= mce.o mce_power.o hmi.o
+obj-$(CONFIG_PPC_BOOK3S_64)	+= mce.o mce_power.o
 obj-$(CONFIG_PPC_BOOK3E_64)	+= exceptions-64e.o idle_book3e.o
 obj-$(CONFIG_PPC64)		+= vdso64/
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o


@@ -368,13 +368,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 tabort_syscall:
 	/* Firstly we need to enable TM in the kernel */
 	mfmsr	r10
-	li	r13, 1
-	rldimi	r10, r13, MSR_TM_LG, 63-MSR_TM_LG
+	li	r9, 1
+	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
 	mtmsrd	r10, 0
 
 	/* tabort, this dooms the transaction, nothing else */
-	li	r13, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
-	TABORT(R13)
+	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
+	TABORT(R9)
 
 	/*
 	 * Return directly to userspace. We have corrupted user register state,
@@ -382,8 +382,8 @@ tabort_syscall:
 	 * resume after the tbegin of the aborted transaction with the
 	 * checkpointed register state.
 	 */
-	li	r13, MSR_RI
-	andc	r10, r10, r13
+	li	r9, MSR_RI
+	andc	r10, r10, r9
 	mtmsrd	r10, 1
 	mtspr	SPRN_SRR0, r11
 	mtspr	SPRN_SRR1, r12


@@ -485,7 +485,23 @@ machine_check_fwnmi:
 	EXCEPTION_PROLOG_0(PACA_EXMC)
 machine_check_pSeries_0:
 	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
-	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
+	/*
+	 * The following is essentially EXCEPTION_PROLOG_PSERIES_1 with the
+	 * difference that MSR_RI is not enabled, because PACA_EXMC is being
+	 * used, so nested machine check corrupts it. machine_check_common
+	 * enables MSR_RI.
+	 */
+	ld	r12,PACAKBASE(r13)
+	ld	r10,PACAKMSR(r13)
+	xori	r10,r10,MSR_RI
+	mfspr	r11,SPRN_SRR0
+	LOAD_HANDLER(r12, machine_check_common)
+	mtspr	SPRN_SRR0,r12
+	mfspr	r12,SPRN_SRR1
+	mtspr	SPRN_SRR1,r10
+	rfid
+	b	.	/* prevent speculative execution */
+
 	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
 	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
 	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
@@ -969,14 +985,17 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
 machine_check_common:
 
 	mfspr	r10,SPRN_DAR
-	std	r10,PACA_EXGEN+EX_DAR(r13)
+	std	r10,PACA_EXMC+EX_DAR(r13)
 	mfspr	r10,SPRN_DSISR
-	stw	r10,PACA_EXGEN+EX_DSISR(r13)
+	stw	r10,PACA_EXMC+EX_DSISR(r13)
 	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
 	FINISH_NAP
 	RECONCILE_IRQ_STATE(r10, r11)
-	ld	r3,PACA_EXGEN+EX_DAR(r13)
-	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
+	ld	r3,PACA_EXMC+EX_DAR(r13)
+	lwz	r4,PACA_EXMC+EX_DSISR(r13)
+	/* Enable MSR_RI when finished with PACA_EXMC */
+	li	r10,MSR_RI
+	mtmsrd	r10,1
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 	bl	save_nvgprs


@@ -29,7 +29,7 @@
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
 #include <linux/preempt.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/kdebug.h>
 #include <linux/slab.h>
 #include <asm/code-patching.h>


@@ -153,6 +153,42 @@ void pcibios_free_controller(struct pci_controller *phb)
 }
 EXPORT_SYMBOL_GPL(pcibios_free_controller);
 
+/*
+ * This function is used to call pcibios_free_controller()
+ * in a deferred manner: a callback from the PCI subsystem.
+ *
+ * _*DO NOT*_ call pcibios_free_controller() explicitly if
+ * this is used (or it may access an invalid *phb pointer).
+ *
+ * The callback occurs when all references to the root bus
+ * are dropped (e.g., child buses/devices and their users).
+ *
+ * It's called as .release_fn() of 'struct pci_host_bridge'
+ * which is associated with the 'struct pci_controller.bus'
+ * (root bus) - it expects .release_data to hold a pointer
+ * to 'struct pci_controller'.
+ *
+ * In order to use it, register .release_fn()/release_data
+ * like this:
+ *
+ * pci_set_host_bridge_release(bridge,
+ *                             pcibios_free_controller_deferred
+ *                             (void *) phb);
+ *
+ * e.g. in the pcibios_root_bridge_prepare() callback from
+ * pci_create_root_bus().
+ */
+void pcibios_free_controller_deferred(struct pci_host_bridge *bridge)
+{
+	struct pci_controller *phb = (struct pci_controller *)
+					 bridge->release_data;
+
+	pr_debug("domain %d, dynamic %d\n", phb->global_number, phb->is_dynamic);
+
+	pcibios_free_controller(phb);
+}
+EXPORT_SYMBOL_GPL(pcibios_free_controller_deferred);
+
 /*
  * The function is used to return the minimal alignment
  * for memory or I/O windows of the associated P2P bridge.


@@ -695,7 +695,7 @@ unsigned char ibm_architecture_vec[] = {
 	OV4_MIN_ENT_CAP,		/* minimum VP entitled capacity */
 
 	/* option vector 5: PAPR/OF options */
-	VECTOR_LENGTH(18),		/* length */
+	VECTOR_LENGTH(21),		/* length */
 	0,				/* don't ignore, don't halt */
 	OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
 	OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
@@ -726,8 +726,11 @@ unsigned char ibm_architecture_vec[] = {
 	0,
 	0,
 	OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
-	OV5_FEAT(OV5_PFO_HW_842),
-	OV5_FEAT(OV5_SUB_PROCESSORS),
+	OV5_FEAT(OV5_PFO_HW_842),				/* Byte 17 */
+	0,							/* Byte 18 */
+	0,							/* Byte 19 */
+	0,							/* Byte 20 */
+	OV5_FEAT(OV5_SUB_PROCESSORS),				/* Byte 21 */
 
 	/* option vector 6: IBM PAPR hints */
 	VECTOR_LENGTH(3),		/* length */

@@ -1226,7 +1226,21 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
 		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
 	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
 		goto bad;
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	/*
+	 * If there is a transactional state then throw it away.
+	 * The purpose of a sigreturn is to destroy all traces of the
+	 * signal frame, this includes any transactional state created
+	 * within in. We only check for suspended as we can never be
+	 * active in the kernel, we are active, there is nothing better to
+	 * do than go ahead and Bad Thing later.
+	 * The cause is not important as there will never be a
+	 * recheckpoint so it's not user visible.
+	 */
+	if (MSR_TM_SUSPENDED(mfmsr()))
+		tm_reclaim_current(0);
+
 	if (__get_user(tmp, &rt_sf->uc.uc_link))
 		goto bad;
 	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;


@@ -676,7 +676,21 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
 	if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
 		goto badframe;
 	set_current_blocked(&set);
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	/*
+	 * If there is a transactional state then throw it away.
+	 * The purpose of a sigreturn is to destroy all traces of the
+	 * signal frame, this includes any transactional state created
+	 * within in. We only check for suspended as we can never be
+	 * active in the kernel, we are active, there is nothing better to
+	 * do than go ahead and Bad Thing later.
+	 * The cause is not important as there will never be a
+	 * recheckpoint so it's not user visible.
+	 */
+	if (MSR_TM_SUSPENDED(mfmsr()))
+		tm_reclaim_current(0);
+
 	if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
 		goto badframe;
 	if (MSR_TM_ACTIVE(msr)) {


@@ -830,7 +830,7 @@ int __cpu_disable(void)
 	/* Update sibling maps */
 	base = cpu_first_thread_sibling(cpu);
-	for (i = 0; i < threads_per_core; i++) {
+	for (i = 0; i < threads_per_core && base + i < nr_cpu_ids; i++) {
 		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
 		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
 		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));


@@ -25,7 +25,8 @@
 #include <linux/user.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/extable.h>
+#include <linux/module.h>	/* print_modules */
 #include <linux/prctl.h>
 #include <linux/delay.h>
 #include <linux/kprobes.h>


@@ -78,6 +78,7 @@ kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
 ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+	book3s_hv_hmi.o \
 	book3s_hv_rmhandlers.o \
 	book3s_hv_rm_mmu.o \
 	book3s_hv_ras.o \


@@ -26,7 +26,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/perf_event.h>


@@ -528,7 +528,6 @@ static struct platform_driver mpc512x_lpbfifo_driver = {
 	.remove = mpc512x_lpbfifo_remove,
 	.driver = {
 		.name = DRV_NAME,
-		.owner = THIS_MODULE,
 		.of_match_table = mpc512x_lpbfifo_match,
 	},
 };


@@ -222,7 +222,6 @@ static const struct of_device_id mcu_of_match_table[] = {
 static struct i2c_driver mcu_driver = {
 	.driver = {
 		.name = "mcu-mpc8349emitx",
-		.owner = THIS_MODULE,
 		.of_match_table = mcu_of_match_table,
 	},
 	.probe = mcu_probe,


@@ -26,7 +26,7 @@
 #include <linux/tty.h>
 #include <linux/serial_core.h>
 #include <linux/of_platform.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>


@@ -23,7 +23,7 @@
 #include <linux/pci.h>
 #include <linux/kdev_t.h>
 #include <linux/console.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/delay.h>
 #include <linux/irq.h>
 #include <linux/seq_file.h>


@@ -370,6 +370,7 @@ static irqreturn_t process_dump(int irq, void *data)
 	uint32_t dump_id, dump_size, dump_type;
 	struct dump_obj *dump;
 	char name[22];
+	struct kobject *kobj;
 
 	rc = dump_read_info(&dump_id, &dump_size, &dump_type);
 	if (rc != OPAL_SUCCESS)
@@ -381,8 +382,12 @@ static irqreturn_t process_dump(int irq, void *data)
 	 * that gracefully and not create two conflicting
 	 * entries.
 	 */
-	if (kset_find_obj(dump_kset, name))
+	kobj = kset_find_obj(dump_kset, name);
+	if (kobj) {
+		/* Drop reference added by kset_find_obj() */
+		kobject_put(kobj);
 		return 0;
+	}
 
 	dump = create_dump_obj(dump_id, dump_size, dump_type);
 	if (!dump)


@@ -247,6 +247,7 @@ static irqreturn_t elog_event(int irq, void *data)
 	uint64_t elog_type;
 	int rc;
 	char name[2+16+1];
+	struct kobject *kobj;
 
 	rc = opal_get_elog_size(&id, &size, &type);
 	if (rc != OPAL_SUCCESS) {
@@ -269,8 +270,12 @@ static irqreturn_t elog_event(int irq, void *data)
 	 * that gracefully and not create two conflicting
 	 * entries.
 	 */
-	if (kset_find_obj(elog_kset, name))
+	kobj = kset_find_obj(elog_kset, name);
+	if (kobj) {
+		/* Drop reference added by kset_find_obj() */
+		kobject_put(kobj);
 		return IRQ_HANDLED;
+	}
 
 	create_elog_obj(log_id, elog_size, elog_type);
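
Both OPAL fixes above plug the same leak: kset_find_obj() returns its match
with the reference count already raised, so a caller that only wants an
existence check must drop that reference itself. A minimal sketch of the
pattern (the helper name is hypothetical):

	#include <linux/errno.h>
	#include <linux/kobject.h>

	/* Return -EEXIST if @name is already present in @kset. */
	static int my_check_duplicate(struct kset *kset, const char *name)
	{
		struct kobject *kobj;

		kobj = kset_find_obj(kset, name);
		if (kobj) {
			/* kset_find_obj() took a reference; drop it here. */
			kobject_put(kobj);
			return -EEXIST;
		}
		return 0;
	}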


@@ -149,7 +149,7 @@ static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
 static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
 {
-	unsigned long pe = phb->ioda.total_pe_num - 1;
+	long pe;
 
 	for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
 		if (!test_and_set_bit(pe, phb->ioda.pe_alloc))


@@ -119,6 +119,10 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
 	bus = bridge->bus;
 
+	/* Rely on the pcibios_free_controller_deferred() callback. */
+	pci_set_host_bridge_release(bridge, pcibios_free_controller_deferred,
+					(void *) pci_bus_to_host(bus));
+
 	dn = pcibios_get_phb_of_node(bus);
 	if (!dn)
 		return 0;


@@ -106,8 +106,11 @@ int remove_phb_dynamic(struct pci_controller *phb)
 		release_resource(res);
 	}
 
-	/* Free pci_controller data structure */
-	pcibios_free_controller(phb);
+	/*
+	 * The pci_controller data structure is freed by
+	 * the pcibios_free_controller_deferred() callback;
+	 * see pseries_root_bridge_prepare().
+	 */
 
 	return 0;
 }
} }


@@ -534,7 +534,8 @@ struct cpm1_gpio16_chip {
 static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+	struct cpm1_gpio16_chip *cpm1_gc =
+		container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc);
 	struct cpm_ioport16 __iomem *iop = mm_gc->regs;
 
 	cpm1_gc->cpdata = in_be16(&iop->dat);
@@ -649,7 +650,8 @@ struct cpm1_gpio32_chip {
 static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
+	struct cpm1_gpio32_chip *cpm1_gc =
+		container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc);
 	struct cpm_ioport32b __iomem *iop = mm_gc->regs;
 
 	cpm1_gc->cpdata = in_be32(&iop->dat);


@@ -94,7 +94,8 @@ struct cpm2_gpio32_chip {
 static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(&mm_gc->gc);
+	struct cpm2_gpio32_chip *cpm2_gc =
+		container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc);
 	struct cpm2_ioports __iomem *iop = mm_gc->regs;
 
 	cpm2_gc->cpdata = in_be32(&iop->dat);


@@ -23,7 +23,7 @@
  */
 
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/extable.h>
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>


@@ -68,7 +68,6 @@ config DEBUG_RODATA
 config S390
 	def_bool y
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_GCOV_PROFILE_ALL


@@ -602,7 +602,6 @@ CONFIG_FAIL_FUTEX=y
 CONFIG_FAULT_INJECTION_DEBUG_FS=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_IRQSOFF_TRACER=y
 CONFIG_PREEMPT_TRACER=y
 CONFIG_SCHED_TRACER=y


@@ -552,7 +552,6 @@ CONFIG_NOTIFIER_ERROR_INJECTION=m
 CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_BLK_DEV_IO_TRACE=y
 # CONFIG_KPROBE_EVENT is not set
 CONFIG_TRACE_ENUM_MAP_FILE=y


@@ -549,7 +549,6 @@ CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_STACK_TRACER=y


@@ -172,7 +172,6 @@ CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
-CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y


@@ -311,6 +311,14 @@ int __get_user_bad(void) __attribute__((noreturn));
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
 /**
  * copy_to_user: - Copy a block of data into user space.
  * @to:   Destination address, in user space.
@@ -332,12 +340,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return __copy_to_user(to, from, n);
 }
 
-void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
-
 /**
  * copy_from_user: - Copy a block of data from user space.
  * @to:   Destination address, in kernel space.
@@ -362,7 +364,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	might_fault();
 	if (unlikely(sz != -1 && sz < n)) {
-		copy_from_user_overflow();
+		if (!__builtin_constant_p(n))
+			copy_user_overflow(sz, n);
+		else
+			__bad_copy_user();
 		return n;
 	}
 	return __copy_from_user(to, from, n);


@@ -204,11 +204,9 @@ static void __init conmode_default(void)
 #endif
 		}
 	} else if (MACHINE_IS_KVM) {
-		if (sclp.has_vt220 &&
-		    config_enabled(CONFIG_SCLP_VT220_CONSOLE))
+		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
 			SET_CONSOLE_VT220;
-		else if (sclp.has_linemode &&
-			 config_enabled(CONFIG_SCLP_CONSOLE))
+		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
 			SET_CONSOLE_SCLP;
 		else
 			SET_CONSOLE_HVC;


@@ -4,7 +4,6 @@
 config TILE
 	def_bool y
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
 	select ARCH_WANT_FRAME_POINTERS

View File

@@ -416,14 +416,13 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
 	return n;
 }
 
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-/*
- * There are still unprovable places in the generic code as of 2.6.34, so this
- * option is not really compatible with -Werror, which is more useful in
- * general.
- */
-extern void copy_from_user_overflow(void)
-	__compiletime_warning("copy_from_user() size is not provably correct");
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
 
 static inline unsigned long __must_check copy_from_user(void *to,
 					  const void __user *from,
@@ -433,14 +432,13 @@ static inline unsigned long __must_check copy_from_user(void *to,
 	if (likely(sz == -1 || sz >= n))
 		n = _copy_from_user(to, from, n);
+	else if (!__builtin_constant_p(n))
+		copy_user_overflow(sz, n);
 	else
-		copy_from_user_overflow();
+		__bad_copy_user();
 	return n;
 }
-#else
-#define copy_from_user _copy_from_user
-#endif
 
 #ifdef __tilegx__
 /**

View File

@@ -81,7 +81,7 @@
   .altinstr_replacement : { *(.altinstr_replacement) }
   /* .exit.text is discard at runtime, not link time, to deal with references
      from .altinstructions and .eh_frame */
-  .exit.text : { *(.exit.text) }
+  .exit.text : { EXIT_TEXT }
   .exit.data : { *(.exit.data) }
 
   .preinit_array : {

View File

@@ -24,7 +24,6 @@ config X86
 	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_HAS_ACPI_TABLE_UPGRADE	if ACPI
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FAST_MULTIPLIER

View File

@@ -1 +1,3 @@
 CONFIG_NOHIGHMEM=y
+# CONFIG_HIGHMEM4G is not set
+# CONFIG_HIGHMEM64G is not set

View File

@@ -485,10 +485,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
 		req = cast_mcryptd_ctx_to_req(req_ctx);
 		if (irqs_disabled())
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 		else {
 			local_bh_disable();
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 			local_bh_enable();
 		}
 	}

View File

@@ -265,13 +265,14 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
 	vpinsrd	$1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
 	vpinsrd	$2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
 	vpinsrd	$3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
-	movl	_args_digest+4*32(state, idx, 4), tmp2_w
+	vmovd	_args_digest(state , idx, 4) , %xmm0
 	vpinsrd	$1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
 	vpinsrd	$2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
 	vpinsrd	$3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
 
 	vmovdqu	%xmm0, _result_digest(job_rax)
-	movl	tmp2_w, _result_digest+1*16(job_rax)
+	offset = (_result_digest + 1*16)
+	vmovdqu	%xmm1, offset(job_rax)
 
 	pop	%rbx

View File

@@ -497,10 +497,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
 
 		req = cast_mcryptd_ctx_to_req(req_ctx);
 		if (irqs_disabled())
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 		else {
 			local_bh_disable();
-			rctx->complete(&req->base, ret);
+			req_ctx->complete(&req->base, ret);
 			local_bh_enable();
 		}
 	}

View File

@@ -697,43 +697,14 @@ unsigned long __must_check _copy_from_user(void *to, const void __user *from,
 unsigned long __must_check _copy_to_user(void __user *to, const void *from,
 					 unsigned n);
 
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-# define copy_user_diag __compiletime_error
-#else
-# define copy_user_diag __compiletime_warning
-#endif
-
-extern void copy_user_diag("copy_from_user() buffer size is too small")
-copy_from_user_overflow(void);
-extern void copy_user_diag("copy_to_user() buffer size is too small")
-copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-
-#undef copy_user_diag
-
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-
-extern void
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
 
-extern void
-__compiletime_warning("copy_to_user() buffer size is not provably correct")
-__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
-#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()
-
-#else
-
-static inline void
-__copy_from_user_overflow(int size, unsigned long count)
+static inline void copy_user_overflow(int size, unsigned long count)
 {
 	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
 }
 
-#define __copy_to_user_overflow __copy_from_user_overflow
-
-#endif
-
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
@@ -743,31 +714,13 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	kasan_check_write(to, n);
 
-	/*
-	 * While we would like to have the compiler do the checking for us
-	 * even in the non-constant size case, any false positives there are
-	 * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
-	 * without - the [hopefully] dangerous looking nature of the warning
-	 * would make people go look at the respecitive call sites over and
-	 * over again just to find that there's no problem).
-	 *
-	 * And there are cases where it's just not realistic for the compiler
-	 * to prove the count to be in range. For example when multiple call
-	 * sites of a helper function - perhaps in different source files -
-	 * all doing proper range checking, yet the helper function not doing
-	 * so again.
-	 *
-	 * Therefore limit the compile time checking to the constant size
-	 * case, and do only runtime checking for non-constant sizes.
-	 */
 	if (likely(sz < 0 || sz >= n)) {
 		check_object_size(to, n, false);
 		n = _copy_from_user(to, from, n);
-	} else if (__builtin_constant_p(n))
-		copy_from_user_overflow();
+	} else if (!__builtin_constant_p(n))
+		copy_user_overflow(sz, n);
 	else
-		__copy_from_user_overflow(sz, n);
+		__bad_copy_user();
 
 	return n;
 }
@@ -781,21 +734,17 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	might_fault();
 
-	/* See the comment in copy_from_user() above. */
 	if (likely(sz < 0 || sz >= n)) {
 		check_object_size(from, n, true);
 		n = _copy_to_user(to, from, n);
-	} else if (__builtin_constant_p(n))
-		copy_to_user_overflow();
+	} else if (!__builtin_constant_p(n))
+		copy_user_overflow(sz, n);
 	else
-		__copy_to_user_overflow(sz, n);
+		__bad_copy_user();
 
 	return n;
 }
 
-#undef __copy_from_user_overflow
-#undef __copy_to_user_overflow
-
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.

View File

@@ -1623,6 +1623,9 @@ void __init enable_IR_x2apic(void)
 	unsigned long flags;
 	int ret, ir_stat;
 
+	if (skip_ioapic_setup)
+		return;
+
 	ir_stat = irq_remapping_prepare();
 	if (ir_stat < 0 && !x2apic_supported())
 		return;

View File

@@ -669,6 +669,17 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
 		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
 }
 
+#define MSR_AMD64_DE_CFG	0xC0011029
+
+static void init_amd_ln(struct cpuinfo_x86 *c)
+{
+	/*
+	 * Apply erratum 665 fix unconditionally so machines without a BIOS
+	 * fix work.
+	 */
+	msr_set_bit(MSR_AMD64_DE_CFG, 31);
+}
+
 static void init_amd_bd(struct cpuinfo_x86 *c)
 {
 	u64 value;
@@ -726,6 +737,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 	case 6:	   init_amd_k7(c); break;
 	case 0xf:  init_amd_k8(c); break;
 	case 0x10: init_amd_gh(c); break;
+	case 0x12: init_amd_ln(c); break;
 	case 0x15: init_amd_bd(c); break;
 	}

View File

@@ -56,12 +56,12 @@ asm (".pushsection .entry.text, \"ax\"\n"
 ".popsection");
 
 /* identity function, which can be inlined */
-u32 _paravirt_ident_32(u32 x)
+u32 notrace _paravirt_ident_32(u32 x)
 {
 	return x;
 }
 
-u64 _paravirt_ident_64(u64 x)
+u64 notrace _paravirt_ident_64(u64 x)
 {
 	return x;
 }

View File

@@ -422,6 +422,7 @@ struct nested_vmx {
 	struct list_head vmcs02_pool;
 	int vmcs02_num;
 	u64 vmcs01_tsc_offset;
+	bool change_vmcs01_virtual_x2apic_mode;
 	/* L2 must run next, and mustn't decide to exit to L1. */
 	bool nested_run_pending;
 	/*
@@ -435,6 +436,8 @@ struct nested_vmx {
 	bool pi_pending;
 	u16 posted_intr_nv;
 
+	unsigned long *msr_bitmap;
+
 	struct hrtimer preemption_timer;
 	bool preemption_timer_expired;
@@ -924,7 +927,6 @@ static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 static unsigned long *vmx_msr_bitmap_legacy_x2apic;
 static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_nested;
 static unsigned long *vmx_vmread_bitmap;
 static unsigned long *vmx_vmwrite_bitmap;
@@ -2198,6 +2200,12 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
 			new.control) != old.control);
 }
 
+static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
+{
+	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
+	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
  * vcpu mutex is already taken.
@@ -2256,10 +2264,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 		/* Setup TSC multiplier */
 		if (kvm_has_tsc_control &&
-		    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
-			vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
-			vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
-		}
+		    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
+			decache_tsc_multiplier(vmx);
 
 	vmx_vcpu_pi_load(vcpu, cpu);
 	vmx->host_pkru = read_pkru();
@@ -2508,7 +2514,7 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
 	unsigned long *msr_bitmap;
 
 	if (is_guest_mode(vcpu))
-		msr_bitmap = vmx_msr_bitmap_nested;
+		msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
 	else if (cpu_has_secondary_exec_ctrls() &&
 		 (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
 		  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
@@ -6363,13 +6369,6 @@ static __init int hardware_setup(void)
 	if (!vmx_msr_bitmap_longmode_x2apic)
 		goto out4;
 
-	if (nested) {
-		vmx_msr_bitmap_nested =
-			(unsigned long *)__get_free_page(GFP_KERNEL);
-		if (!vmx_msr_bitmap_nested)
-			goto out5;
-	}
-
 	vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
 	if (!vmx_vmread_bitmap)
 		goto out6;
@@ -6392,8 +6391,6 @@ static __init int hardware_setup(void)
 	memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
 	memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
 
-	if (nested)
-		memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
-
 	if (setup_vmcs_config(&vmcs_config) < 0) {
 		r = -EIO;
@@ -6529,9 +6526,6 @@ out8:
 out7:
 	free_page((unsigned long)vmx_vmread_bitmap);
 out6:
-	if (nested)
-		free_page((unsigned long)vmx_msr_bitmap_nested);
-out5:
 	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
 out4:
 	free_page((unsigned long)vmx_msr_bitmap_longmode);
@@ -6557,8 +6551,6 @@ static __exit void hardware_unsetup(void)
 	free_page((unsigned long)vmx_io_bitmap_a);
 	free_page((unsigned long)vmx_vmwrite_bitmap);
 	free_page((unsigned long)vmx_vmread_bitmap);
-	if (nested)
-		free_page((unsigned long)vmx_msr_bitmap_nested);
 
 	free_kvm_area();
 }
@@ -6995,16 +6987,21 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
+	if (cpu_has_vmx_msr_bitmap()) {
+		vmx->nested.msr_bitmap =
+				(unsigned long *)__get_free_page(GFP_KERNEL);
+		if (!vmx->nested.msr_bitmap)
+			goto out_msr_bitmap;
+	}
+
 	vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
 	if (!vmx->nested.cached_vmcs12)
-		return -ENOMEM;
+		goto out_cached_vmcs12;
 
 	if (enable_shadow_vmcs) {
 		shadow_vmcs = alloc_vmcs();
-		if (!shadow_vmcs) {
-			kfree(vmx->nested.cached_vmcs12);
-			return -ENOMEM;
-		}
+		if (!shadow_vmcs)
+			goto out_shadow_vmcs;
 		/* mark vmcs as shadow */
 		shadow_vmcs->revision_id |= (1u << 31);
 		/* init shadow vmcs */
@@ -7024,6 +7021,15 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 	skip_emulated_instruction(vcpu);
 	nested_vmx_succeed(vcpu);
 	return 1;
+
+out_shadow_vmcs:
+	kfree(vmx->nested.cached_vmcs12);
+
+out_cached_vmcs12:
+	free_page((unsigned long)vmx->nested.msr_bitmap);
+
+out_msr_bitmap:
+	return -ENOMEM;
 }
 
 /*
@@ -7098,6 +7104,10 @@ static void free_nested(struct vcpu_vmx *vmx)
 	vmx->nested.vmxon = false;
 	free_vpid(vmx->nested.vpid02);
 	nested_release_vmcs12(vmx);
+	if (vmx->nested.msr_bitmap) {
+		free_page((unsigned long)vmx->nested.msr_bitmap);
+		vmx->nested.msr_bitmap = NULL;
+	}
 	if (enable_shadow_vmcs)
 		free_vmcs(vmx->nested.current_shadow_vmcs);
 	kfree(vmx->nested.cached_vmcs12);
@@ -8419,6 +8429,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 {
 	u32 sec_exec_control;
 
+	/* Postpone execution until vmcs01 is the current VMCS. */
+	if (is_guest_mode(vcpu)) {
+		to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
+		return;
+	}
+
 	/*
 	 * There is not point to enable virtualize x2apic without enable
 	 * apicv
@@ -9472,8 +9488,10 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 {
 	int msr;
 	struct page *page;
-	unsigned long *msr_bitmap;
+	unsigned long *msr_bitmap_l1;
+	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap;
 
+	/* This shortcut is ok because we support only x2APIC MSRs so far. */
 	if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
 		return false;
@@ -9482,63 +9500,37 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
 		WARN_ON(1);
 		return false;
 	}
-	msr_bitmap = (unsigned long *)kmap(page);
-	if (!msr_bitmap) {
+	msr_bitmap_l1 = (unsigned long *)kmap(page);
+	if (!msr_bitmap_l1) {
 		nested_release_page_clean(page);
 		WARN_ON(1);
 		return false;
 	}
 
+	memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
+
 	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
 		if (nested_cpu_has_apic_reg_virt(vmcs12))
 			for (msr = 0x800; msr <= 0x8ff; msr++)
 				nested_vmx_disable_intercept_for_msr(
-					msr_bitmap,
-					vmx_msr_bitmap_nested,
+					msr_bitmap_l1, msr_bitmap_l0,
 					msr, MSR_TYPE_R);
-		/* TPR is allowed */
-		nested_vmx_disable_intercept_for_msr(msr_bitmap,
-				vmx_msr_bitmap_nested,
+
+		nested_vmx_disable_intercept_for_msr(
+				msr_bitmap_l1, msr_bitmap_l0,
 				APIC_BASE_MSR + (APIC_TASKPRI >> 4),
 				MSR_TYPE_R | MSR_TYPE_W);
+
 		if (nested_cpu_has_vid(vmcs12)) {
-			/* EOI and self-IPI are allowed */
 			nested_vmx_disable_intercept_for_msr(
-				msr_bitmap,
-				vmx_msr_bitmap_nested,
+				msr_bitmap_l1, msr_bitmap_l0,
 				APIC_BASE_MSR + (APIC_EOI >> 4),
 				MSR_TYPE_W);
 			nested_vmx_disable_intercept_for_msr(
-				msr_bitmap,
-				vmx_msr_bitmap_nested,
+				msr_bitmap_l1, msr_bitmap_l0,
 				APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
 				MSR_TYPE_W);
 		}
-	} else {
-		/*
-		 * Enable reading intercept of all the x2apic
-		 * MSRs. We should not rely on vmcs12 to do any
-		 * optimizations here, it may have been modified
-		 * by L1.
-		 */
-		for (msr = 0x800; msr <= 0x8ff; msr++)
-			__vmx_enable_intercept_for_msr(
-					vmx_msr_bitmap_nested,
-					msr,
-					MSR_TYPE_R);
-
-		__vmx_enable_intercept_for_msr(
-				vmx_msr_bitmap_nested,
-				APIC_BASE_MSR + (APIC_TASKPRI >> 4),
-				MSR_TYPE_W);
-		__vmx_enable_intercept_for_msr(
-				vmx_msr_bitmap_nested,
-				APIC_BASE_MSR + (APIC_EOI >> 4),
-				MSR_TYPE_W);
-		__vmx_enable_intercept_for_msr(
-				vmx_msr_bitmap_nested,
-				APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
-				MSR_TYPE_W);
 	}
 	kunmap(page);
 	nested_release_page_clean(page);
@@ -9957,10 +9949,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	}
 
 	if (cpu_has_vmx_msr_bitmap() &&
-	    exec_control & CPU_BASED_USE_MSR_BITMAPS) {
-		nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
-		/* MSR_BITMAP will be set by following vmx_set_efer. */
-	} else
+	    exec_control & CPU_BASED_USE_MSR_BITMAPS &&
+	    nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
+		; /* MSR_BITMAP will be set by following vmx_set_efer. */
+	else
 		exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
 
 	/*
@@ -10011,6 +10003,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 			vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
 	else
 		vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+	if (kvm_has_tsc_control)
+		decache_tsc_multiplier(vmx);
 
 	if (enable_vpid) {
 		/*
@@ -10767,6 +10761,14 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	else
 		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
 			      PIN_BASED_VMX_PREEMPTION_TIMER);
+	if (kvm_has_tsc_control)
+		decache_tsc_multiplier(vmx);
+
+	if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
+		vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
+		vmx_set_virtual_x2apic_mode(vcpu,
+				vcpu->arch.apic_base & X2APIC_ENABLE);
+	}
+
 	/* This is needed for same reason as it was needed in prepare_vmcs02 */
 	vmx->host_rsp = 0;

View File

@@ -77,7 +77,7 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
  */
 static inline bool kaslr_memory_enabled(void)
 {
-	return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
+	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
 }
 
 /* Initialize base and padding for each memory region randomized with KASLR */

View File

@@ -41,6 +41,7 @@ static DEFINE_RAW_SPINLOCK(list_lock);
  * @node:	list item for parent traversal.
  * @rcu:	RCU callback item for freeing.
  * @irq:	back pointer to parent.
+ * @enabled:	true if driver enabled IRQ
  * @virq:	the virtual IRQ value provided to the requesting driver.
  *
  * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
@@ -50,6 +51,7 @@ struct vmd_irq {
 	struct list_head	node;
 	struct rcu_head		rcu;
 	struct vmd_irq_list	*irq;
+	bool			enabled;
 	unsigned int		virq;
 };
@@ -122,7 +124,9 @@ static void vmd_irq_enable(struct irq_data *data)
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&list_lock, flags);
+	WARN_ON(vmdirq->enabled);
 	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
+	vmdirq->enabled = true;
 	raw_spin_unlock_irqrestore(&list_lock, flags);
 
 	data->chip->irq_unmask(data);
@@ -136,8 +140,10 @@ static void vmd_irq_disable(struct irq_data *data)
 	data->chip->irq_mask(data);
 
 	raw_spin_lock_irqsave(&list_lock, flags);
-	list_del_rcu(&vmdirq->node);
-	INIT_LIST_HEAD_RCU(&vmdirq->node);
+	if (vmdirq->enabled) {
+		list_del_rcu(&vmdirq->node);
+		vmdirq->enabled = false;
+	}
 	raw_spin_unlock_irqrestore(&list_lock, flags);
 }

View File

@@ -118,7 +118,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
 DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 
 /* Linux <-> Xen vCPU id mapping */
-DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
+DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
 EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
 
 enum xen_domain_type xen_domain_type = XEN_NATIVE;

View File

@@ -667,18 +667,19 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
 	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
 	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;
 
-	if (bio_op(bio) == REQ_OP_DISCARD)
-		goto integrity_clone;
-
-	if (bio_op(bio) == REQ_OP_WRITE_SAME) {
+	switch (bio_op(bio)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
+		break;
+	case REQ_OP_WRITE_SAME:
 		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
-		goto integrity_clone;
-	}
-
-	bio_for_each_segment(bv, bio_src, iter)
-		bio->bi_io_vec[bio->bi_vcnt++] = bv;
+		break;
+	default:
+		bio_for_each_segment(bv, bio_src, iter)
+			bio->bi_io_vec[bio->bi_vcnt++] = bv;
+		break;
+	}
 
-integrity_clone:
 	if (bio_integrity(bio_src)) {
 		int ret;
@@ -1788,7 +1789,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
 	 * Discards need a mutable bio_vec to accommodate the payload
 	 * required by the DSM TRIM and UNMAP commands.
 	 */
-	if (bio_op(bio) == REQ_OP_DISCARD)
+	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
 		split = bio_clone_bioset(bio, gfp, bs);
 	else
 		split = bio_clone_fast(bio, gfp, bs);

View File

@@ -515,7 +515,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 void blk_set_queue_dying(struct request_queue *q)
 {
-	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+	spin_lock_irq(q->queue_lock);
+	queue_flag_set(QUEUE_FLAG_DYING, q);
+	spin_unlock_irq(q->queue_lock);
 
 	if (q->mq_ops)
 		blk_mq_wake_waiters(q);

Some files were not shown because too many files have changed in this diff.