Merge branch 'android12-5.10-lts' into 'android12-5.10'
Updates the branch to the 5.10.26 upstream kernel version.

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I84aa29bf4e4e809051eb346830c4c4b5acb78c8c
@@ -26,8 +26,9 @@ Date:		September 2008
 Contact:	Badari Pulavarty <pbadari@us.ibm.com>
 Description:
 		The file /sys/devices/system/memory/memoryX/phys_device
-		is read-only and is designed to show the name of physical
-		memory device. Implementation is currently incomplete.
+		is read-only; it is a legacy interface only ever used on s390x
+		to expose the covered storage increment.
+Users:		Legacy s390-tools lsmem/chmem
 
 What:		/sys/devices/system/memory/memoryX/phys_index
 Date:		September 2008
@@ -160,8 +160,8 @@ Under each memory block, you can see 5 files:
 
 		    "online_movable", "online", "offline" command
 		    which will be performed on all sections in the block.
-``phys_device``	    read-only: designed to show the name of physical memory
-		    device. This is not well implemented now.
+``phys_device``	    read-only: legacy interface only ever used on s390x to
+		    expose the covered storage increment.
``removable``	    read-only: contains an integer value indicating
 		    whether the memory block is removable or not
 		    removable. A value of 1 indicates that the memory
@@ -560,6 +560,27 @@ Some of these date from the very introduction of KMS in 2008 ...
 
 Level: Intermediate
 
+Remove automatic page mapping from dma-buf importing
+----------------------------------------------------
+
+When importing dma-bufs, the dma-buf and PRIME frameworks automatically map
+imported pages into the importer's DMA area. drm_gem_prime_fd_to_handle() and
+drm_gem_prime_handle_to_fd() require that importers call dma_buf_attach()
+even if they never do actual device DMA, but only CPU access through
+dma_buf_vmap(). This is a problem for USB devices, which do not support DMA
+operations.
+
+To fix the issue, automatic page mappings should be removed from the
+buffer-sharing code. Fixing this is a bit more involved, since the import/export
+cache is also tied to &drm_gem_object.import_attach. Meanwhile we paper over
+this problem for USB devices by fishing out the USB host controller device, as
+long as that supports DMA. Otherwise importing can still needlessly fail.
+
+Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter
+
+Level: Advanced
+
+
 Better Testing
 ==============
 
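The TODO entry added above is about the importer pattern sketched below: even a CPU-only importer must first attach for DMA, which is exactly what fails for USB devices. A minimal sketch (a hedged illustration, not code from this merge; import_for_cpu_access is a hypothetical helper, and dma_buf_vmap() is shown with its 5.10-era signature)::

	#include <linux/dma-buf.h>
	#include <linux/err.h>

	/* CPU-only import: the importer only wants a kernel mapping, but the
	 * PRIME/dma-buf helpers still require a DMA attachment to 'dev' first. */
	static void *import_for_cpu_access(struct dma_buf *buf, struct device *dev)
	{
		struct dma_buf_attachment *attach;
		void *vaddr;

		attach = dma_buf_attach(buf, dev);	/* fails if 'dev' cannot do DMA */
		if (IS_ERR(attach))
			return ERR_CAST(attach);

		vaddr = dma_buf_vmap(buf);		/* the CPU access actually wanted */
		if (!vaddr) {
			dma_buf_detach(buf, attach);
			return ERR_PTR(-ENOMEM);
		}
		return vaddr;
	}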
@@ -144,77 +144,13 @@ Please send incremental versions on top of what has been merged in order to fix
 the patches the way they would look like if your latest patch series was to be
 merged.
 
-Q: How can I tell what patches are queued up for backporting to the various stable releases?
---------------------------------------------------------------------------------------------
-A: Normally Greg Kroah-Hartman collects stable commits himself, but for
-networking, Dave collects up patches he deems critical for the
-networking subsystem, and then hands them off to Greg.
-
-There is a patchworks queue that you can see here:
-
-  https://patchwork.kernel.org/bundle/netdev/stable/?state=*
-
-It contains the patches which Dave has selected, but not yet handed off
-to Greg. If Greg already has the patch, then it will be here:
-
-  https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
-
-A quick way to find whether the patch is in this stable-queue is to
-simply clone the repo, and then git grep the mainline commit ID, e.g.
-::
-
-  stable-queue$ git grep -l 284041ef21fdf2e
-  releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
-  releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
-  releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
-  stable/stable-queue$
-
-Q: I see a network patch and I think it should be backported to stable.
------------------------------------------------------------------------
-Q: Should I request it via stable@vger.kernel.org like the references in
-the kernel's Documentation/process/stable-kernel-rules.rst file say?
-A: No, not for networking. Check the stable queues as per above first
-to see if it is already queued. If not, then send a mail to netdev,
-listing the upstream commit ID and why you think it should be a stable
-candidate.
-
-Before you jump to go do the above, do note that the normal stable rules
-in :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
-still apply. So you need to explicitly indicate why it is a critical
-fix and exactly what users are impacted. In addition, you need to
-convince yourself that you *really* think it has been overlooked,
-vs. having been considered and rejected.
-
-Generally speaking, the longer it has had a chance to "soak" in
-mainline, the better the odds that it is an OK candidate for stable. So
-scrambling to request a commit be added the day after it appears should
-be avoided.
-
-Q: I have created a network patch and I think it should be backported to stable.
---------------------------------------------------------------------------------
-Q: Should I add a Cc: stable@vger.kernel.org like the references in the
-kernel's Documentation/ directory say?
-A: No. See above answer. In short, if you think it really belongs in
-stable, then ensure you write a decent commit log that describes who
-gets impacted by the bug fix and how it manifests itself, and when the
-bug was introduced. If you do that properly, then the commit will get
-handled appropriately and most likely get put in the patchworks stable
-queue if it really warrants it.
-
-If you think there is some valid information relating to it being in
-stable that does *not* belong in the commit log, then use the three dash
-marker line as described in
-:ref:`Documentation/process/submitting-patches.rst <the_canonical_patch_format>`
-to temporarily embed that information into the patch that you send.
-
-Q: Are all networking bug fixes backported to all stable releases?
-------------------------------------------------------------------
-A: Due to capacity, Dave could only take care of the backports for the
-last two stable releases. For earlier stable releases, each stable
-branch maintainer is supposed to take care of them. If you find any
-patch is missing from an earlier stable branch, please notify
-stable@vger.kernel.org with either a commit ID or a formal patch
-backported, and CC Dave and other relevant networking developers.
+Q: Are there special rules regarding stable submissions on netdev?
+---------------------------------------------------------------
+While it used to be the case that netdev submissions were not supposed
+to carry explicit ``CC: stable@vger.kernel.org`` tags that is no longer
+the case today. Please follow the standard stable rules in
+:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`,
+and make sure you include appropriate Fixes tags!
 
 Q: Is the comment style convention different for the networking content?
 ------------------------------------------------------------------------
@@ -35,12 +35,6 @@ Rules on what kind of patches are accepted, and which ones are not, into the
 Procedure for submitting patches to the -stable tree
 ----------------------------------------------------
 
- - If the patch covers files in net/ or drivers/net please follow netdev stable
-   submission guidelines as described in
-   :ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
-   after first checking the stable networking queue at
-   https://patchwork.kernel.org/bundle/netdev/stable/?state=*
-   to ensure the requested patch is not already queued up.
 - Security patches should not be handled (solely) by the -stable review
   process but should follow the procedures in
   :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.
@@ -250,11 +250,6 @@ should also read
 :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
 in addition to this file.
 
-Note, however, that some subsystem maintainers want to come to their own
-conclusions on which patches should go to the stable trees. The networking
-maintainer, in particular, would rather not see individual developers
-adding lines like the above to their patches.
-
 If changes affect userland-kernel interfaces, please send the MAN-PAGES
 maintainer (as listed in the MAINTAINERS file) a man-pages patch, or at
 least a notification of the change, so that some information makes its way
@@ -189,12 +189,10 @@ num_phys
 The event interface::
 
 	/* LLDD calls these to notify the class of an event. */
-	void (*notify_port_event)(struct sas_phy *, enum port_event);
-	void (*notify_phy_event)(struct sas_phy *, enum phy_event);
-
-When sas_register_ha() returns, those are set and can be
-called by the LLDD to notify the SAS layer of such events
-the SAS layer.
+	void sas_notify_port_event(struct sas_phy *, enum port_event);
+	void sas_notify_phy_event(struct sas_phy *, enum phy_event);
+	void sas_notify_port_event_gfp(struct sas_phy *, enum port_event, gfp_t);
+	void sas_notify_phy_event_gfp(struct sas_phy *, enum phy_event, gfp_t);
 
 The port notification::
 
@@ -1155,7 +1155,7 @@ M:	Joel Fernandes <joel@joelfernandes.org>
 M:	Christian Brauner <christian@brauner.io>
 M:	Hridya Valsaraju <hridya@google.com>
 M:	Suren Baghdasaryan <surenb@google.com>
-L:	devel@driverdev.osuosl.org
+L:	linux-kernel@vger.kernel.org
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 F:	drivers/android/
@@ -8007,7 +8007,6 @@ F:	drivers/crypto/hisilicon/sec2/sec_main.c
 
 HISILICON STAGING DRIVERS FOR HIKEY 960/970
 M:	Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
-L:	devel@driverdev.osuosl.org
 S:	Maintained
 F:	drivers/staging/hikey9xx/
 
@@ -16696,7 +16695,7 @@ F:	drivers/staging/vt665?/
 
 STAGING SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L:	devel@driverdev.osuosl.org
+L:	linux-staging@lists.linux.dev
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 F:	drivers/staging/
@@ -18738,7 +18737,7 @@ VME SUBSYSTEM
 M:	Martyn Welch <martyn@welchs.me.uk>
 M:	Manohar Vanga <manohar.vanga@gmail.com>
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L:	devel@driverdev.osuosl.org
+L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F:	Documentation/driver-api/vme.rst
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 21
+SUBLEVEL = 26
 EXTRAVERSION =
 NAME = Dare mighty things
 
@@ -1338,11 +1338,19 @@ define filechk_utsrelease.h
 endef
 
 define filechk_version.h
-	echo \#define LINUX_VERSION_CODE $(shell \
-	expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
-	echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))'
+	if [ $(SUBLEVEL) -gt 255 ]; then \
+		echo \#define LINUX_VERSION_CODE $(shell \
+		expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + 255); \
+	else \
+		echo \#define LINUX_VERSION_CODE $(shell \
+		expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \
+	fi; \
+	echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + \
+	((c) > 255 ? 255 : (c)))'
 endef
 
+$(version_h): PATCHLEVEL := $(if $(PATCHLEVEL), $(PATCHLEVEL), 0)
+$(version_h): SUBLEVEL := $(if $(SUBLEVEL), $(SUBLEVEL), 0)
 $(version_h): FORCE
 	$(call filechk,version.h)
 	$(Q)rm -f $(old_version_h)
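A note on the version-clamping hunk above: with the old macro, a SUBLEVEL above 255 overflowed into the PATCHLEVEL byte of LINUX_VERSION_CODE. A user-space sketch of the new, clamped semantics (an illustration only, not part of the patch)::

	#include <stdio.h>

	/* Clamped form from the hunk above: any sublevel > 255 encodes as 255. */
	#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))

	int main(void)
	{
		printf("%#x\n", KERNEL_VERSION(5, 10, 256));	/* 0x50aff */
		printf("%#x\n", KERNEL_VERSION(5, 10, 300));	/* 0x50aff */
		/* unclamped, 5.10.300 would overflow into the PATCHLEVEL byte: */
		printf("%#x\n", (5 << 16) + (10 << 8) + 300);	/* 0x50b2c, reads as "5.11.44" */
		return 0;
	}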
[diff for one file suppressed because it is too large]
@@ -959,6 +959,7 @@ choice
 
 config CPU_BIG_ENDIAN
 	bool "Build big-endian kernel"
+	depends on !LD_IS_LLD || LLD_VERSION >= 130000
 	help
 	  Say Y if you plan on running a kernel with a big-endian userspace.
 
@@ -227,8 +227,6 @@
 					 "timing-adjustment";
 			rx-fifo-depth = <4096>;
 			tx-fifo-depth = <2048>;
-			resets = <&reset RESET_ETHERNET>;
-			reset-names = "stmmaceth";
 			status = "disabled";
 		};
 
@@ -224,8 +224,6 @@
 					 "timing-adjustment";
 			rx-fifo-depth = <4096>;
 			tx-fifo-depth = <2048>;
-			resets = <&reset RESET_ETHERNET>;
-			reset-names = "stmmaceth";
 			status = "disabled";
 
 			mdio0: mdio {
@@ -13,7 +13,6 @@
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/power/meson-gxbb-power.h>
-#include <dt-bindings/reset/amlogic,meson-gxbb-reset.h>
 #include <dt-bindings/thermal/thermal.h>
 
 / {
@@ -576,8 +575,6 @@
 			interrupt-names = "macirq";
 			rx-fifo-depth = <4096>;
 			tx-fifo-depth = <2048>;
-			resets = <&reset RESET_ETHERNET>;
-			reset-names = "stmmaceth";
 			power-domains = <&pwrc PWRC_GXBB_ETHERNET_MEM_ID>;
 			status = "disabled";
 		};
@@ -65,10 +65,7 @@ extern u64 idmap_ptrs_per_pgd;
 
 static inline bool __cpu_uses_extended_idmap(void)
 {
-	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
-		return false;
-
-	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
+	return unlikely(idmap_t0sz != TCR_T0SZ(vabits_actual));
 }
 
 /*
@@ -334,7 +334,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	 */
 	adrp	x5, __idmap_text_end
 	clz	x5, x5
-	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
+	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
 	b.ge	1f			// .. then skip VA range extension
 
 	adr_l	x6, idmap_t0sz
@@ -460,7 +460,7 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
 	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
 }
 
-static inline u32 armv8pmu_read_evcntr(int idx)
+static inline u64 armv8pmu_read_evcntr(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
 
@@ -152,7 +152,7 @@ SYM_FUNC_END(__hyp_do_panic)
 
 .macro invalid_host_el1_vect
 	.align 7
-	mov	x0, xzr		/* restore_host = false */
+	mov	x0, xzr		/* host_ctxt = NULL */
 	mrs	x1, spsr_el2
 	mrs	x2, elr_el2
 	mrs	x3, par_el1
@@ -29,6 +29,7 @@
 #include <linux/kexec.h>
 #include <linux/crash_dump.h>
 #include <linux/hugetlb.h>
+#include <linux/acpi_iort.h>
 
 #include <asm/boot.h>
 #include <asm/fixmap.h>
@@ -43,8 +44,6 @@
 #include <asm/tlb.h>
 #include <asm/alternative.h>
 
-#define ARM64_ZONE_DMA_BITS	30
-
 /*
  * We need to be able to catch inadvertent references to memstart_addr
  * that occur (potentially in generic code) before arm64_memblock_init()
@@ -189,8 +188,14 @@ static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};
+	unsigned int __maybe_unused acpi_zone_dma_bits;
+	unsigned int __maybe_unused dt_zone_dma_bits;
 
 #ifdef CONFIG_ZONE_DMA
+	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
+	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
+	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
+	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
 	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
 #endif
 #ifdef CONFIG_ZONE_DMA32
@@ -214,6 +219,18 @@ int pfn_valid(unsigned long pfn)
 
 	if (!valid_section(__pfn_to_section(pfn)))
 		return 0;
+
+	/*
+	 * ZONE_DEVICE memory does not have the memblock entries.
+	 * memblock_is_map_memory() check for ZONE_DEVICE based
+	 * addresses will always fail. Even the normal hotplugged
+	 * memory will never have MEMBLOCK_NOMAP flag set in their
+	 * memblock entries. Skip memblock search for all non early
+	 * memory sections covering all of hotplug memory including
+	 * both normal and ZONE_DEVICE based.
+	 */
+	if (!early_section(__pfn_to_section(pfn)))
+		return pfn_section_valid(__pfn_to_section(pfn), pfn);
 #endif
 	return memblock_is_map_memory(addr);
 }
@@ -380,18 +397,11 @@ void __init arm64_memblock_init(void)
 
 	early_init_fdt_scan_reserved_mem();
 
-	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-		zone_dma_bits = ARM64_ZONE_DMA_BITS;
-		arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS);
-	}
-
 	if (IS_ENABLED(CONFIG_ZONE_DMA32))
 		arm64_dma32_phys_limit = max_zone_phys(32);
 	else
 		arm64_dma32_phys_limit = PHYS_MASK + 1;
 
-	reserve_crashkernel();
-
 	reserve_elfcorehdr();
 
 	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
@@ -433,6 +443,12 @@ void __init bootmem_init(void)
 	sparse_init();
 	zone_sizes_init(min, max);
 
+	/*
+	 * request_standard_resources() depends on crashkernel's memory being
+	 * reserved, so do it here.
+	 */
+	reserve_crashkernel();
+
 	memblock_dump_all();
 }
 
@@ -40,7 +40,7 @@
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
 
-u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 
 u64 __section(".mmuoff.data.write") vabits_actual;
@@ -36,6 +36,7 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
 
 # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
 KCOV_INSTRUMENT		:= n
+UBSAN_SANITIZE := n
 
 # decompressor objects (linked with vmlinuz)
 vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
@@ -12,8 +12,8 @@ AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
 obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o
 poly1305-mips-y := poly1305-core.o poly1305-glue.o
 
-perlasm-flavour-$(CONFIG_CPU_MIPS32) := o32
-perlasm-flavour-$(CONFIG_CPU_MIPS64) := 64
+perlasm-flavour-$(CONFIG_32BIT) := o32
+perlasm-flavour-$(CONFIG_64BIT) := 64
 
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@)
@@ -201,9 +201,12 @@ config PREFETCH
 	def_bool y
 	depends on PA8X00 || PA7200
 
+config PARISC_HUGE_KERNEL
+	def_bool y if !MODULES || UBSAN || FTRACE || COMPILE_TEST
+
 config MLONGCALLS
-	def_bool y if !MODULES || UBSAN || FTRACE
-	bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
+	def_bool y if PARISC_HUGE_KERNEL
+	bool "Enable the -mlong-calls compiler option for big kernels" if !PARISC_HUGE_KERNEL
 	depends on PA8X00
 	help
 	  If you configure the kernel to include many drivers built-in instead
@@ -73,7 +73,7 @@ void __patch_exception(int exc, unsigned long addr);
 #endif
 
 #define OP_RT_RA_MASK	0xffff0000UL
-#define LIS_R2		0x3c020000UL
+#define LIS_R2		0x3c400000UL
 #define ADDIS_R2_R12	0x3c4c0000UL
 #define ADDI_R2_R2	0x38420000UL
 
@@ -7,7 +7,7 @@
 #include <linux/bug.h>
 #include <asm/cputable.h>
 
-static inline bool early_cpu_has_feature(unsigned long feature)
+static __always_inline bool early_cpu_has_feature(unsigned long feature)
 {
 	return !!((CPU_FTRS_ALWAYS & feature) ||
 		  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
@@ -46,7 +46,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 	return static_branch_likely(&cpu_feature_keys[i]);
 }
 #else
-static inline bool cpu_has_feature(unsigned long feature)
+static __always_inline bool cpu_has_feature(unsigned long feature)
 {
 	return early_cpu_has_feature(feature);
 }
@@ -59,6 +59,9 @@ struct machdep_calls {
 	int		(*pcibios_root_bridge_prepare)(struct pci_host_bridge
 				*bridge);
 
+	/* finds all the pci_controllers present at boot */
+	void		(*discover_phbs)(void);
+
 	/* To setup PHBs when using automatic OF platform driver for PCI */
 	int		(*pci_setup_phb)(struct pci_controller *host);
 
@@ -62,6 +62,9 @@ struct pt_regs
 };
 #endif
 
+
+#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs))
+
 #ifdef __powerpc64__
 
 /*
@@ -190,7 +193,7 @@ extern int ptrace_put_reg(struct task_struct *task, int regno,
 #define TRAP_FLAGS_MASK		0x11
 #define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
 #define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
-#define SET_FULL_REGS(regs)	((regs)->trap |= 1)
+#define SET_FULL_REGS(regs)	((regs)->trap &= ~1)
 #endif
 #define CHECK_FULL_REGS(regs)	BUG_ON(!FULL_REGS(regs))
 #define NV_REG_POISON		0xdeadbeefdeadbeefUL
@@ -205,7 +208,7 @@ extern int ptrace_put_reg(struct task_struct *task, int regno,
 #define TRAP_FLAGS_MASK		0x1F
 #define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
 #define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
-#define SET_FULL_REGS(regs)	((regs)->trap |= 1)
+#define SET_FULL_REGS(regs)	((regs)->trap &= ~1)
 #define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
 #define IS_MCHECK_EXC(regs)	(((regs)->trap & 4) != 0)
 #define IS_DEBUG_EXC(regs)	(((regs)->trap & 8) != 0)
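The SET_FULL_REGS change above follows directly from the encoding visible in the same hunk: FULL_REGS() treats bit 0 of regs->trap as the "partial regs" flag, so marking a frame full must clear that bit, not set it. A stand-alone sketch of just that logic (a hedged illustration, not kernel code)::

	#include <assert.h>

	/* Mirrors the macros in the hunk: "full regs" is encoded as bit 0 clear. */
	#define FULL_REGS(trap)	(((trap) & 1) == 0)

	int main(void)
	{
		unsigned long trap = 1;		/* partial register set */

		trap |= 1;			/* old SET_FULL_REGS: bit stays set */
		assert(!FULL_REGS(trap));	/* still reads as *not* full - the bug */

		trap &= ~1UL;			/* fixed SET_FULL_REGS: clears the bit */
		assert(FULL_REGS(trap));
		return 0;
	}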
@@ -71,6 +71,16 @@ static inline void disable_kernel_vsx(void)
 {
 	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
+#else
+static inline void enable_kernel_vsx(void)
+{
+	BUILD_BUG();
+}
+
+static inline void disable_kernel_vsx(void)
+{
+	BUILD_BUG();
+}
 #endif
 
 #ifdef CONFIG_SPE
@@ -307,7 +307,7 @@ int main(void)
 
 	/* Interrupt register frame */
 	DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
-	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_WITH_PT_REGS);
 	STACK_PT_REGS_OFFSET(GPR0, gpr[0]);
 	STACK_PT_REGS_OFFSET(GPR1, gpr[1]);
 	STACK_PT_REGS_OFFSET(GPR2, gpr[2]);
@@ -470,7 +470,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
 
 	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
 	/* MSR[RI] is clear iff using SRR regs */
-	.if IHSRR == EXC_HV_OR_STD
+	.if IHSRR_IF_HVMODE
 	BEGIN_FTR_SECTION
 	xori	r10,r10,MSR_RI
 	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
@@ -461,10 +461,11 @@ InstructionTLBMiss:
 	cmplw	0,r1,r3
 #endif
 	mfspr	r2, SPRN_SPRG_PGDIR
-	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
 	bgt-	112f
 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
+	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
 	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
 #endif
 112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
@@ -523,9 +524,10 @@ DataLoadTLBMiss:
 	lis	r1, TASK_SIZE@h		/* check if kernel address */
 	cmplw	0,r1,r3
 	mfspr	r2, SPRN_SPRG_PGDIR
-	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
+	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
 	bgt-	112f
 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
+	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
 	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
 112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
 	lwz	r2,0(r2)		/* get pmd entry */
@@ -599,9 +601,10 @@ DataStoreTLBMiss:
 	lis	r1, TASK_SIZE@h		/* check if kernel address */
 	cmplw	0,r1,r3
 	mfspr	r2, SPRN_SPRG_PGDIR
-	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
+	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
 	bgt-	112f
 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
+	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
 	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
 112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
 	lwz	r2,0(r2)		/* get pmd entry */
@@ -1625,3 +1625,13 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
+
+
+static int __init discover_phbs(void)
+{
+	if (ppc_md.discover_phbs)
+		ppc_md.discover_phbs();
+
+	return 0;
+}
+core_initcall(discover_phbs);
@@ -2170,7 +2170,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack,
 		 * See if this is an exception frame.
 		 * We look for the "regshere" marker in the current frame.
 		 */
-		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
+		if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
 		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
 			struct pt_regs *regs = (struct pt_regs *)
 				(sp + STACK_FRAME_OVERHEAD);
@@ -509,8 +509,11 @@ out:
 		die("Unrecoverable nested System Reset", regs, SIGABRT);
 #endif
 	/* Must die if the interrupt is not recoverable */
-	if (!(regs->msr & MSR_RI))
+	if (!(regs->msr & MSR_RI)) {
+		/* For the reason explained in die_mce, nmi_exit before die */
+		nmi_exit();
 		die("Unrecoverable System Reset", regs, SIGABRT);
+	}
 
 	if (saved_hsrrs) {
 		mtspr(SPRN_HSRR0, hsrr0);
@@ -1853,7 +1853,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 			goto compute_done;
 		}
 
-		return -1;
+		goto unknown_opcode;
 #ifdef __powerpc64__
 	case 777:	/* modsd */
 		if (!cpu_has_feature(CPU_FTR_ARCH_300))
@@ -2909,6 +2909,20 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
 
 	}
 
+	if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
+		switch (GETTYPE(op->type)) {
+		case LOAD:
+			if (ra == rd)
+				goto unknown_opcode;
+			fallthrough;
+		case STORE:
+		case LOAD_FP:
+		case STORE_FP:
+			if (ra == 0)
+				goto unknown_opcode;
+		}
+	}
+
 #ifdef CONFIG_VSX
 	if ((GETTYPE(op->type) == LOAD_VSX ||
 	     GETTYPE(op->type) == STORE_VSX) &&
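The new block above rejects invalid update-form load/store encodings before emulation. A stand-alone sketch of the same operand rule (update_form_valid is a hypothetical helper; only the ra/rd conditions come from the hunk)::

	#include <stdbool.h>
	#include <stdio.h>

	/* Update-form loads/stores with ra == 0, or loads with ra == rd,
	 * are invalid instruction forms on powerpc. */
	static bool update_form_valid(bool is_load, int ra, int rd)
	{
		if (ra == 0)
			return false;
		if (is_load && ra == rd)
			return false;
		return true;
	}

	int main(void)
	{
		printf("%d\n", update_form_valid(true, 5, 5));	/* 0: e.g. lwzu r5,8(r5) */
		printf("%d\n", update_form_valid(false, 0, 5));	/* 0: e.g. stwu r5,8(r0) */
		printf("%d\n", update_form_valid(true, 4, 5));	/* 1: e.g. lwzu r5,8(r4) */
		return 0;
	}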
@@ -211,7 +211,7 @@ static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *
 	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
 		*addrp = mfspr(SPRN_SDAR);
 
-	if (is_kernel_addr(mfspr(SPRN_SDAR)) && perf_allow_kernel(&event->attr) != 0)
+	if (is_kernel_addr(mfspr(SPRN_SDAR)) && event->attr.exclude_kernel)
 		*addrp = 0;
 }
 
@@ -477,7 +477,7 @@ static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *
 			 * addresses, hence include a check before filtering code
 			 */
 			if (!(ppmu->flags & PPMU_ARCH_31) &&
-			    is_kernel_addr(addr) && perf_allow_kernel(&event->attr) != 0)
+			    is_kernel_addr(addr) && event->attr.exclude_kernel)
 				continue;
 
 			/* Branches are read most recent first (ie. mfbhrb 0 is
@@ -2112,7 +2112,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 			left += period;
 			if (left <= 0)
 				left = period;
-			record = siar_valid(regs);
+
+			/*
+			 * If address is not requested in the sample via
+			 * PERF_SAMPLE_IP, just record that sample irrespective
+			 * of SIAR valid check.
+			 */
+			if (event->attr.sample_type & PERF_SAMPLE_IP)
+				record = siar_valid(regs);
+			else
+				record = 1;
+
 			event->hw.last_period = event->hw.sample_period;
 		}
 		if (left < 0x80000000LL)
@@ -2130,8 +2140,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	 * MMCR2. Check attr.exclude_kernel and address to drop the sample in
 	 * these cases.
 	 */
-	if (event->attr.exclude_kernel && record)
-		if (is_kernel_addr(mfspr(SPRN_SIAR)))
+	if (event->attr.exclude_kernel &&
+	    (event->attr.sample_type & PERF_SAMPLE_IP) &&
+	    is_kernel_addr(mfspr(SPRN_SIAR)))
 		record = 0;
 
 	/*
@@ -4,6 +4,7 @@
  * Copyright 2006-2007 Michael Ellerman, IBM Corp.
  */
 
+#include <linux/crash_dump.h>
 #include <linux/device.h>
 #include <linux/irq.h>
 #include <linux/msi.h>
@@ -458,6 +459,26 @@ again:
 			return hwirq;
 		}
 
+		/*
+		 * Depending on the number of online CPUs in the original
+		 * kernel, it is likely for CPU #0 to be offline in a kdump
+		 * kernel. The associated IRQs in the affinity mappings
+		 * provided by irq_create_affinity_masks() are thus not
+		 * started by irq_startup(), as per-design for managed IRQs.
+		 * This can be a problem with multi-queue block devices driven
+		 * by blk-mq : such a non-started IRQ is very likely paired
+		 * with the single queue enforced by blk-mq during kdump (see
+		 * blk_mq_alloc_tag_set()). This causes the device to remain
+		 * silent and likely hangs the guest at some point.
+		 *
+		 * We don't really care for fine-grained affinity when doing
+		 * kdump actually : simply ignore the pre-computed affinity
+		 * masks in this case and let the default mask with all CPUs
+		 * be used when creating the IRQ mappings.
+		 */
+		if (is_kdump_kernel())
+			virq = irq_create_mapping(NULL, hwirq);
+		else
 			virq = irq_create_mapping_affinity(NULL, hwirq,
 							   entry->affinity);
 
@@ -84,7 +84,6 @@ config RISCV
 	select PCI_MSI if PCI
 	select RISCV_INTC
 	select RISCV_TIMER if RISCV_SBI
-	select SPARSEMEM_STATIC if 32BIT
 	select SPARSE_IRQ
 	select SYSCTL_EXCEPTION_TRACE
 	select THREAD_INFO_IN_TASK
@@ -145,7 +144,8 @@ config ARCH_FLATMEM_ENABLE
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	depends on MMU
-	select SPARSEMEM_VMEMMAP_ENABLE
+	select SPARSEMEM_STATIC if 32BIT && SPARSMEM
+	select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
 
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool ARCH_SPARSEMEM_ENABLE
@@ -51,10 +51,10 @@ enum sbi_ext_rfence_fid {
 	SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
 	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
 	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
-	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
 	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
-	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
+	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
 	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
+	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
 };
 
 enum sbi_ext_hsm_fid {
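The reorder above matters because the enumerators are numbered implicitly and those values are passed to firmware as SBI function IDs; swapping HFENCE_GVMA with HFENCE_GVMA_VMID (and likewise for VVMA) restores the numbering the SBI spec assigns. A small illustration (the printed values assume the fixed ordering shown in the hunk)::

	#include <stdio.h>

	enum sbi_ext_rfence_fid {
		SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
		SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
		SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
		SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,	/* 3, per the SBI spec */
		SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,	/* 4 */
		SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,	/* 5 */
		SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,	/* 6 */
	};

	int main(void)
	{
		/* before the fix, GVMA and GVMA_VMID were transposed (3 <-> 4) */
		printf("%d %d\n", SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
		       SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA);	/* prints: 3 4 */
		return 0;
	}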
@@ -201,8 +201,8 @@ extern unsigned int s390_pci_no_rid;
   Prototypes
 ----------------------------------------------------------------------------- */
 /* Base stuff */
-int zpci_create_device(struct zpci_dev *);
-void zpci_remove_device(struct zpci_dev *zdev);
+int zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
+void zpci_remove_device(struct zpci_dev *zdev, bool set_error);
 int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
@@ -212,7 +212,7 @@ void zpci_remove_reserved_devices(void);
 /* CLP */
 int clp_setup_writeback_mio(void);
 int clp_scan_pci_devices(void);
-int clp_add_pci_device(u32, u32, int);
+int clp_query_pci_fn(struct zpci_dev *zdev);
 int clp_enable_fh(struct zpci_dev *, u8);
 int clp_disable_fh(struct zpci_dev *);
 int clp_get_state(u32 fid, enum zpci_state *state);
@@ -775,7 +775,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
 static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
 {
 	struct sclp_core_entry *core;
-	cpumask_t avail;
+	static cpumask_t avail;
 	bool configured;
 	u16 core_id;
 	int nr, i;
@@ -217,7 +217,7 @@ void vtime_flush(struct task_struct *tsk)
 	avg_steal = S390_lowcore.avg_steal_timer / 2;
 	if ((s64) steal > 0) {
 		S390_lowcore.steal_timer = 0;
-		account_steal_time(steal);
+		account_steal_time(cputime_to_nsecs(steal));
 		avg_steal += steal;
 	}
 	S390_lowcore.avg_steal_timer = avg_steal;
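The one-liner above is a unit fix: S390_lowcore.steal_timer accumulates TOD-clock cputime units (4096 per microsecond), while account_steal_time() expects nanoseconds, so the unconverted value overstated steal time roughly fourfold. A hedged sketch of the conversion (simplified; the in-kernel cputime_to_nsecs() is implemented differently)::

	#include <stdio.h>
	#include <stdint.h>

	/* One s390 TOD cputime unit is 1/4096 us, i.e. 125/512 ns. */
	static uint64_t cputime_to_nsecs(uint64_t cputime)
	{
		return (cputime * 125) >> 9;
	}

	int main(void)
	{
		uint64_t steal = 4096;	/* 1 us of steal time, in TOD units */

		printf("%llu ns\n", (unsigned long long)cputime_to_nsecs(steal)); /* 1000 */
		/* passing 'steal' unconverted would have accounted 4096 ns instead */
		return 0;
	}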
@@ -682,56 +682,101 @@ int zpci_disable_device(struct zpci_dev *zdev)
 }
 EXPORT_SYMBOL_GPL(zpci_disable_device);
 
-void zpci_remove_device(struct zpci_dev *zdev)
+/* zpci_remove_device - Removes the given zdev from the PCI core
+ * @zdev: the zdev to be removed from the PCI core
+ * @set_error: if true the device's error state is set to permanent failure
+ *
+ * Sets a zPCI device to a configured but offline state; the zPCI
+ * device is still accessible through its hotplug slot and the zPCI
+ * API but is removed from the common code PCI bus, making it
+ * no longer available to drivers.
+ */
+void zpci_remove_device(struct zpci_dev *zdev, bool set_error)
 {
 	struct zpci_bus *zbus = zdev->zbus;
 	struct pci_dev *pdev;
 
+	if (!zdev->zbus->bus)
+		return;
+
 	pdev = pci_get_slot(zbus->bus, zdev->devfn);
 	if (pdev) {
-		if (pdev->is_virtfn)
-			return zpci_iov_remove_virtfn(pdev, zdev->vfn);
+		if (set_error)
+			pdev->error_state = pci_channel_io_perm_failure;
+		if (pdev->is_virtfn) {
+			zpci_iov_remove_virtfn(pdev, zdev->vfn);
+			/* balance pci_get_slot */
+			pci_dev_put(pdev);
+			return;
+		}
 		pci_stop_and_remove_bus_device_locked(pdev);
+		/* balance pci_get_slot */
+		pci_dev_put(pdev);
 	}
 }
 
-int zpci_create_device(struct zpci_dev *zdev)
+/**
+ * zpci_create_device() - Create a new zpci_dev and add it to the zbus
+ * @fid: Function ID of the device to be created
+ * @fh: Current Function Handle of the device to be created
+ * @state: Initial state after creation either Standby or Configured
+ *
+ * Creates a new zpci device and adds it to its, possibly newly created, zbus
+ * as well as zpci_list.
+ *
+ * Returns: 0 on success, an error value otherwise
+ */
+int zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
 {
+	struct zpci_dev *zdev;
 	int rc;
 
+	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
+	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
+	if (!zdev)
+		return -ENOMEM;
+
+	/* FID and Function Handle are the static/dynamic identifiers */
+	zdev->fid = fid;
+	zdev->fh = fh;
+
+	/* Query function properties and update zdev */
+	rc = clp_query_pci_fn(zdev);
+	if (rc)
+		goto error;
+	zdev->state = state;
+
 	kref_init(&zdev->kref);
+	mutex_init(&zdev->lock);
+
+	rc = zpci_init_iommu(zdev);
+	if (rc)
+		goto error;
+
+	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
+		rc = zpci_enable_device(zdev);
+		if (rc)
+			goto error_destroy_iommu;
+	}
+
+	rc = zpci_bus_device_register(zdev, &pci_root_ops);
+	if (rc)
+		goto error_disable;
+
 	spin_lock(&zpci_list_lock);
 	list_add_tail(&zdev->entry, &zpci_list);
 	spin_unlock(&zpci_list_lock);
 
-	rc = zpci_init_iommu(zdev);
-	if (rc)
-		goto out;
-
-	mutex_init(&zdev->lock);
-	if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
-		rc = zpci_enable_device(zdev);
-		if (rc)
-			goto out_destroy_iommu;
-	}
-
-	rc = zpci_bus_device_register(zdev, &pci_root_ops);
-	if (rc)
-		goto out_disable;
-
 	return 0;
 
-out_disable:
+error_disable:
 	if (zdev->state == ZPCI_FN_STATE_ONLINE)
 		zpci_disable_device(zdev);
-
-out_destroy_iommu:
+error_destroy_iommu:
 	zpci_destroy_iommu(zdev);
-out:
-	spin_lock(&zpci_list_lock);
-	list_del(&zdev->entry);
-	spin_unlock(&zpci_list_lock);
+error:
+	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
+	kfree(zdev);
 	return rc;
 }
 
@@ -740,7 +785,7 @@ void zpci_release_device(struct kref *kref)
 	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
 
 	if (zdev->zbus->bus)
-		zpci_remove_device(zdev);
+		zpci_remove_device(zdev, false);
 
 	switch (zdev->state) {
 	case ZPCI_FN_STATE_ONLINE:
@@ -181,7 +181,7 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
 	return 0;
 }
 
-static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
+int clp_query_pci_fn(struct zpci_dev *zdev)
 {
 	struct clp_req_rsp_query_pci *rrb;
 	int rc;
@@ -194,7 +194,7 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
 	rrb->request.hdr.len = sizeof(rrb->request);
 	rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
 	rrb->response.hdr.len = sizeof(rrb->response);
-	rrb->request.fh = fh;
+	rrb->request.fh = zdev->fh;
 
 	rc = clp_req(rrb, CLP_LPS_PCI);
 	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
@@ -212,40 +212,6 @@ out:
 	return rc;
 }
 
-int clp_add_pci_device(u32 fid, u32 fh, int configured)
-{
-	struct zpci_dev *zdev;
-	int rc = -ENOMEM;
-
-	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
-	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
-	if (!zdev)
-		goto error;
-
-	zdev->fh = fh;
-	zdev->fid = fid;
-
-	/* Query function properties and update zdev */
-	rc = clp_query_pci_fn(zdev, fh);
-	if (rc)
-		goto error;
-
-	if (configured)
-		zdev->state = ZPCI_FN_STATE_CONFIGURED;
-	else
-		zdev->state = ZPCI_FN_STATE_STANDBY;
-
-	rc = zpci_create_device(zdev);
-	if (rc)
-		goto error;
-	return 0;
-
-error:
-	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
-	kfree(zdev);
-	return rc;
-}
-
 static int clp_refresh_fh(u32 fid);
 /*
  * Enable/Disable a given PCI function and update its function handle if
@@ -408,7 +374,7 @@ static void __clp_add(struct clp_fh_list_entry *entry, void *data)
 
 	zdev = get_zdev_by_fid(entry->fid);
 	if (!zdev)
-		clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
+		zpci_create_device(entry->fid, entry->fh, entry->config_state);
 }
 
 int clp_scan_pci_devices(void)
@@ -76,20 +76,17 @@ void zpci_event_error(void *data)
 static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 {
 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
-	struct pci_dev *pdev = NULL;
 	enum zpci_state state;
+	struct pci_dev *pdev;
 	int ret;
 
-	if (zdev && zdev->zbus && zdev->zbus->bus)
-		pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
-
 	zpci_err("avail CCDF:\n");
 	zpci_err_hex(ccdf, sizeof(*ccdf));
 
 	switch (ccdf->pec) {
 	case 0x0301: /* Reserved|Standby -> Configured */
 		if (!zdev) {
-			ret = clp_add_pci_device(ccdf->fid, ccdf->fh, 1);
+			zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED);
 			break;
 		}
 		/* the configuration request may be stale */
@@ -116,7 +113,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 		break;
 	case 0x0302: /* Reserved -> Standby */
 		if (!zdev) {
-			clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
+			zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
 			break;
 		}
 		zdev->fh = ccdf->fh;
@@ -124,8 +121,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 	case 0x0303: /* Deconfiguration requested */
 		if (!zdev)
 			break;
-		if (pdev)
-			zpci_remove_device(zdev);
+		zpci_remove_device(zdev, false);
 
 		ret = zpci_disable_device(zdev);
 		if (ret)
@@ -140,12 +136,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 	case 0x0304: /* Configured -> Standby|Reserved */
 		if (!zdev)
 			break;
-		if (pdev) {
-			/* Give the driver a hint that the function is
-			 * already unusable. */
-			pdev->error_state = pci_channel_io_perm_failure;
-			zpci_remove_device(zdev);
-		}
+		/* Give the driver a hint that the function is
+		 * already unusable.
+		 */
+		zpci_remove_device(zdev, true);
 
 		zdev->fh = ccdf->fh;
 		zpci_disable_device(zdev);
@@ -57,20 +57,26 @@ static inline int sparc_validate_prot(unsigned long prot, unsigned long addr)
 {
 if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI))
 return 0;
-if (prot & PROT_ADI) {
-if (!adi_capable())
-return 0;
-
-if (addr) {
-struct vm_area_struct *vma;
-
-vma = find_vma(current->mm, addr);
-if (vma) {
-/* ADI can not be enabled on PFN
-* mapped pages
-*/
-if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
-return 0;
+return 1;
+}
+
+#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)
+/* arch_validate_flags() - Ensure combination of flags is valid for a
+* VMA.
+*/
+static inline bool arch_validate_flags(unsigned long vm_flags)
+{
+/* If ADI is being enabled on this VMA, check for ADI
+* capability on the platform and ensure VMA is suitable
+* for ADI
+*/
+if (vm_flags & VM_SPARC_ADI) {
+if (!adi_capable())
+return false;
+
+/* ADI can not be enabled on PFN mapped pages */
+if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+return false;

 /* Mergeable pages can become unmergeable
 * if ADI is enabled on them even if they
@@ -80,12 +86,10 @@ static inline int sparc_validate_prot(unsigned long prot, unsigned long addr)
 * tags on them. Disallow ADI on mergeable
 * pages.
 */
-if (vma->vm_flags & VM_MERGEABLE)
-return 0;
+if (vm_flags & VM_MERGEABLE)
+return false;
 }
-}
-}
-return 1;
+return true;
 }
 #endif /* CONFIG_SPARC64 */

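The replacement hook above validates a VMA's final flag mix instead of the mprotect() prot bits, so it becomes a pure function of vm_flags and no longer needs find_vma(). A rough userspace model of the same predicate (flag values invented for illustration, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative flag values, not the kernel's. */
    #define VM_SPARC_ADI  (1UL << 0)
    #define VM_PFNMAP     (1UL << 1)
    #define VM_MIXEDMAP   (1UL << 2)
    #define VM_MERGEABLE  (1UL << 3)

    static bool adi_capable(void) { return true; } /* pretend the CPU has ADI */

    /* Mirrors the structure of the new hook: reject ADI on PFN-mapped or
     * mergeable mappings, allow everything else. */
    static bool validate_flags_model(unsigned long vm_flags)
    {
        if (vm_flags & VM_SPARC_ADI) {
            if (!adi_capable())
                return false;
            if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
                return false;
            if (vm_flags & VM_MERGEABLE)
                return false;
        }
        return true;
    }

    int main(void)
    {
        printf("%d\n", validate_flags_model(VM_SPARC_ADI));             /* 1 */
        printf("%d\n", validate_flags_model(VM_SPARC_ADI | VM_PFNMAP)); /* 0 */
        return 0;
    }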
@@ -197,6 +197,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 size = memblock_phys_mem_size() - memblock_reserved_size();
 *pages_avail = (size >> PAGE_SHIFT) - high_pages;

+/* Only allow low memory to be allocated via memblock allocation */
+memblock_set_current_limit(max_low_pfn << PAGE_SHIFT);
+
 return max_pfn;
 }

@@ -318,7 +318,7 @@ _initial_blocks_\@:

 # Main loop - Encrypt/Decrypt remaining blocks

-cmp $0, %r13
+test %r13, %r13
 je _zero_cipher_left_\@
 sub $64, %r13
 je _four_cipher_left_\@
@@ -437,7 +437,7 @@ _multiple_of_16_bytes_\@:

 mov PBlockLen(%arg2), %r12

-cmp $0, %r12
+test %r12, %r12
 je _partial_done\@

 GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
@@ -474,7 +474,7 @@ _T_8_\@:
 add $8, %r10
 sub $8, %r11
 psrldq $8, %xmm0
-cmp $0, %r11
+test %r11, %r11
 je _return_T_done_\@
 _T_4_\@:
 movd %xmm0, %eax
@@ -482,7 +482,7 @@ _T_4_\@:
 add $4, %r10
 sub $4, %r11
 psrldq $4, %xmm0
-cmp $0, %r11
+test %r11, %r11
 je _return_T_done_\@
 _T_123_\@:
 movd %xmm0, %eax
@@ -619,7 +619,7 @@ _get_AAD_blocks\@:

 /* read the last <16B of AAD */
 _get_AAD_rest\@:
-cmp $0, %r11
+test %r11, %r11
 je _get_AAD_done\@

 READ_PARTIAL_BLOCK %r10, %r11, \TMP1, \TMP7
@@ -640,7 +640,7 @@ _get_AAD_done\@:
 .macro PARTIAL_BLOCK CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \
 AAD_HASH operation
 mov PBlockLen(%arg2), %r13
-cmp $0, %r13
+test %r13, %r13
 je _partial_block_done_\@ # Leave Macro if no partial blocks
 # Read in input data without over reading
 cmp $16, \PLAIN_CYPH_LEN
@@ -692,7 +692,7 @@ _no_extra_mask_1_\@:
 pshufb %xmm2, %xmm3
 pxor %xmm3, \AAD_HASH

-cmp $0, %r10
+test %r10, %r10
 jl _partial_incomplete_1_\@

 # GHASH computation for the last <16 Byte block
@@ -727,7 +727,7 @@ _no_extra_mask_2_\@:
 pshufb %xmm2, %xmm9
 pxor %xmm9, \AAD_HASH

-cmp $0, %r10
+test %r10, %r10
 jl _partial_incomplete_2_\@

 # GHASH computation for the last <16 Byte block
@@ -747,7 +747,7 @@ _encode_done_\@:
 pshufb %xmm2, %xmm9
 .endif
 # output encrypted Bytes
-cmp $0, %r10
+test %r10, %r10
 jl _partial_fill_\@
 mov %r13, %r12
 mov $16, %r13
@@ -2715,25 +2715,18 @@ SYM_FUNC_END(aesni_ctr_enc)
 pxor CTR, IV;

 /*
-* void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *dst,
-* const u8 *src, bool enc, le128 *iv)
+* void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
+* const u8 *src, unsigned int len, le128 *iv)
 */
-SYM_FUNC_START(aesni_xts_crypt8)
+SYM_FUNC_START(aesni_xts_encrypt)
 FRAME_BEGIN
-cmpb $0, %cl
-movl $0, %ecx
-movl $240, %r10d
-leaq _aesni_enc4, %r11
-leaq _aesni_dec4, %rax
-cmovel %r10d, %ecx
-cmoveq %rax, %r11

 movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK
 movups (IVP), IV

 mov 480(KEYP), KLEN
-addq %rcx, KEYP

+.Lxts_enc_loop4:
 movdqa IV, STATE1
 movdqu 0x00(INP), INC
 pxor INC, STATE1
@@ -2757,71 +2750,103 @@ SYM_FUNC_START(aesni_xts_crypt8)
 pxor INC, STATE4
 movdqu IV, 0x30(OUTP)

-CALL_NOSPEC r11
+call _aesni_enc4

 movdqu 0x00(OUTP), INC
 pxor INC, STATE1
 movdqu STATE1, 0x00(OUTP)

-_aesni_gf128mul_x_ble()
-movdqa IV, STATE1
-movdqu 0x40(INP), INC
-pxor INC, STATE1
-movdqu IV, 0x40(OUTP)
-
 movdqu 0x10(OUTP), INC
 pxor INC, STATE2
 movdqu STATE2, 0x10(OUTP)

-_aesni_gf128mul_x_ble()
-movdqa IV, STATE2
-movdqu 0x50(INP), INC
-pxor INC, STATE2
-movdqu IV, 0x50(OUTP)
-
 movdqu 0x20(OUTP), INC
 pxor INC, STATE3
 movdqu STATE3, 0x20(OUTP)

-_aesni_gf128mul_x_ble()
-movdqa IV, STATE3
-movdqu 0x60(INP), INC
-pxor INC, STATE3
-movdqu IV, 0x60(OUTP)
-
 movdqu 0x30(OUTP), INC
 pxor INC, STATE4
 movdqu STATE4, 0x30(OUTP)

 _aesni_gf128mul_x_ble()
-movdqa IV, STATE4
-movdqu 0x70(INP), INC
-pxor INC, STATE4
-movdqu IV, 0x70(OUTP)

-_aesni_gf128mul_x_ble()
+add $64, INP
+add $64, OUTP
+sub $64, LEN
+ja .Lxts_enc_loop4

 movups IV, (IVP)

-CALL_NOSPEC r11
-
-movdqu 0x40(OUTP), INC
-pxor INC, STATE1
-movdqu STATE1, 0x40(OUTP)
-
-movdqu 0x50(OUTP), INC
-pxor INC, STATE2
-movdqu STATE2, 0x50(OUTP)
-
-movdqu 0x60(OUTP), INC
-pxor INC, STATE3
-movdqu STATE3, 0x60(OUTP)
-
-movdqu 0x70(OUTP), INC
-pxor INC, STATE4
-movdqu STATE4, 0x70(OUTP)
-
 FRAME_END
 ret
-SYM_FUNC_END(aesni_xts_crypt8)
+SYM_FUNC_END(aesni_xts_encrypt)
+
+/*
+* void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
+* const u8 *src, unsigned int len, le128 *iv)
+*/
+SYM_FUNC_START(aesni_xts_decrypt)
+FRAME_BEGIN
+
+movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK
+movups (IVP), IV
+
+mov 480(KEYP), KLEN
+add $240, KEYP
+
+.Lxts_dec_loop4:
+movdqa IV, STATE1
+movdqu 0x00(INP), INC
+pxor INC, STATE1
+movdqu IV, 0x00(OUTP)
+
+_aesni_gf128mul_x_ble()
+movdqa IV, STATE2
+movdqu 0x10(INP), INC
+pxor INC, STATE2
+movdqu IV, 0x10(OUTP)
+
+_aesni_gf128mul_x_ble()
+movdqa IV, STATE3
+movdqu 0x20(INP), INC
+pxor INC, STATE3
+movdqu IV, 0x20(OUTP)
+
+_aesni_gf128mul_x_ble()
+movdqa IV, STATE4
+movdqu 0x30(INP), INC
+pxor INC, STATE4
+movdqu IV, 0x30(OUTP)
+
+call _aesni_dec4
+
+movdqu 0x00(OUTP), INC
+pxor INC, STATE1
+movdqu STATE1, 0x00(OUTP)
+
+movdqu 0x10(OUTP), INC
+pxor INC, STATE2
+movdqu STATE2, 0x10(OUTP)
+
+movdqu 0x20(OUTP), INC
+pxor INC, STATE3
+movdqu STATE3, 0x20(OUTP)
+
+movdqu 0x30(OUTP), INC
+pxor INC, STATE4
+movdqu STATE4, 0x30(OUTP)
+
+_aesni_gf128mul_x_ble()
+
+add $64, INP
+add $64, OUTP
+sub $64, LEN
+ja .Lxts_dec_loop4
+
+movups IV, (IVP)
+
+FRAME_END
+ret
+SYM_FUNC_END(aesni_xts_decrypt)
+
 #endif
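Many hunks above replace "cmp $0, reg" with "test reg, reg". Against zero the two set ZF and SF identically (and both clear CF and OF), while test needs no immediate byte and so typically encodes one byte shorter. A quick self-contained check of the flag equivalence (GCC or Clang on x86-64; the inline asm here is an illustration written for this note, not taken from the patch):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t flags_after_test(uint64_t v)
    {
        uint64_t f;
        /* Run "test v, v", then capture RFLAGS via pushfq/popq. */
        __asm__ volatile("test %1, %1\n\tpushfq\n\tpopq %0"
                         : "=r"(f) : "r"(v) : "cc");
        return f;
    }

    static uint64_t flags_after_cmp(uint64_t v)
    {
        uint64_t f;
        /* Run "cmp $0, v", then capture RFLAGS via pushfq/popq. */
        __asm__ volatile("cmp $0, %1\n\tpushfq\n\tpopq %0"
                         : "=r"(f) : "r"(v) : "cc");
        return f;
    }

    int main(void)
    {
        const uint64_t zf = 1 << 6, sf = 1 << 7; /* RFLAGS bit positions */
        uint64_t samples[] = { 0, 1, ~0ULL };

        for (int i = 0; i < 3; i++) {
            uint64_t a = flags_after_test(samples[i]);
            uint64_t b = flags_after_cmp(samples[i]);

            printf("v=%llu ZF/SF agree: %d\n",
                   (unsigned long long)samples[i],
                   ((a ^ b) & (zf | sf)) == 0);
        }
        return 0;
    }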
@@ -369,7 +369,7 @@ _initial_num_blocks_is_0\@:


 _initial_blocks_encrypted\@:
-cmp $0, %r13
+test %r13, %r13
 je _zero_cipher_left\@

 sub $128, %r13
@@ -528,7 +528,7 @@ _multiple_of_16_bytes\@:
 vmovdqu HashKey(arg2), %xmm13

 mov PBlockLen(arg2), %r12
-cmp $0, %r12
+test %r12, %r12
 je _partial_done\@

 #GHASH computation for the last <16 Byte block
@@ -573,7 +573,7 @@ _T_8\@:
 add $8, %r10
 sub $8, %r11
 vpsrldq $8, %xmm9, %xmm9
-cmp $0, %r11
+test %r11, %r11
 je _return_T_done\@
 _T_4\@:
 vmovd %xmm9, %eax
@@ -581,7 +581,7 @@ _T_4\@:
 add $4, %r10
 sub $4, %r11
 vpsrldq $4, %xmm9, %xmm9
-cmp $0, %r11
+test %r11, %r11
 je _return_T_done\@
 _T_123\@:
 vmovd %xmm9, %eax
@@ -625,7 +625,7 @@ _get_AAD_blocks\@:
 cmp $16, %r11
 jge _get_AAD_blocks\@
 vmovdqu \T8, \T7
-cmp $0, %r11
+test %r11, %r11
 je _get_AAD_done\@

 vpxor \T7, \T7, \T7
@@ -644,7 +644,7 @@ _get_AAD_rest8\@:
 vpxor \T1, \T7, \T7
 jmp _get_AAD_rest8\@
 _get_AAD_rest4\@:
-cmp $0, %r11
+test %r11, %r11
 jle _get_AAD_rest0\@
 mov (%r10), %eax
 movq %rax, \T1
@@ -749,7 +749,7 @@ _done_read_partial_block_\@:
 .macro PARTIAL_BLOCK GHASH_MUL CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \
 AAD_HASH ENC_DEC
 mov PBlockLen(arg2), %r13
-cmp $0, %r13
+test %r13, %r13
 je _partial_block_done_\@ # Leave Macro if no partial blocks
 # Read in input data without over reading
 cmp $16, \PLAIN_CYPH_LEN
@@ -801,7 +801,7 @@ _no_extra_mask_1_\@:
 vpshufb %xmm2, %xmm3, %xmm3
 vpxor %xmm3, \AAD_HASH, \AAD_HASH

-cmp $0, %r10
+test %r10, %r10
 jl _partial_incomplete_1_\@

 # GHASH computation for the last <16 Byte block
@@ -836,7 +836,7 @@ _no_extra_mask_2_\@:
 vpshufb %xmm2, %xmm9, %xmm9
 vpxor %xmm9, \AAD_HASH, \AAD_HASH

-cmp $0, %r10
+test %r10, %r10
 jl _partial_incomplete_2_\@

 # GHASH computation for the last <16 Byte block
@@ -856,7 +856,7 @@ _encode_done_\@:
 vpshufb %xmm2, %xmm9, %xmm9
 .endif
 # output encrypted Bytes
-cmp $0, %r10
+test %r10, %r10
 jl _partial_fill_\@
 mov %r13, %r12
 mov $16, %r13
@@ -97,6 +97,12 @@ asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 #define AVX_GEN2_OPTSIZE 640
 #define AVX_GEN4_OPTSIZE 4096

+asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
+const u8 *in, unsigned int len, u8 *iv);
+
+asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
+const u8 *in, unsigned int len, u8 *iv);
+
 #ifdef CONFIG_X86_64

 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
@@ -104,9 +110,6 @@ static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 const u8 *in, unsigned int len, u8 *iv);

-asmlinkage void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *out,
-const u8 *in, bool enc, le128 *iv);
-
 /* asmlinkage void aesni_gcm_enc()
 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
 * struct gcm_context_data. May be uninitialized.
@@ -547,14 +550,14 @@ static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec);
 }

-static void aesni_xts_enc8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
+static void aesni_xts_enc32(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-aesni_xts_crypt8(ctx, dst, src, true, iv);
+aesni_xts_encrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv);
 }

-static void aesni_xts_dec8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
+static void aesni_xts_dec32(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-aesni_xts_crypt8(ctx, dst, src, false, iv);
+aesni_xts_decrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv);
 }

 static const struct common_glue_ctx aesni_enc_xts = {
@@ -562,8 +565,8 @@ static const struct common_glue_ctx aesni_enc_xts = {
 .fpu_blocks_limit = 1,

 .funcs = { {
-.num_blocks = 8,
-.fn_u = { .xts = aesni_xts_enc8 }
+.num_blocks = 32,
+.fn_u = { .xts = aesni_xts_enc32 }
 }, {
 .num_blocks = 1,
 .fn_u = { .xts = aesni_xts_enc }
@@ -575,8 +578,8 @@ static const struct common_glue_ctx aesni_dec_xts = {
 .fpu_blocks_limit = 1,

 .funcs = { {
-.num_blocks = 8,
-.fn_u = { .xts = aesni_xts_dec8 }
+.num_blocks = 32,
+.fn_u = { .xts = aesni_xts_dec32 }
 }, {
 .num_blocks = 1,
 .fn_u = { .xts = aesni_xts_dec }
|
|||||||
regs->ax = -EFAULT;
|
regs->ax = -EFAULT;
|
||||||
|
|
||||||
instrumentation_end();
|
instrumentation_end();
|
||||||
syscall_exit_to_user_mode(regs);
|
local_irq_disable();
|
||||||
|
irqentry_exit_to_user_mode(regs);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -213,40 +214,6 @@ SYSCALL_DEFINE0(ni_syscall)
|
|||||||
return -ENOSYS;
|
return -ENOSYS;
|
||||||
}
|
}
|
||||||
|
|
||||||
noinstr bool idtentry_enter_nmi(struct pt_regs *regs)
|
|
||||||
{
|
|
||||||
bool irq_state = lockdep_hardirqs_enabled();
|
|
||||||
|
|
||||||
__nmi_enter();
|
|
||||||
lockdep_hardirqs_off(CALLER_ADDR0);
|
|
||||||
lockdep_hardirq_enter();
|
|
||||||
rcu_nmi_enter();
|
|
||||||
|
|
||||||
instrumentation_begin();
|
|
||||||
trace_hardirqs_off_finish();
|
|
||||||
ftrace_nmi_enter();
|
|
||||||
instrumentation_end();
|
|
||||||
|
|
||||||
return irq_state;
|
|
||||||
}
|
|
||||||
|
|
||||||
noinstr void idtentry_exit_nmi(struct pt_regs *regs, bool restore)
|
|
||||||
{
|
|
||||||
instrumentation_begin();
|
|
||||||
ftrace_nmi_exit();
|
|
||||||
if (restore) {
|
|
||||||
trace_hardirqs_on_prepare();
|
|
||||||
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
|
|
||||||
}
|
|
||||||
instrumentation_end();
|
|
||||||
|
|
||||||
rcu_nmi_exit();
|
|
||||||
lockdep_hardirq_exit();
|
|
||||||
if (restore)
|
|
||||||
lockdep_hardirqs_on(CALLER_ADDR0);
|
|
||||||
__nmi_exit();
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef CONFIG_XEN_PV
|
#ifdef CONFIG_XEN_PV
|
||||||
#ifndef CONFIG_PREEMPTION
|
#ifndef CONFIG_PREEMPTION
|
||||||
/*
|
/*
|
||||||
|
@@ -210,6 +210,8 @@ SYM_CODE_START(entry_SYSCALL_compat)
 /* Switch to the kernel stack */
 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp

+SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL)
+
 /* Construct struct pt_regs on stack */
 pushq $__USER32_DS /* pt_regs->ss */
 pushq %r8 /* pt_regs->sp */
@@ -3562,11 +3562,16 @@ static int intel_pmu_hw_config(struct perf_event *event)
 return ret;

 if (event->attr.precise_ip) {
+if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
+return -EINVAL;
+
 if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
 if (!(event->attr.sample_type &
-~intel_pmu_large_pebs_flags(event)))
+~intel_pmu_large_pebs_flags(event))) {
 event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
+event->attach_state |= PERF_ATTACH_SCHED_CB;
+}
 }
 if (x86_pmu.pebs_aliases)
 x86_pmu.pebs_aliases(event);
@@ -3579,6 +3584,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 ret = intel_pmu_setup_lbr_filter(event);
 if (ret)
 return ret;
+event->attach_state |= PERF_ATTACH_SCHED_CB;

 /*
 * BTS is set up earlier in this path, so don't account twice
@@ -1894,7 +1894,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
 */
 if (!pebs_status && cpuc->pebs_enabled &&
 !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
-pebs_status = cpuc->pebs_enabled;
+pebs_status = p->status = cpuc->pebs_enabled;

 bit = find_first_bit((unsigned long *)&pebs_status,
 x86_pmu.max_pebs_events);
@@ -11,9 +11,6 @@

 #include <asm/irq_stack.h>

-bool idtentry_enter_nmi(struct pt_regs *regs);
-void idtentry_exit_nmi(struct pt_regs *regs, bool irq_state);
-
 /**
 * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
 * No error code pushed by hardware
@@ -23,6 +23,8 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx);
 int insn_get_code_seg_params(struct pt_regs *regs);
 int insn_fetch_from_user(struct pt_regs *regs,
 unsigned char buf[MAX_INSN_SIZE]);
+int insn_fetch_from_user_inatomic(struct pt_regs *regs,
+unsigned char buf[MAX_INSN_SIZE]);
 bool insn_decode(struct insn *insn, struct pt_regs *regs,
 unsigned char buf[MAX_INSN_SIZE], int buf_size);

@@ -552,15 +552,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
 *size = fpu_kernel_xstate_size;
 }

-/*
-* Thread-synchronous status.
-*
-* This is different from the flags in that nobody else
-* ever touches our thread-synchronous status, so we don't
-* have to worry about atomic accesses.
-*/
-#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
-
 static inline void
 native_load_sp0(unsigned long sp0)
 {
@@ -25,6 +25,7 @@ void __end_SYSENTER_singlestep_region(void);
 void entry_SYSENTER_compat(void);
 void __end_entry_SYSENTER_compat(void);
 void entry_SYSCALL_compat(void);
+void entry_SYSCALL_compat_safe_stack(void);
 void entry_INT80_compat(void);
 #ifdef CONFIG_XEN_PV
 void xen_entry_INT80_compat(void);
@@ -94,6 +94,8 @@ struct pt_regs {
 #include <asm/paravirt_types.h>
 #endif

+#include <asm/proto.h>
+
 struct cpuinfo_x86;
 struct task_struct;

@@ -175,6 +177,19 @@ static inline bool any_64bit_mode(struct pt_regs *regs)
 #ifdef CONFIG_X86_64
 #define current_user_stack_pointer() current_pt_regs()->sp
 #define compat_user_stack_pointer() current_pt_regs()->sp

+static inline bool ip_within_syscall_gap(struct pt_regs *regs)
+{
+bool ret = (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
+regs->ip < (unsigned long)entry_SYSCALL_64_safe_stack);
+
+#ifdef CONFIG_IA32_EMULATION
+ret = ret || (regs->ip >= (unsigned long)entry_SYSCALL_compat &&
+regs->ip < (unsigned long)entry_SYSCALL_compat_safe_stack);
+#endif
+
+return ret;
+}
 #endif

 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
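The new helper above centralizes one delicate test: between the SYSCALL entry label and the matching safe-stack label, RSP still holds a user-controlled value and must not be trusted. A toy standalone model of the range check (addresses made up for the demo; the kernel compares against the real entry labels):

    #include <stdbool.h>
    #include <stdio.h>

    /* An instruction pointer inside [entry, safe_stack) means the stack
     * switch has not happened yet, so sp is still the user's. */
    static const unsigned long entry_syscall    = 0x1000;
    static const unsigned long entry_safe_stack = 0x1040;

    static bool ip_within_syscall_gap_model(unsigned long ip)
    {
        return ip >= entry_syscall && ip < entry_safe_stack;
    }

    int main(void)
    {
        printf("%d\n", ip_within_syscall_gap_model(0x1008)); /* 1: don't trust sp */
        printf("%d\n", ip_within_syscall_gap_model(0x2000)); /* 0: sp is kernel's */
        return 0;
    }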
@@ -216,10 +216,31 @@ static inline int arch_within_stack_frames(const void * const stack,

 #endif

+/*
+* Thread-synchronous status.
+*
+* This is different from the flags in that nobody else
+* ever touches our thread-synchronous status, so we don't
+* have to worry about atomic accesses.
+*/
+#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
+
+#ifndef __ASSEMBLY__
 #ifdef CONFIG_COMPAT
 #define TS_I386_REGS_POKED 0x0004 /* regs poked by 32-bit ptracer */
+#define TS_COMPAT_RESTART 0x0008
+
+#define arch_set_restart_data arch_set_restart_data
+
+static inline void arch_set_restart_data(struct restart_block *restart)
+{
+struct thread_info *ti = current_thread_info();
+if (ti->status & TS_COMPAT)
+ti->status |= TS_COMPAT_RESTART;
+else
+ti->status &= ~TS_COMPAT_RESTART;
+}
 #endif
-#ifndef __ASSEMBLY__

 #ifdef CONFIG_X86_32
 #define in_ia32_syscall() true
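TS_COMPAT_RESTART decouples "which restart syscall number to use" from TS_COMPAT itself, which may already be clear by the time the restart decision is made. The latch is small enough to model directly (constants copied from the hunk; the surrounding program is illustrative):

    #include <stdio.h>

    #define TS_COMPAT         0x0002
    #define TS_COMPAT_RESTART 0x0008

    /* Latch the compat bit into a dedicated restart bit at the moment the
     * restart block is set up, mirroring arch_set_restart_data() above. */
    static unsigned int set_restart_data(unsigned int status)
    {
        if (status & TS_COMPAT)
            status |= TS_COMPAT_RESTART;
        else
            status &= ~TS_COMPAT_RESTART;
        return status;
    }

    int main(void)
    {
        unsigned int status = set_restart_data(TS_COMPAT);

        status &= ~TS_COMPAT; /* compat bit may be gone by restart time */
        printf("use ia32 restart: %d\n", !!(status & TS_COMPAT_RESTART)); /* 1 */
        return 0;
    }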
@@ -2317,6 +2317,11 @@ static int cpuid_to_apicid[] = {
 [0 ... NR_CPUS - 1] = -1,
 };

+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+return phys_id == cpuid_to_apicid[cpu];
+}
+
 #ifdef CONFIG_SMP
 /**
 * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
@@ -1033,6 +1033,16 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
 if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {
 irq = mp_irqs[idx].srcbusirq;
 legacy = mp_is_legacy_irq(irq);
+/*
+* IRQ2 is unusable for historical reasons on systems which
+* have a legacy PIC. See the comment vs. IRQ2 further down.
+*
+* If this gets removed at some point then the related code
+* in lapic_assign_system_vectors() needs to be adjusted as
+* well.
+*/
+if (legacy && irq == PIC_CASCADE_IR)
+return -EINVAL;
 }

 mutex_lock(&ioapic_mutex);
@@ -1986,7 +1986,7 @@ void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;

 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 {
-bool irq_state;
+irqentry_state_t irq_state;

 WARN_ON_ONCE(user_mode(regs));

@@ -1998,7 +1998,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 mce_check_crashing_cpu())
 return;

-irq_state = idtentry_enter_nmi(regs);
+irq_state = irqentry_nmi_enter(regs);
 /*
 * The call targets are marked noinstr, but objtool can't figure
 * that out because it's an indirect call. Annotate it.
@@ -2009,7 +2009,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 if (regs->flags & X86_EFLAGS_IF)
 trace_hardirqs_on_prepare();
 instrumentation_end();
-idtentry_exit_nmi(regs, irq_state);
+irqentry_nmi_exit(regs, irq_state);
 }

 static __always_inline void exc_machine_check_user(struct pt_regs *regs)
@@ -269,21 +269,20 @@ static void __init kvmclock_init_mem(void)

 static int __init kvm_setup_vsyscall_timeinfo(void)
 {
-#ifdef CONFIG_X86_64
-u8 flags;
+kvmclock_init_mem();

-if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
-return 0;
+#ifdef CONFIG_X86_64
+if (per_cpu(hv_clock_per_cpu, 0) && kvmclock_vsyscall) {
+u8 flags;

 flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
 if (!(flags & PVCLOCK_TSC_STABLE_BIT))
 return 0;

 kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
+}
 #endif

-kvmclock_init_mem();
-
 return 0;
 }
 early_initcall(kvm_setup_vsyscall_timeinfo);
@@ -475,7 +475,7 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7);

 DEFINE_IDTENTRY_RAW(exc_nmi)
 {
-bool irq_state;
+irqentry_state_t irq_state;

 /*
 * Re-enable NMIs right here when running as an SEV-ES guest. This might
@@ -502,14 +502,14 @@ nmi_restart:

 this_cpu_write(nmi_dr7, local_db_save());

-irq_state = idtentry_enter_nmi(regs);
+irq_state = irqentry_nmi_enter(regs);

 inc_irq_stat(__nmi_count);

 if (!ignore_nmis)
 default_do_nmi(regs);

-idtentry_exit_nmi(regs, irq_state);
+irqentry_nmi_exit(regs, irq_state);

 local_db_restore(this_cpu_read(nmi_dr7));

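The conversions above all follow one pattern: the NMI-style entry helper returns an opaque state snapshot that the matching exit helper consumes, instead of a bare bool. A toy model of that token-passing shape (names and fields invented for this sketch; the real irqentry_state_t records lockdep/RCU state):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { bool lockdep; bool irqs_enabled; } entry_state_t;

    /* "Enter" captures whatever must be restored later and returns it. */
    static entry_state_t nmi_enter_model(void)
    {
        entry_state_t s = { .lockdep = true, .irqs_enabled = false };
        /* ... disable instrumentation, note previous state ... */
        return s;
    }

    /* "Exit" restores exactly what the paired enter recorded. */
    static void nmi_exit_model(entry_state_t s)
    {
        printf("restored lockdep=%d irqs=%d\n", s.lockdep, s.irqs_enabled);
    }

    int main(void)
    {
        entry_state_t s = nmi_enter_model();
        nmi_exit_model(s);
        return 0;
    }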
@@ -121,8 +121,18 @@ static void __init setup_vc_stacks(int cpu)
 cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
 }

-static __always_inline bool on_vc_stack(unsigned long sp)
+static __always_inline bool on_vc_stack(struct pt_regs *regs)
 {
+unsigned long sp = regs->sp;
+
+/* User-mode RSP is not trusted */
+if (user_mode(regs))
+return false;
+
+/* SYSCALL gap still has user-mode RSP */
+if (ip_within_syscall_gap(regs))
+return false;
+
 return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
 }

@@ -144,7 +154,7 @@ void noinstr __sev_es_ist_enter(struct pt_regs *regs)
 old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

 /* Make room on the IST stack */
-if (on_vc_stack(regs->sp))
+if (on_vc_stack(regs))
 new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
 else
 new_ist = old_ist - sizeof(old_ist);
@@ -248,7 +258,7 @@ static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
 int res;

 if (user_mode(ctxt->regs)) {
-res = insn_fetch_from_user(ctxt->regs, buffer);
+res = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
 if (!res) {
 ctxt->fi.vector = X86_TRAP_PF;
 ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
@@ -1248,13 +1258,12 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
 DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
 {
 struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
+irqentry_state_t irq_state;
 struct ghcb_state state;
 struct es_em_ctxt ctxt;
 enum es_result result;
 struct ghcb *ghcb;

-lockdep_assert_irqs_disabled();
-
 /*
 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
 */
@@ -1263,6 +1272,8 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
 return;
 }

+irq_state = irqentry_nmi_enter(regs);
+lockdep_assert_irqs_disabled();
 instrumentation_begin();

 /*
@@ -1325,6 +1336,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)

 out:
 instrumentation_end();
+irqentry_nmi_exit(regs, irq_state);

 return;

@@ -766,30 +766,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)

 static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
 {
-/*
-* This function is fundamentally broken as currently
-* implemented.
-*
-* The idea is that we want to trigger a call to the
-* restart_block() syscall and that we want in_ia32_syscall(),
-* in_x32_syscall(), etc. to match whatever they were in the
-* syscall being restarted. We assume that the syscall
-* instruction at (regs->ip - 2) matches whatever syscall
-* instruction we used to enter in the first place.
-*
-* The problem is that we can get here when ptrace pokes
-* syscall-like values into regs even if we're not in a syscall
-* at all.
-*
-* For now, we maintain historical behavior and guess based on
-* stored state. We could do better by saving the actual
-* syscall arch in restart_block or (with caveats on x32) by
-* checking if regs->ip points to 'int $0x80'. The current
-* behavior is incorrect if a tracer has a different bitness
-* than the tracee.
-*/
 #ifdef CONFIG_IA32_EMULATION
-if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED))
+if (current_thread_info()->status & TS_COMPAT_RESTART)
 return __NR_ia32_restart_syscall;
 #endif
 #ifdef CONFIG_X86_X32_ABI
@@ -406,7 +406,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
 }
 #endif

-idtentry_enter_nmi(regs);
+irqentry_nmi_enter(regs);
 instrumentation_begin();
 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

@@ -652,12 +652,13 @@ DEFINE_IDTENTRY_RAW(exc_int3)
 instrumentation_end();
 irqentry_exit_to_user_mode(regs);
 } else {
-bool irq_state = idtentry_enter_nmi(regs);
+irqentry_state_t irq_state = irqentry_nmi_enter(regs);

 instrumentation_begin();
 if (!do_int3(regs))
 die("int3", regs, 0);
 instrumentation_end();
-idtentry_exit_nmi(regs, irq_state);
+irqentry_nmi_exit(regs, irq_state);
 }
 }

@@ -686,8 +687,7 @@ asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *r
 * In the SYSCALL entry path the RSP value comes from user-space - don't
 * trust it and switch to the current kernel stack
 */
-if (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
-regs->ip < (unsigned long)entry_SYSCALL_64_safe_stack) {
+if (ip_within_syscall_gap(regs)) {
 sp = this_cpu_read(cpu_current_top_of_stack);
 goto sync;
 }
@@ -852,7 +852,7 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 * includes the entry stack is excluded for everything.
 */
 unsigned long dr7 = local_db_save();
-bool irq_state = idtentry_enter_nmi(regs);
+irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 instrumentation_begin();

 /*
@@ -909,7 +909,7 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 regs->flags &= ~X86_EFLAGS_TF;
 out:
 instrumentation_end();
-idtentry_exit_nmi(regs, irq_state);
+irqentry_nmi_exit(regs, irq_state);

 local_db_restore(dr7);
 }
@@ -927,7 +927,7 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,

 /*
 * NB: We can't easily clear DR7 here because
-* idtentry_exit_to_usermode() can invoke ptrace, schedule, access
+* irqentry_exit_to_usermode() can invoke ptrace, schedule, access
 * user memory, etc. This means that a recursive #DB is possible. If
 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
 * Since we're not on the IST stack right now, everything will be
@@ -367,8 +367,8 @@ static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
 if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
 return false;

-*ip = regs->ip;
-*sp = regs->sp;
+*ip = READ_ONCE_NOCHECK(regs->ip);
+*sp = READ_ONCE_NOCHECK(regs->sp);
 return true;
 }

@@ -380,8 +380,8 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
 if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
 return false;

-*ip = regs->ip;
-*sp = regs->sp;
+*ip = READ_ONCE_NOCHECK(regs->ip);
+*sp = READ_ONCE_NOCHECK(regs->sp);
 return true;
 }

@@ -402,12 +402,12 @@ static bool get_reg(struct unwind_state *state, unsigned int reg_off,
 return false;

 if (state->full_regs) {
-*val = ((unsigned long *)state->regs)[reg];
+*val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
 return true;
 }

 if (state->prev_regs) {
-*val = ((unsigned long *)state->prev_regs)[reg];
+*val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
 return true;
 }

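The unwinder hunks above wrap reads of potentially-racy stack memory so each load happens exactly once and, in the kernel, without KASAN instrumentation. A minimal stand-in for the single-access part (the NOCHECK/KASAN aspect is kernel-specific and not modeled here; __typeof__ is a GCC/Clang extension):

    #include <stdio.h>

    /* One volatile access: the compiler may neither tear it into several
     * loads nor re-read the location later. */
    #define READ_ONCE_SKETCH(x) (*(const volatile __typeof__(x) *)&(x))

    int main(void)
    {
        unsigned long maybe_racy = 42;
        unsigned long v = READ_ONCE_SKETCH(maybe_racy);

        printf("%lu\n", v);
        return 0;
    }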
@@ -1641,7 +1641,16 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
 }

 if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
-kvm_wait_lapic_expire(vcpu);
+/*
+* Ensure the guest's timer has truly expired before posting an
+* interrupt. Open code the relevant checks to avoid querying
+* lapic_timer_int_injected(), which will be false since the
+* interrupt isn't yet injected. Waiting until after injecting
+* is not an option since that won't help a posted interrupt.
+*/
+if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
+vcpu->arch.apic->lapic_timer.timer_advance_ns)
+__kvm_wait_lapic_expire(vcpu);
 kvm_apic_inject_pending_timer_irqs(apic);
 return;
 }
@@ -1192,6 +1192,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

+svm_set_cr4(&svm->vcpu, 0);
 svm_set_efer(&svm->vcpu, 0);
 save->dr6 = 0xffff0ff0;
 kvm_set_rflags(&svm->vcpu, 2);
@@ -404,6 +404,8 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
 __reserved_bits |= X86_CR4_UMIP; \
 if (!__cpu_has(__c, X86_FEATURE_VMX)) \
 __reserved_bits |= X86_CR4_VMXE; \
+if (!__cpu_has(__c, X86_FEATURE_PCID)) \
+__reserved_bits |= X86_CR4_PCIDE; \
 __reserved_bits; \
 })

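The added lines extend the pattern already visible in the hunk: every CPUID feature the guest lacks turns the matching CR4 bit into a reserved bit. Stripped of the macro plumbing, the mask construction reduces to this (bit positions are the architectural CR4.VMXE and CR4.PCIDE values; the standalone frame is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define X86_CR4_VMXE  (1UL << 13)
    #define X86_CR4_PCIDE (1UL << 17)

    /* No feature -> the corresponding CR4 bit must stay clear. */
    static unsigned long cr4_reserved_bits(bool has_vmx, bool has_pcid)
    {
        unsigned long reserved = 0;

        if (!has_vmx)
            reserved |= X86_CR4_VMXE;
        if (!has_pcid)
            reserved |= X86_CR4_PCIDE;
        return reserved;
    }

    int main(void)
    {
        printf("%#lx\n", cr4_reserved_bits(false, false)); /* 0x22000 */
        return 0;
    }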
@@ -1415,6 +1415,25 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
 }
 }

+static unsigned long insn_get_effective_ip(struct pt_regs *regs)
+{
+unsigned long seg_base = 0;
+
+/*
+* If not in user-space long mode, a custom code segment could be in
+* use. This is true in protected mode (if the process defined a local
+* descriptor table), or virtual-8086 mode. In most of the cases
+* seg_base will be zero as in USER_CS.
+*/
+if (!user_64bit_mode(regs)) {
+seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
+if (seg_base == -1L)
+return 0;
+}
+
+return seg_base + regs->ip;
+}
+
 /**
 * insn_fetch_from_user() - Copy instruction bytes from user-space memory
 * @regs: Structure with register values as seen when entering kernel mode
@@ -1431,24 +1450,43 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
 */
 int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
 {
-unsigned long seg_base = 0;
+unsigned long ip;
 int not_copied;

-/*
-* If not in user-space long mode, a custom code segment could be in
-* use. This is true in protected mode (if the process defined a local
-* descriptor table), or virtual-8086 mode. In most of the cases
-* seg_base will be zero as in USER_CS.
-*/
-if (!user_64bit_mode(regs)) {
-seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
-if (seg_base == -1L)
-return 0;
-}
-
-not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip),
-MAX_INSN_SIZE);
+ip = insn_get_effective_ip(regs);
+if (!ip)
+return 0;
+
+not_copied = copy_from_user(buf, (void __user *)ip, MAX_INSN_SIZE);

 return MAX_INSN_SIZE - not_copied;
 }
+
+/**
+* insn_fetch_from_user_inatomic() - Copy instruction bytes from user-space memory
+* while in atomic code
+* @regs: Structure with register values as seen when entering kernel mode
+* @buf: Array to store the fetched instruction
+*
+* Gets the linear address of the instruction and copies the instruction bytes
+* to the buf. This function must be used in atomic context.
+*
+* Returns:
+*
+* Number of instruction bytes copied.
+*
+* 0 if nothing was copied.
+*/
+int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
+{
+unsigned long ip;
+int not_copied;
+
+ip = insn_get_effective_ip(regs);
+if (!ip)
+return 0;
+
+not_copied = __copy_from_user_inatomic(buf, (void __user *)ip, MAX_INSN_SIZE);
+
+return MAX_INSN_SIZE - not_copied;
+}
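Both fetch variants above return MAX_INSN_SIZE minus the number of bytes the copy could not read, so 0 means "nothing fetched" and the caller takes the fault path. A caller-side sketch of that return convention, with a fake fetcher standing in for the real one:

    #include <stdio.h>
    #include <string.h>

    #define MAX_INSN_SIZE 15

    /* Fake fetcher: pretend only 'avail' bytes were copyable. */
    static int fetch_model(unsigned char buf[MAX_INSN_SIZE], int avail)
    {
        int not_copied = MAX_INSN_SIZE - avail;

        memset(buf, 0x90, avail); /* NOPs stand in for real bytes */
        return MAX_INSN_SIZE - not_copied;
    }

    int main(void)
    {
        unsigned char buf[MAX_INSN_SIZE];
        int res = fetch_model(buf, 4);

        if (!res)
            printf("page fault path\n");
        else
            printf("decode from %d fetched bytes\n", res); /* 4 */
        return 0;
    }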
@@ -318,6 +318,22 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
 return 0;
 }

+static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
+const struct blk_zone_range *zrange)
+{
+loff_t start, end;
+
+if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
+zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
+/* Out of range */
+return -EINVAL;
+
+start = zrange->sector << SECTOR_SHIFT;
+end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
+
+return truncate_bdev_range(bdev, mode, start, end);
+}
+
 /*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
@@ -329,6 +345,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 struct request_queue *q;
 struct blk_zone_range zrange;
 enum req_opf op;
+int ret;

 if (!argp)
 return -EINVAL;
@@ -352,6 +369,11 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 switch (cmd) {
 case BLKRESETZONE:
 op = REQ_OP_ZONE_RESET;

+/* Invalidate the page cache, including dirty pages. */
+ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
+if (ret)
+return ret;
 break;
 case BLKOPENZONE:
 op = REQ_OP_ZONE_OPEN;
@@ -366,8 +388,20 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 return -ENOTTY;
 }

-return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
+ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
 GFP_KERNEL);
+
+/*
+* Invalidate the page cache again for zone reset: writes can only be
+* direct for zoned devices so concurrent writes would not add any page
+* to the page cache after/during reset. The page cache may be filled
+* again due to concurrent reads though and dropping the pages for
+* these is fine.
+*/
+if (!ret && cmd == BLKRESETZONE)
+ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
+
+return ret;
 }

 static inline unsigned long *blk_alloc_zone_bitmap(int node,
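With this change a zone reset issued through the ioctl also drops any cached pages for the reset range. For reference, the userspace side of the same operation uses the standard <linux/blkzoned.h> UAPI (the device path and zone size below are examples; error handling trimmed):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/blkzoned.h>

    int main(void)
    {
        int fd = open("/dev/nullb0", O_RDWR); /* example zoned device */
        struct blk_zone_range range = {
            .sector = 0,          /* 512-byte units */
            .nr_sectors = 524288, /* one 256 MiB zone */
        };

        if (fd < 0 || ioctl(fd, BLKRESETZONE, &range))
            perror("BLKRESETZONE");
        if (fd >= 0)
            close(fd);
        return 0;
    }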
@@ -772,7 +772,7 @@ config CRYPTO_POLY1305_X86_64

 config CRYPTO_POLY1305_MIPS
 tristate "Poly1305 authenticator algorithm (MIPS optimized)"
-depends on CPU_MIPS32 || (CPU_MIPS64 && 64BIT)
+depends on MIPS
 select CRYPTO_ARCH_HAVE_LIB_POLY1305

 config CRYPTO_MD4
@@ -284,6 +284,7 @@ struct acpi_object_addr_handler {
|
|||||||
acpi_adr_space_handler handler;
|
acpi_adr_space_handler handler;
|
||||||
struct acpi_namespace_node *node; /* Parent device */
|
struct acpi_namespace_node *node; /* Parent device */
|
||||||
void *context;
|
void *context;
|
||||||
|
acpi_mutex context_mutex;
|
||||||
acpi_adr_space_setup setup;
|
acpi_adr_space_setup setup;
|
||||||
union acpi_operand_object *region_list; /* Regions using this handler */
|
union acpi_operand_object *region_list; /* Regions using this handler */
|
||||||
union acpi_operand_object *next;
|
union acpi_operand_object *next;
|
||||||
|
@@ -489,6 +489,13 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node,
|
|||||||
|
|
||||||
/* Init handler obj */
|
/* Init handler obj */
|
||||||
|
|
||||||
|
status =
|
||||||
|
acpi_os_create_mutex(&handler_obj->address_space.context_mutex);
|
||||||
|
if (ACPI_FAILURE(status)) {
|
||||||
|
acpi_ut_remove_reference(handler_obj);
|
||||||
|
goto unlock_and_exit;
|
||||||
|
}
|
||||||
|
|
||||||
handler_obj->address_space.space_id = (u8)space_id;
|
handler_obj->address_space.space_id = (u8)space_id;
|
||||||
handler_obj->address_space.handler_flags = flags;
|
handler_obj->address_space.handler_flags = flags;
|
||||||
handler_obj->address_space.region_list = NULL;
|
handler_obj->address_space.region_list = NULL;
|
||||||
|
@@ -111,6 +111,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 	union acpi_operand_object *region_obj2;
 	void *region_context = NULL;
 	struct acpi_connection_info *context;
+	acpi_mutex context_mutex;
+	u8 context_locked;
 	acpi_physical_address address;
 
 	ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
@@ -135,6 +137,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 	}
 
 	context = handler_desc->address_space.context;
+	context_mutex = handler_desc->address_space.context_mutex;
+	context_locked = FALSE;
 
 	/*
 	 * It may be the case that the region has never been initialized.
@@ -203,41 +207,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 	handler = handler_desc->address_space.handler;
 	address = (region_obj->region.address + region_offset);
 
-	/*
-	 * Special handling for generic_serial_bus and general_purpose_io:
-	 * There are three extra parameters that must be passed to the
-	 * handler via the context:
-	 *   1) Connection buffer, a resource template from Connection() op
-	 *   2) Length of the above buffer
-	 *   3) Actual access length from the access_as() op
-	 *
-	 * In addition, for general_purpose_io, the Address and bit_width fields
-	 * are defined as follows:
-	 *   1) Address is the pin number index of the field (bit offset from
-	 *      the previous Connection)
-	 *   2) bit_width is the actual bit length of the field (number of pins)
-	 */
-	if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
-	    context && field_obj) {
-
-		/* Get the Connection (resource_template) buffer */
-
-		context->connection = field_obj->field.resource_buffer;
-		context->length = field_obj->field.resource_length;
-		context->access_length = field_obj->field.access_length;
-	}
-	if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
-	    context && field_obj) {
-
-		/* Get the Connection (resource_template) buffer */
-
-		context->connection = field_obj->field.resource_buffer;
-		context->length = field_obj->field.resource_length;
-		context->access_length = field_obj->field.access_length;
-		address = field_obj->field.pin_number_index;
-		bit_width = field_obj->field.bit_length;
-	}
-
 	ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
 			  "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
 			  &region_obj->region.handler->address_space, handler,
@@ -255,11 +224,71 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 		acpi_ex_exit_interpreter();
 	}
 
+	/*
+	 * Special handling for generic_serial_bus and general_purpose_io:
+	 * There are three extra parameters that must be passed to the
+	 * handler via the context:
+	 *   1) Connection buffer, a resource template from Connection() op
+	 *   2) Length of the above buffer
+	 *   3) Actual access length from the access_as() op
+	 *
+	 * Since we pass these extra parameters via the context, which is
+	 * shared between threads, we must lock the context to avoid these
+	 * parameters being changed from another thread before the handler
+	 * has completed running.
+	 *
+	 * In addition, for general_purpose_io, the Address and bit_width fields
+	 * are defined as follows:
+	 *   1) Address is the pin number index of the field (bit offset from
+	 *      the previous Connection)
+	 *   2) bit_width is the actual bit length of the field (number of pins)
+	 */
+	if ((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) &&
+	    context && field_obj) {
+
+		status =
+		    acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER);
+		if (ACPI_FAILURE(status)) {
+			goto re_enter_interpreter;
+		}
+
+		context_locked = TRUE;
+
+		/* Get the Connection (resource_template) buffer */
+
+		context->connection = field_obj->field.resource_buffer;
+		context->length = field_obj->field.resource_length;
+		context->access_length = field_obj->field.access_length;
+	}
+	if ((region_obj->region.space_id == ACPI_ADR_SPACE_GPIO) &&
+	    context && field_obj) {
+
+		status =
+		    acpi_os_acquire_mutex(context_mutex, ACPI_WAIT_FOREVER);
+		if (ACPI_FAILURE(status)) {
+			goto re_enter_interpreter;
+		}
+
+		context_locked = TRUE;
+
+		/* Get the Connection (resource_template) buffer */
+
+		context->connection = field_obj->field.resource_buffer;
+		context->length = field_obj->field.resource_length;
+		context->access_length = field_obj->field.access_length;
+		address = field_obj->field.pin_number_index;
+		bit_width = field_obj->field.bit_length;
+	}
+
 	/* Call the handler */
 
 	status = handler(function, address, bit_width, value, context,
 			 region_obj2->extra.region_context);
 
+	if (context_locked) {
+		acpi_os_release_mutex(context_mutex);
+	}
+
 	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status, "Returned by Handler for [%s]",
 				acpi_ut_get_region_name(region_obj->region.
@@ -276,6 +305,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 		}
 	}
 
+re_enter_interpreter:
 	if (!(handler_desc->address_space.handler_flags &
 	      ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
 		/*
@@ -201,6 +201,8 @@ acpi_remove_address_space_handler(acpi_handle device,
 
 			/* Now we can delete the handler object */
 
+			acpi_os_release_mutex(handler_obj->address_space.
+					      context_mutex);
 			acpi_ut_remove_reference(handler_obj);
 			goto unlock_and_exit;
 		}
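The three ACPICA hunks above implement one pattern: parameters handed to an operation-region handler travel through a context object shared between threads, so the context stays locked from the moment it is filled until the handler returns, and the mutex lives and dies with the handler object. A userspace analogue of that pattern, with pthreads standing in for the ACPI OSL mutex services (all names here are illustrative, not ACPICA API):

    #include <pthread.h>
    #include <stddef.h>
    #include <stdio.h>

    struct shared_context {          /* like acpi_connection_info */
        const void *connection;
        size_t length;
    };

    static struct shared_context ctx;
    static pthread_mutex_t ctx_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int dispatch(const void *conn, size_t len,
                        int (*handler)(struct shared_context *))
    {
        int ret;

        pthread_mutex_lock(&ctx_mutex);   /* acpi_os_acquire_mutex() */
        ctx.connection = conn;            /* fill the shared parameters ... */
        ctx.length = len;
        ret = handler(&ctx);              /* ... and keep them stable here */
        pthread_mutex_unlock(&ctx_mutex); /* acpi_os_release_mutex() */
        return ret;
    }

    static int handler(struct shared_context *c)
    {
        printf("handler sees %zu bytes\n", c->length);
        return 0;
    }

    int main(void)
    {
        static const char buf[16];

        return dispatch(buf, sizeof(buf), handler);
    }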
@@ -1730,3 +1730,58 @@ void __init acpi_iort_init(void)
 
 	iort_init_platform_devices();
 }
+
+#ifdef CONFIG_ZONE_DMA
+/*
+ * Extract the highest CPU physical address accessible to all DMA masters in
+ * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
+ */
+phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
+{
+	phys_addr_t limit = PHYS_ADDR_MAX;
+	struct acpi_iort_node *node, *end;
+	struct acpi_table_iort *iort;
+	acpi_status status;
+	int i;
+
+	if (acpi_disabled)
+		return limit;
+
+	status = acpi_get_table(ACPI_SIG_IORT, 0,
+				(struct acpi_table_header **)&iort);
+	if (ACPI_FAILURE(status))
+		return limit;
+
+	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
+	end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);
+
+	for (i = 0; i < iort->node_count; i++) {
+		if (node >= end)
+			break;
+
+		switch (node->type) {
+			struct acpi_iort_named_component *ncomp;
+			struct acpi_iort_root_complex *rc;
+			phys_addr_t local_limit;
+
+		case ACPI_IORT_NODE_NAMED_COMPONENT:
+			ncomp = (struct acpi_iort_named_component *)node->node_data;
+			local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
+			limit = min_not_zero(limit, local_limit);
+			break;
+
+		case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
+			if (node->revision < 1)
+				break;
+
+			rc = (struct acpi_iort_root_complex *)node->node_data;
+			local_limit = DMA_BIT_MASK(rc->memory_address_limit);
+			limit = min_not_zero(limit, local_limit);
+			break;
+		}
+		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
+	}
+	acpi_put_table(&iort->header);
+	return limit;
+}
+#endif
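The clamping rule the new helper relies on is easy to miss: min_not_zero() picks the smaller of two limits while treating 0 as "no limit", and DMA_BIT_MASK(0) is 0, so an IORT node carrying no usable address-limit field never shrinks the result. A standalone sketch of that arithmetic; the macros are re-stated here for illustration (the kernel versions live in linux/dma-mapping.h and linux/minmax.h) and the second one uses a GNU C statement expression, mirroring kernel style:

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_BIT_MASK(n) ((n) == 64 ? ~0ULL : (1ULL << (n)) - 1)
    #define min_not_zero(x, y) ({                                   \
        uint64_t _x = (x), _y = (y);                                \
        _x == 0 ? _y : (_y == 0 ? _x : (_x < _y ? _x : _y)); })

    int main(void)
    {
        uint64_t limit = UINT64_MAX;                    /* PHYS_ADDR_MAX */

        limit = min_not_zero(limit, DMA_BIT_MASK(32));  /* 32-bit master */
        limit = min_not_zero(limit, DMA_BIT_MASK(0));   /* unconstrained */
        printf("limit = %#llx\n", (unsigned long long)limit); /* 0xffffffff */
        return 0;
    }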
@@ -140,6 +140,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 	},
 	{
 	 .callback = video_detect_force_vendor,
+	 .ident = "GIGABYTE GB-BXBT-2807",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "GB-BXBT-2807"),
+		},
+	},
+	{
 	 .ident = "Sony VPCEH3U1E",
 	 .matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
@@ -290,20 +290,20 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
 }
 
 /*
- * phys_device is a bad name for this.  What I really want
- * is a way to differentiate between memory ranges that
- * are part of physical devices that constitute
- * a complete removable unit or fru.
- * i.e. do these ranges belong to the same physical device,
- * s.t. if I offline all of these sections I can then
- * remove the physical device?
+ * Legacy interface that we cannot remove: s390x exposes the storage increment
+ * covered by a memory block, allowing for identifying which memory blocks
+ * comprise a storage increment. Since a memory block spans complete
+ * storage increments nowadays, this interface is basically unused. Other
+ * archs never exposed != 0.
 */
 static ssize_t phys_device_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	struct memory_block *mem = to_memory_block(dev);
+	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
 
-	return sysfs_emit(buf, "%d\n", mem->phys_device);
+	return sysfs_emit(buf, "%d\n",
+			  arch_get_memory_phys_device(start_pfn));
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
@@ -488,11 +488,7 @@ static DEVICE_ATTR_WO(soft_offline_page);
 static DEVICE_ATTR_WO(hard_offline_page);
 #endif
 
-/*
- * Note that phys_device is optional.  It is here to allow for
- * differentiation between which *physical* devices each
- * section belongs to...
- */
+/* See phys_device_show(). */
 int __weak arch_get_memory_phys_device(unsigned long start_pfn)
 {
 	return 0;
@@ -574,7 +570,6 @@ int register_memory(struct memory_block *memory)
 static int init_memory_block(unsigned long block_id, unsigned long state)
 {
 	struct memory_block *mem;
-	unsigned long start_pfn;
 	int ret = 0;
 
 	mem = find_memory_block_by_id(block_id);
@@ -588,8 +583,6 @@ static int init_memory_block(unsigned long block_id, unsigned long state)
 
 	mem->start_section_nr = block_id * sections_per_block;
 	mem->state = state;
-	start_pfn = section_nr_to_pfn(mem->start_section_nr);
-	mem->phys_device = arch_get_memory_phys_device(start_pfn);
 	mem->nid = NUMA_NO_NODE;
 
 	ret = register_memory(mem);
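With this change phys_device is computed on every read instead of being cached at block-init time. A quick userspace check of the attribute; everywhere except s390x this prints 0:

    #include <stdio.h>

    int main(void)
    {
        const char *path = "/sys/devices/system/memory/memory0/phys_device";
        FILE *f = fopen(path, "r");
        int phys_device;

        if (!f) {
            perror(path);
            return 1;
        }
        if (fscanf(f, "%d", &phys_device) == 1)
            printf("memory0 phys_device = %d\n", phys_device);
        fclose(f);
        return 0;
    }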
@@ -799,6 +799,9 @@ int software_node_register(const struct software_node *node)
 	if (software_node_to_swnode(node))
 		return -EEXIST;
 
+	if (node->parent && !parent)
+		return -EINVAL;
+
 	return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0));
 }
 EXPORT_SYMBOL_GPL(software_node_register);
@@ -2,3 +2,4 @@
 obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
 
 obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o
+CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all
@@ -165,15 +165,17 @@ static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
 {
 	struct rsxx_cardinfo *card = file_inode(fp)->i_private;
 	char *buf;
-	ssize_t st;
+	int st;
 
 	buf = kzalloc(cnt, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
 
 	st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
-	if (!st)
-		st = copy_to_user(ubuf, buf, cnt);
+	if (!st) {
+		if (copy_to_user(ubuf, buf, cnt))
+			st = -EFAULT;
+	}
 	kfree(buf);
 	if (st)
 		return st;
@@ -869,6 +871,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 	card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
 	if (!card->event_wq) {
 		dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
+		st = -ENOMEM;
 		goto failed_event_handler;
 	}
 
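The first hunk applies a general kernel rule: copy_to_user() returns the number of bytes left uncopied, never a negative errno, so its result must be mapped to -EFAULT rather than stored as a status. A compilable sketch of the corrected shape, with a stand-in for copy_to_user():

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* stand-in: returns bytes NOT copied, exactly like copy_to_user() */
    static unsigned long fake_copy_to_user(void *dst, const void *src,
                                           unsigned long n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    static int read_op(void *ubuf, const void *kbuf, unsigned long cnt)
    {
        int st = 0;

        if (fake_copy_to_user(ubuf, kbuf, cnt))
            st = -EFAULT; /* map "bytes left" to an errno, don't return it */
        return st;
    }

    int main(void)
    {
        char src[8] = "creg", dst[8];

        printf("read_op = %d\n", read_op(dst, src, sizeof(src)));
        return 0;
    }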
@@ -78,6 +78,7 @@ enum qca_flags {
 
 enum qca_capabilities {
 	QCA_CAP_WIDEBAND_SPEECH = BIT(0),
+	QCA_CAP_VALID_LE_STATES = BIT(1),
 };
 
 /* HCI_IBS transmit side sleep protocol states */
@@ -1782,7 +1783,7 @@ static const struct qca_device_data qca_soc_data_wcn3991 = {
 		{ "vddch0", 450000 },
 	},
 	.num_vregs = 4,
-	.capabilities = QCA_CAP_WIDEBAND_SPEECH,
+	.capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
 };
 
 static const struct qca_device_data qca_soc_data_wcn3998 = {
@@ -2019,11 +2020,17 @@ static int qca_serdev_probe(struct serdev_device *serdev)
 		hdev->shutdown = qca_power_off;
 	}
 
-	/* Wideband speech support must be set per driver since it can't be
-	 * queried via hci.
-	 */
-	if (data && (data->capabilities & QCA_CAP_WIDEBAND_SPEECH))
-		set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+	if (data) {
+		/* Wideband speech support must be set per driver since it can't
+		 * be queried via hci. Same with the valid le states quirk.
+		 */
+		if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH)
+			set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
+				&hdev->quirks);
+
+		if (data->capabilities & QCA_CAP_VALID_LE_STATES)
+			set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+	}
 
 	return 0;
 }
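The shape of the change, reduced to plain C: per-SoC capability bits are declared once in the device data and translated into runtime quirk flags only when that data exists. The names below mirror the driver, but the program is a standalone illustration, not the hci_qca API:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    enum caps {
        CAP_WIDEBAND_SPEECH = BIT(0),
        CAP_VALID_LE_STATES = BIT(1),
    };

    enum quirks {
        QUIRK_WIDEBAND_SPEECH = BIT(0),
        QUIRK_VALID_LE_STATES = BIT(1),
    };

    static unsigned long apply(const unsigned long *capabilities)
    {
        unsigned long quirks = 0;

        if (capabilities) {     /* mirrors the "if (data)" guard */
            if (*capabilities & CAP_WIDEBAND_SPEECH)
                quirks |= QUIRK_WIDEBAND_SPEECH;
            if (*capabilities & CAP_VALID_LE_STATES)
                quirks |= QUIRK_VALID_LE_STATES;
        }
        return quirks;
    }

    int main(void)
    {
        unsigned long wcn3991 = CAP_WIDEBAND_SPEECH | CAP_VALID_LE_STATES;

        printf("quirks = %#lx\n", apply(&wcn3991)); /* 0x3 */
        return 0;
    }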
@@ -1379,6 +1379,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
 		   SYSC_QUIRK_CLKDM_NOAUTO),
 	SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
 		   SYSC_QUIRK_CLKDM_NOAUTO),
+	SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
+		   SYSC_QUIRK_GPMC_DEBUG),
 	SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
 		   SYSC_QUIRK_OPT_CLKS_NEEDED),
 	SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
@@ -1814,6 +1816,14 @@ static void sysc_init_module_quirks(struct sysc *ddata)
 		return;
 	}
 
+#ifdef CONFIG_OMAP_GPMC_DEBUG
+	if (ddata->cfg.quirks & SYSC_QUIRK_GPMC_DEBUG) {
+		ddata->cfg.quirks |= SYSC_QUIRK_NO_RESET_ON_INIT;
+
+		return;
+	}
+#endif
+
 	if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
 		ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
 		ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;
@@ -707,12 +707,22 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
 	const char *desc = "attempting to generate an interrupt";
 	u32 cap2;
 	cap_t cap;
+	int ret;
 
+	/* TPM 2.0 */
 	if (chip->flags & TPM_CHIP_FLAG_TPM2)
 		return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
-	else
-		return tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc,
-				   0);
+
+	/* TPM 1.2 */
+	ret = request_locality(chip, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
+
+	release_locality(chip, 0);
+
+	return ret;
 }
 
 /* Register the IRQ and issue a command that will cause an interrupt. If an
@@ -1019,11 +1029,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 	init_waitqueue_head(&priv->read_queue);
 	init_waitqueue_head(&priv->int_queue);
 	if (irq != -1) {
-		/* Before doing irq testing issue a command to the TPM in polling mode
+		/*
+		 * Before doing irq testing issue a command to the TPM in polling mode
 		 * to make sure it works. May as well use that command to set the
 		 * proper timeouts for the driver.
 		 */
-		if (tpm_get_timeouts(chip)) {
+
+		rc = request_locality(chip, 0);
+		if (rc < 0)
+			goto out_err;
+
+		rc = tpm_get_timeouts(chip);
+
+		release_locality(chip, 0);
+
+		if (rc) {
 			dev_err(dev, "Could not get TPM timeouts and durations\n");
 			rc = -ENODEV;
 			goto out_err;
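Both TPM hunks introduce the same bracket: any command issued during probing must hold locality 0 and drop it afterwards, on failure as well as on success. A standalone sketch of that bracket with stand-in functions (not the tpm_tis API):

    #include <errno.h>
    #include <stdio.h>

    static int request_locality(int l) { printf("request %d\n", l); return 0; }
    static void release_locality(int l) { printf("release %d\n", l); }
    static int tpm_get_timeouts(void) { return 0; }

    static int probe_timeouts(void)
    {
        int rc = request_locality(0);

        if (rc < 0)
            return rc;              /* nothing to release */

        rc = tpm_get_timeouts();    /* command runs with locality held */

        release_locality(0);        /* drop it on success and failure alike */
        return rc ? -ENODEV : 0;
    }

    int main(void)
    {
        return probe_timeouts() ? 1 : 0;
    }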
@@ -183,7 +183,10 @@ static inline int gdsc_assert_reset(struct gdsc *sc)
 static inline void gdsc_force_mem_on(struct gdsc *sc)
 {
 	int i;
-	u32 mask = RETAIN_MEM | RETAIN_PERIPH;
+	u32 mask = RETAIN_MEM;
+
+	if (!(sc->flags & NO_RET_PERIPH))
+		mask |= RETAIN_PERIPH;
 
 	for (i = 0; i < sc->cxc_count; i++)
 		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
@@ -192,7 +195,10 @@ static inline void gdsc_clear_mem_on(struct gdsc *sc)
 static inline void gdsc_clear_mem_on(struct gdsc *sc)
 {
 	int i;
-	u32 mask = RETAIN_MEM | RETAIN_PERIPH;
+	u32 mask = RETAIN_MEM;
+
+	if (!(sc->flags & NO_RET_PERIPH))
+		mask |= RETAIN_PERIPH;
 
 	for (i = 0; i < sc->cxc_count; i++)
 		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
@@ -42,7 +42,7 @@ struct gdsc {
 #define PWRSTS_ON		BIT(2)
 #define PWRSTS_OFF_ON		(PWRSTS_OFF | PWRSTS_ON)
 #define PWRSTS_RET_ON		(PWRSTS_RET | PWRSTS_ON)
-	const u8 flags;
+	const u16 flags;
 #define VOTABLE		BIT(0)
 #define CLAMP_IO	BIT(1)
 #define HW_CTRL		BIT(2)
@@ -51,6 +51,7 @@ struct gdsc {
 #define POLL_CFG_GDSCR	BIT(5)
 #define ALWAYS_ON	BIT(6)
 #define RETAIN_FF_ENABLE	BIT(7)
+#define NO_RET_PERIPH	BIT(8)
 	struct reset_controller_dev *rcdev;
 	unsigned int *resets;
 	unsigned int reset_count;
@@ -253,12 +253,16 @@ static struct gdsc gpu_cx_gdsc = {
 static struct gdsc gpu_gx_gdsc = {
 	.gdscr = 0x1094,
 	.clamp_io_ctrl = 0x130,
+	.resets = (unsigned int []){ GPU_GX_BCR },
+	.reset_count = 1,
+	.cxcs = (unsigned int []){ 0x1098 },
+	.cxc_count = 1,
 	.pd = {
 		.name = "gpu_gx",
 	},
 	.parent = &gpu_cx_gdsc.pd,
-	.pwrsts = PWRSTS_OFF_ON,
-	.flags = CLAMP_IO | AON_RESET,
+	.pwrsts = PWRSTS_OFF_ON | PWRSTS_RET,
+	.flags = CLAMP_IO | SW_RESET | AON_RESET | NO_RET_PERIPH,
 };
 
 static struct clk_regmap *gpucc_msm8998_clocks[] = {
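One detail worth spelling out: the flags field widens from u8 to u16 because the new NO_RET_PERIPH flag is BIT(8), which a u8 would silently truncate to 0. A short demonstration of the truncation:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1U << (n))
    #define NO_RET_PERIPH BIT(8)

    int main(void)
    {
        uint8_t old_flags = (uint8_t)NO_RET_PERIPH;  /* truncates to 0 */
        uint16_t new_flags = NO_RET_PERIPH;          /* keeps bit 8 */

        printf("u8: %#x, u16: %#x\n",
               (unsigned)old_flags, (unsigned)new_flags); /* 0, 0x100 */
        return 0;
    }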
@@ -31,7 +31,7 @@ struct stm32_timer_cnt {
 	struct counter_device counter;
 	struct regmap *regmap;
 	struct clk *clk;
-	u32 ceiling;
+	u32 max_arr;
 	bool enabled;
 	struct stm32_timer_regs bak;
 };
@@ -44,13 +44,14 @@ struct stm32_timer_cnt {
  * @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges
  */
 enum stm32_count_function {
-	STM32_COUNT_SLAVE_MODE_DISABLED = -1,
+	STM32_COUNT_SLAVE_MODE_DISABLED,
 	STM32_COUNT_ENCODER_MODE_1,
 	STM32_COUNT_ENCODER_MODE_2,
 	STM32_COUNT_ENCODER_MODE_3,
 };
 
 static enum counter_count_function stm32_count_functions[] = {
+	[STM32_COUNT_SLAVE_MODE_DISABLED] = COUNTER_COUNT_FUNCTION_INCREASE,
 	[STM32_COUNT_ENCODER_MODE_1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
 	[STM32_COUNT_ENCODER_MODE_2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
 	[STM32_COUNT_ENCODER_MODE_3] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
@@ -73,8 +74,10 @@ static int stm32_count_write(struct counter_device *counter,
 			     const unsigned long val)
 {
 	struct stm32_timer_cnt *const priv = counter->priv;
+	u32 ceiling;
 
-	if (val > priv->ceiling)
+	regmap_read(priv->regmap, TIM_ARR, &ceiling);
+	if (val > ceiling)
 		return -EINVAL;
 
 	return regmap_write(priv->regmap, TIM_CNT, val);
@@ -90,6 +93,9 @@ static int stm32_count_function_get(struct counter_device *counter,
 	regmap_read(priv->regmap, TIM_SMCR, &smcr);
 
 	switch (smcr & TIM_SMCR_SMS) {
+	case 0:
+		*function = STM32_COUNT_SLAVE_MODE_DISABLED;
+		return 0;
 	case 1:
 		*function = STM32_COUNT_ENCODER_MODE_1;
 		return 0;
@@ -99,9 +105,9 @@ static int stm32_count_function_get(struct counter_device *counter,
 	case 3:
 		*function = STM32_COUNT_ENCODER_MODE_3;
 		return 0;
-	}
-
-	return -EINVAL;
+	default:
+		return -EINVAL;
+	}
 }
 
 static int stm32_count_function_set(struct counter_device *counter,
@@ -112,6 +118,9 @@ static int stm32_count_function_set(struct counter_device *counter,
 	u32 cr1, sms;
 
 	switch (function) {
+	case STM32_COUNT_SLAVE_MODE_DISABLED:
+		sms = 0;
+		break;
 	case STM32_COUNT_ENCODER_MODE_1:
 		sms = 1;
 		break;
@@ -122,8 +131,7 @@ static int stm32_count_function_set(struct counter_device *counter,
 		sms = 3;
 		break;
 	default:
-		sms = 0;
-		break;
+		return -EINVAL;
 	}
 
 	/* Store enable status */
@@ -131,10 +139,6 @@ static int stm32_count_function_set(struct counter_device *counter,
 
 	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
 
-	/* TIMx_ARR register shouldn't be buffered (ARPE=0) */
-	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
-	regmap_write(priv->regmap, TIM_ARR, priv->ceiling);
-
 	regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
 
 	/* Make sure that registers are updated */
@@ -185,11 +189,13 @@ static ssize_t stm32_count_ceiling_write(struct counter_device *counter,
 	if (ret)
 		return ret;
 
+	if (ceiling > priv->max_arr)
+		return -ERANGE;
+
 	/* TIMx_ARR register shouldn't be buffered (ARPE=0) */
 	regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
 	regmap_write(priv->regmap, TIM_ARR, ceiling);
 
-	priv->ceiling = ceiling;
 	return len;
 }
 
@@ -274,31 +280,36 @@ static int stm32_action_get(struct counter_device *counter,
 	size_t function;
 	int err;
 
-	/* Default action mode (e.g. STM32_COUNT_SLAVE_MODE_DISABLED) */
-	*action = STM32_SYNAPSE_ACTION_NONE;
-
 	err = stm32_count_function_get(counter, count, &function);
 	if (err)
-		return 0;
+		return err;
 
 	switch (function) {
+	case STM32_COUNT_SLAVE_MODE_DISABLED:
+		/* counts on internal clock when CEN=1 */
+		*action = STM32_SYNAPSE_ACTION_NONE;
+		return 0;
 	case STM32_COUNT_ENCODER_MODE_1:
 		/* counts up/down on TI1FP1 edge depending on TI2FP2 level */
 		if (synapse->signal->id == count->synapses[0].signal->id)
 			*action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
-		break;
+		else
+			*action = STM32_SYNAPSE_ACTION_NONE;
+		return 0;
 	case STM32_COUNT_ENCODER_MODE_2:
 		/* counts up/down on TI2FP2 edge depending on TI1FP1 level */
 		if (synapse->signal->id == count->synapses[1].signal->id)
 			*action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
-		break;
+		else
+			*action = STM32_SYNAPSE_ACTION_NONE;
+		return 0;
 	case STM32_COUNT_ENCODER_MODE_3:
 		/* counts up/down on both TI1FP1 and TI2FP2 edges */
 		*action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
-		break;
-	}
-
-	return 0;
+		return 0;
+	default:
+		return -EINVAL;
+	}
 }
 
 static const struct counter_ops stm32_timer_cnt_ops = {
@@ -359,7 +370,7 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev)
 
 	priv->regmap = ddata->regmap;
 	priv->clk = ddata->clk;
-	priv->ceiling = ddata->max_arr;
+	priv->max_arr = ddata->max_arr;
 
 	priv->counter.name = dev_name(dev);
 	priv->counter.parent = dev;
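Taken together, the counter hunks converge on one validation scheme: the hardware ARR register is the single source of truth for the ceiling (no cached priv->ceiling), ceiling writes are bounded by the device maximum with -ERANGE, and count writes are bounded by the live ceiling with -EINVAL. A standalone sketch with plain variables standing in for the regmap I/O:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t tim_arr;                 /* stand-in for TIM_ARR */
    static const uint32_t max_arr = 0xffff;  /* 16-bit timer */

    static int ceiling_write(uint32_t ceiling)
    {
        if (ceiling > max_arr)
            return -ERANGE;  /* beyond what the hardware can hold */
        tim_arr = ceiling;   /* like regmap_write(..., TIM_ARR, ...) */
        return 0;
    }

    static int count_write(uint32_t val)
    {
        if (val > tim_arr)   /* re-read the live ceiling, no cached copy */
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n", ceiling_write(0x10000), ceiling_write(0x8000),
               count_write(0x9000)); /* -34 0 -22 */
        return 0;
    }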
@@ -317,9 +317,9 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 	}
 
 	base = ioremap(res->start, resource_size(res));
-	if (IS_ERR(base)) {
+	if (!base) {
 		dev_err(dev, "failed to map resource %pR\n", res);
-		ret = PTR_ERR(base);
+		ret = -ENOMEM;
 		goto release_region;
 	}
 
@@ -368,7 +368,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 error:
 	kfree(data);
 unmap_base:
-	iounmap(data->base);
+	iounmap(base);
 release_region:
 	release_mem_region(res->start, resource_size(res));
 	return ret;
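The bug pattern here is common enough to restate: ioremap() reports failure with NULL, not with an ERR_PTR, so IS_ERR()/PTR_ERR() let a failed mapping sail through as success. A small demonstration using the kernel's IS_ERR() definition, re-stated here for illustration:

    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define IS_ERR(ptr) ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        void *base = NULL; /* what ioremap() returns on failure */

        printf("IS_ERR(NULL) = %d\n", !!IS_ERR(base)); /* 0: wrong check */
        printf("!base        = %d\n", !base);          /* 1: right check */
        return 0;
    }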
@@ -927,7 +927,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
 	}
 
 	/* first try to find a slot in an existing linked list entry */
-	for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
+	for (prsv = efi_memreserve_root->next; prsv; ) {
 		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
 		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
 		if (index < rsv->size) {
@@ -937,6 +937,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
 			memunmap(rsv);
 			return efi_mem_reserve_iomem(addr, size);
 		}
+		prsv = rsv->next;
 		memunmap(rsv);
 	}
 
@@ -96,6 +96,18 @@ static void install_memreserve_table(void)
 		efi_err("Failed to install memreserve config table!\n");
 }
 
+static u32 get_supported_rt_services(void)
+{
+	const efi_rt_properties_table_t *rt_prop_table;
+	u32 supported = EFI_RT_SUPPORTED_ALL;
+
+	rt_prop_table = get_efi_config_table(EFI_RT_PROPERTIES_TABLE_GUID);
+	if (rt_prop_table)
+		supported &= rt_prop_table->runtime_services_supported;
+
+	return supported;
+}
+
 /*
  * EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint
  * that is described in the PE/COFF header. Most of the code is the same
@@ -250,6 +262,10 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
 		    (prop_tbl->memory_protection_attribute &
 		     EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA);
 
+	/* force efi_novamap if SetVirtualAddressMap() is unsupported */
+	efi_novamap |= !(get_supported_rt_services() &
+			 EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP);
+
 	/* hibernation expects the runtime regions to stay in the same place */
 	if (!IS_ENABLED(CONFIG_HIBERNATION) && !efi_nokaslr && !flat_va_mapping) {
 		/*
@@ -484,6 +484,10 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
 			}
 		}
 
+		break;
+	case EFI_UNSUPPORTED:
+		err = -EOPNOTSUPP;
+		status = EFI_NOT_FOUND;
 		break;
 	case EFI_NOT_FOUND:
 		break;
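The first EFI hunk fixes a use-after-unmap: the old loop header advanced with rsv->next after the iteration body had already called memunmap(rsv). Reading the next pointer while the mapping is still live is the whole fix; a userspace analogue with malloc/free:

    #include <stdlib.h>

    struct rsv {
        struct rsv *next;
    };

    static void walk(struct rsv *head)
    {
        struct rsv *cur = head;

        while (cur) {
            struct rsv *next = cur->next; /* read before the node goes away */

            free(cur);  /* like memunmap(rsv) */
            cur = next; /* never touch cur after free */
        }
    }

    int main(void)
    {
        struct rsv *a = calloc(1, sizeof(*a));
        struct rsv *b = calloc(1, sizeof(*b));

        if (!a || !b)
            return 1;
        a->next = b;
        walk(a);
        return 0;
    }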
@@ -112,8 +112,29 @@ MODULE_DEVICE_TABLE(i2c, pca953x_id);
 #ifdef CONFIG_GPIO_PCA953X_IRQ
 
 #include <linux/dmi.h>
-#include <linux/gpio.h>
-#include <linux/list.h>
+
+static const struct acpi_gpio_params pca953x_irq_gpios = { 0, 0, true };
+
+static const struct acpi_gpio_mapping pca953x_acpi_irq_gpios[] = {
+	{ "irq-gpios", &pca953x_irq_gpios, 1, ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER },
+	{ }
+};
+
+static int pca953x_acpi_get_irq(struct device *dev)
+{
+	int ret;
+
+	ret = devm_acpi_dev_add_driver_gpios(dev, pca953x_acpi_irq_gpios);
+	if (ret)
+		dev_warn(dev, "can't add GPIO ACPI mapping\n");
+
+	ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq-gpios", 0);
+	if (ret < 0)
+		return ret;
+
+	dev_info(dev, "ACPI interrupt quirk (IRQ %d)\n", ret);
+	return ret;
+}
 
 static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = {
 	{
@@ -132,59 +153,6 @@ static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = {
 	},
 	{}
 };
-
-#ifdef CONFIG_ACPI
-static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data)
-{
-	struct acpi_resource_gpio *agpio;
-	int *pin = data;
-
-	if (acpi_gpio_get_irq_resource(ares, &agpio))
-		*pin = agpio->pin_table[0];
-	return 1;
-}
-
-static int pca953x_acpi_find_pin(struct device *dev)
-{
-	struct acpi_device *adev = ACPI_COMPANION(dev);
-	int pin = -ENOENT, ret;
-	LIST_HEAD(r);
-
-	ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin);
-	acpi_dev_free_resource_list(&r);
-	if (ret < 0)
-		return ret;
-
-	return pin;
-}
-#else
-static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; }
-#endif
-
-static int pca953x_acpi_get_irq(struct device *dev)
-{
-	int pin, ret;
-
-	pin = pca953x_acpi_find_pin(dev);
-	if (pin < 0)
-		return pin;
-
-	dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin);
-
-	if (!gpio_is_valid(pin))
-		return -EINVAL;
-
-	ret = gpio_request(pin, "pca953x interrupt");
-	if (ret)
-		return ret;
-
-	ret = gpio_to_irq(pin);
-
-	/* When pin is used as an IRQ, no need to keep it requested */
-	gpio_free(pin);
-
-	return ret;
-}
 #endif
 
 static const struct acpi_device_id pca953x_acpi_ids[] = {
@@ -649,6 +649,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
 	if (!lookup->desc) {
 		const struct acpi_resource_gpio *agpio = &ares->data.gpio;
 		bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
+		struct gpio_desc *desc;
 		int pin_index;
 
 		if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
@@ -661,8 +662,12 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
 		if (pin_index >= agpio->pin_table_length)
 			return 1;
 
-		lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
+		if (lookup->info.quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER)
+			desc = gpio_to_desc(agpio->pin_table[pin_index]);
+		else
+			desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
 					      agpio->pin_table[pin_index]);
+		lookup->desc = desc;
 		lookup->info.pin_config = agpio->pin_config;
 		lookup->info.gpioint = gpioint;
 
@@ -911,8 +916,9 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
 }
 
 /**
- * acpi_dev_gpio_irq_get() - Find GpioInt and translate it to Linux IRQ number
+ * acpi_dev_gpio_irq_get_by() - Find GpioInt and translate it to Linux IRQ number
 * @adev: pointer to a ACPI device to get IRQ from
+ * @name: optional name of GpioInt resource
 * @index: index of GpioInt resource (starting from %0)
 *
 * If the device has one or more GpioInt resources, this function can be
@@ -922,9 +928,12 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
 * The function is idempotent, though each time it runs it will configure GPIO
 * pin direction according to the flags in GpioInt resource.
 *
+ * The function takes optional @name parameter. If the resource has a property
+ * name, then only those will be taken into account.
+ *
 * Return: Linux IRQ number (> %0) on success, negative errno on failure.
 */
-int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
+int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index)
 {
 	int idx, i;
 	unsigned int irq_flags;
@@ -934,7 +943,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
 		struct acpi_gpio_info info;
 		struct gpio_desc *desc;
 
-		desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
+		desc = acpi_get_gpiod_by_index(adev, name, i, &info);
 
 		/* Ignore -EPROBE_DEFER, it only matters if idx matches */
 		if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
@@ -971,7 +980,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
 	}
 	return -ENOENT;
 }
-EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
+EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get_by);
 
 static acpi_status
 acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
@@ -476,8 +476,12 @@ EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
 static void gpiodevice_release(struct device *dev)
 {
 	struct gpio_device *gdev = dev_get_drvdata(dev);
+	unsigned long flags;
 
+	spin_lock_irqsave(&gpio_lock, flags);
 	list_del(&gdev->list);
+	spin_unlock_irqrestore(&gpio_lock, flags);
+
 	ida_free(&gpio_ida, gdev->id);
 	kfree_const(gdev->label);
 	kfree(gdev->descs);
@@ -572,6 +576,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 			       struct lock_class_key *lock_key,
 			       struct lock_class_key *request_key)
 {
+	struct fwnode_handle *fwnode = gc->parent ? dev_fwnode(gc->parent) : NULL;
 	unsigned long flags;
 	int ret = 0;
 	unsigned i;
@@ -596,6 +601,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 
 	of_gpio_dev_init(gc, gdev);
 
+	/*
+	 * Assign fwnode depending on the result of the previous calls,
+	 * if none of them succeed, assign it to the parent's one.
+	 */
+	gdev->dev.fwnode = dev_fwnode(&gdev->dev) ?: fwnode;
+
 	gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
 	if (gdev->id < 0) {
 		ret = gdev->id;
@@ -178,6 +178,7 @@ extern uint amdgpu_smu_memory_pool_size;
 extern uint amdgpu_dc_feature_mask;
 extern uint amdgpu_dc_debug_mask;
 extern uint amdgpu_dm_abm_level;
+extern int amdgpu_backlight;
 extern struct amdgpu_mgpu_info mgpu_info;
 extern int amdgpu_ras_enable;
 extern uint amdgpu_ras_mask;
Some files were not shown because too many files have changed in this diff.