Merge 4ef8451b33 ("Merge tag 'perf-tools-for-v5.10-2020-11-03' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux") into android-mainline

Steps on the way to 5.10-rc3

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ia09418a96a25f6c602af953db5d3258e032c0f30
This commit is contained in:
Greg Kroah-Hartman
2020-11-05 16:10:31 +01:00
95 changed files with 965 additions and 411 deletions

View File

@@ -26,6 +26,10 @@ BUILDDIR = $(obj)/output
PDFLATEX = xelatex PDFLATEX = xelatex
LATEXOPTS = -interaction=batchmode LATEXOPTS = -interaction=batchmode
ifeq ($(KBUILD_VERBOSE),0)
SPHINXOPTS += "-q"
endif
# User-friendly check for sphinx-build # User-friendly check for sphinx-build
HAVE_SPHINX := $(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi) HAVE_SPHINX := $(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi)

View File

@@ -107,7 +107,7 @@ for a UID/GID will prevent that UID/GID from obtaining auxiliary setid
privileges, such as allowing a user to set up user namespace UID/GID mappings. privileges, such as allowing a user to set up user namespace UID/GID mappings.
Note on GID policies and setgroups() Note on GID policies and setgroups()
================== ====================================
In v5.9 we are adding support for limiting CAP_SETGID privileges as was done In v5.9 we are adding support for limiting CAP_SETGID privileges as was done
previously for CAP_SETUID. However, for compatibility with common sandboxing previously for CAP_SETUID. However, for compatibility with common sandboxing
related code conventions in userspace, we currently allow arbitrary related code conventions in userspace, we currently allow arbitrary

View File

@@ -300,6 +300,7 @@ Note:
0: 0 1 2 3 4 5 6 7 0: 0 1 2 3 4 5 6 7
RSS hash key: RSS hash key:
84:50:f4:00:a8:15:d1:a7:e9:7f:1d:60:35:c7:47:25:42:97:74:ca:56:bb:b6:a1:d8:43:e3:c9:0c:fd:17:55:c2:3a:4d:69:ed:f1:42:89 84:50:f4:00:a8:15:d1:a7:e9:7f:1d:60:35:c7:47:25:42:97:74:ca:56:bb:b6:a1:d8:43:e3:c9:0c:fd:17:55:c2:3a:4d:69:ed:f1:42:89
netdev_tstamp_prequeue netdev_tstamp_prequeue
---------------------- ----------------------

View File

@@ -148,3 +148,13 @@ SunXi family
* User Manual * User Manual
http://dl.linux-sunxi.org/A64/Allwinner%20A64%20User%20Manual%20v1.0.pdf http://dl.linux-sunxi.org/A64/Allwinner%20A64%20User%20Manual%20v1.0.pdf
- Allwinner H6
* Datasheet
https://linux-sunxi.org/images/5/5c/Allwinner_H6_V200_Datasheet_V1.1.pdf
* User Manual
https://linux-sunxi.org/images/4/46/Allwinner_H6_V200_User_Manual_V1.1.pdf

View File

@@ -51,7 +51,7 @@ if major >= 3:
support for Sphinx v3.0 and above is brand new. Be prepared for support for Sphinx v3.0 and above is brand new. Be prepared for
possible issues in the generated output. possible issues in the generated output.
''') ''')
if minor > 0 or patch >= 2: if (major > 3) or (minor > 0 or patch >= 2):
# Sphinx c function parser is more pedantic with regards to type # Sphinx c function parser is more pedantic with regards to type
# checking. Due to that, having macros at c:function cause problems. # checking. Due to that, having macros at c:function cause problems.
# Those needed to be escaped by using c_id_attributes[] array # Those needed to be escaped by using c_id_attributes[] array

View File

@@ -295,11 +295,13 @@ print the number of the test and the status of the test:
pass:: pass::
ok 28 - kmalloc_double_kzfree ok 28 - kmalloc_double_kzfree
or, if kmalloc failed:: or, if kmalloc failed::
# kmalloc_large_oob_right: ASSERTION FAILED at lib/test_kasan.c:163 # kmalloc_large_oob_right: ASSERTION FAILED at lib/test_kasan.c:163
Expected ptr is not null, but is Expected ptr is not null, but is
not ok 4 - kmalloc_large_oob_right not ok 4 - kmalloc_large_oob_right
or, if a KASAN report was expected, but not found:: or, if a KASAN report was expected, but not found::
# kmalloc_double_kzfree: EXPECTATION FAILED at lib/test_kasan.c:629 # kmalloc_double_kzfree: EXPECTATION FAILED at lib/test_kasan.c:629

View File

@@ -4,7 +4,7 @@ Clock control registers reside in different Hi6220 system controllers,
please refer the following document to know more about the binding rules please refer the following document to know more about the binding rules
for these system controllers: for these system controllers:
Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt Documentation/devicetree/bindings/arm/hisilicon/hisilicon.yaml
Required Properties: Required Properties:

View File

@@ -86,9 +86,6 @@ Other Functions
.. kernel-doc:: fs/dax.c .. kernel-doc:: fs/dax.c
:export: :export:
.. kernel-doc:: fs/direct-io.c
:export:
.. kernel-doc:: fs/libfs.c .. kernel-doc:: fs/libfs.c
:export: :export:

View File

@@ -83,10 +83,6 @@ AMDGPU XGMI Support
=================== ===================
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
:doc: AMDGPU XGMI Support
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
:internal:
AMDGPU RAS Support AMDGPU RAS Support
================== ==================
@@ -124,9 +120,6 @@ RAS VRAM Bad Pages sysfs Interface
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
:doc: AMDGPU RAS sysfs gpu_vram_bad_pages Interface :doc: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
:internal:
Sample Code Sample Code
----------- -----------
Sample code for testing error injection can be found here: Sample code for testing error injection can be found here:

View File

@@ -20,7 +20,7 @@ ADM1266 is a sequencer that features voltage readback from 17 channels via an
integrated 12 bit SAR ADC, accessed using a PMBus interface. integrated 12 bit SAR ADC, accessed using a PMBus interface.
The driver is a client driver to the core PMBus driver. Please see The driver is a client driver to the core PMBus driver. Please see
Documentation/hwmon/pmbus for details on PMBus client drivers. Documentation/hwmon/pmbus.rst for details on PMBus client drivers.
Sysfs entries Sysfs entries

View File

@@ -132,6 +132,7 @@ Hardware Monitoring Kernel Drivers
mcp3021 mcp3021
menf21bmc menf21bmc
mlxreg-fan mlxreg-fan
mp2975
nct6683 nct6683
nct6775 nct6775
nct7802 nct7802

View File

@@ -20,6 +20,7 @@ This driver implements support for Monolithic Power Systems, Inc. (MPS)
vendor dual-loop, digital, multi-phase controller MP2975. vendor dual-loop, digital, multi-phase controller MP2975.
This device: This device:
- Supports up to two power rails. - Supports up to two power rails.
- Provides 8 pulse-width modulations (PWMs), and can be configured up - Provides 8 pulse-width modulations (PWMs), and can be configured up
to 8-phase operation for rail 1 and up to 4-phase operation for rail to 8-phase operation for rail 1 and up to 4-phase operation for rail
@@ -32,10 +33,12 @@ This device:
10-mV DAC, IMVP9 mode with 5-mV DAC. 10-mV DAC, IMVP9 mode with 5-mV DAC.
Device supports: Device supports:
- SVID interface. - SVID interface.
- AVSBus interface. - AVSBus interface.
Device compliant with: Device compliant with:
- PMBus rev 1.3 interface. - PMBus rev 1.3 interface.
Device supports direct format for reading output current, output voltage, Device supports direct format for reading output current, output voltage,
@@ -45,11 +48,14 @@ Device supports VID and direct formats for reading output voltage.
The below VID modes are supported: VR12, VR13, IMVP9. The below VID modes are supported: VR12, VR13, IMVP9.
The driver provides the next attributes for the current: The driver provides the next attributes for the current:
- for current in: input, maximum alarm; - for current in: input, maximum alarm;
- for current out input, maximum alarm and highest values; - for current out input, maximum alarm and highest values;
- for phase current: input and label. - for phase current: input and label.
attributes. attributes.
The driver exports the following attributes via the 'sysfs' files, where The driver exports the following attributes via the 'sysfs' files, where
- 'n' is number of telemetry pages (from 1 to 2); - 'n' is number of telemetry pages (from 1 to 2);
- 'k' is number of configured phases (from 1 to 8); - 'k' is number of configured phases (from 1 to 8);
- indexes 1, 1*n for "iin"; - indexes 1, 1*n for "iin";
@@ -65,11 +71,14 @@ The driver exports the following attributes via the 'sysfs' files, where
**curr[1-{2n+k}]_label** **curr[1-{2n+k}]_label**
The driver provides the next attributes for the voltage: The driver provides the next attributes for the voltage:
- for voltage in: input, high critical threshold, high critical alarm, all only - for voltage in: input, high critical threshold, high critical alarm, all only
from page 0; from page 0;
- for voltage out: input, low and high critical thresholds, low and high - for voltage out: input, low and high critical thresholds, low and high
critical alarms, from pages 0 and 1; critical alarms, from pages 0 and 1;
The driver exports the following attributes via the 'sysfs' files, where The driver exports the following attributes via the 'sysfs' files, where
- 'n' is number of telemetry pages (from 1 to 2); - 'n' is number of telemetry pages (from 1 to 2);
- indexes 1 for "iin"; - indexes 1 for "iin";
- indexes n+1, n+2 for "vout"; - indexes n+1, n+2 for "vout";
@@ -87,9 +96,12 @@ The driver exports the following attributes via the 'sysfs' files, where
**in[2-{n+1}1_lcrit_alarm** **in[2-{n+1}1_lcrit_alarm**
The driver provides the next attributes for the power: The driver provides the next attributes for the power:
- for power in alarm and input. - for power in alarm and input.
- for power out: highest and input. - for power out: highest and input.
The driver exports the following attributes via the 'sysfs' files, where The driver exports the following attributes via the 'sysfs' files, where
- 'n' is number of telemetry pages (from 1 to 2); - 'n' is number of telemetry pages (from 1 to 2);
- indexes 1 for "pin"; - indexes 1 for "pin";
- indexes n+1, n+2 for "pout"; - indexes n+1, n+2 for "pout";

View File

@@ -42,6 +42,7 @@ The validator tracks lock-class usage history and divides the usage into
(4 usages * n STATEs + 1) categories: (4 usages * n STATEs + 1) categories:
where the 4 usages can be: where the 4 usages can be:
- 'ever held in STATE context' - 'ever held in STATE context'
- 'ever held as readlock in STATE context' - 'ever held as readlock in STATE context'
- 'ever held with STATE enabled' - 'ever held with STATE enabled'
@@ -49,10 +50,12 @@ where the 4 usages can be:
where the n STATEs are coded in kernel/locking/lockdep_states.h and as of where the n STATEs are coded in kernel/locking/lockdep_states.h and as of
now they include: now they include:
- hardirq - hardirq
- softirq - softirq
where the last 1 category is: where the last 1 category is:
- 'ever used' [ == !unused ] - 'ever used' [ == !unused ]
When locking rules are violated, these usage bits are presented in the When locking rules are violated, these usage bits are presented in the
@@ -96,9 +99,9 @@ exact case is for the lock as of the reporting time.
+--------------+-------------+--------------+ +--------------+-------------+--------------+
| | irq enabled | irq disabled | | | irq enabled | irq disabled |
+--------------+-------------+--------------+ +--------------+-------------+--------------+
| ever in irq | ? | - | | ever in irq | '?' | '-' |
+--------------+-------------+--------------+ +--------------+-------------+--------------+
| never in irq | + | . | | never in irq | '+' | '.' |
+--------------+-------------+--------------+ +--------------+-------------+--------------+
The character '-' suggests irq is disabled because if otherwise the The character '-' suggests irq is disabled because if otherwise the
@@ -334,7 +337,7 @@ Troubleshooting:
---------------- ----------------
The validator tracks a maximum of MAX_LOCKDEP_KEYS number of lock classes. The validator tracks a maximum of MAX_LOCKDEP_KEYS number of lock classes.
Exceeding this number will trigger the following lockdep warning: Exceeding this number will trigger the following lockdep warning::
(DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
@@ -420,7 +423,8 @@ the critical section of another reader of the same lock instance.
The difference between recursive readers and non-recursive readers is because: The difference between recursive readers and non-recursive readers is because:
recursive readers get blocked only by a write lock *holder*, while non-recursive recursive readers get blocked only by a write lock *holder*, while non-recursive
readers could get blocked by a write lock *waiter*. Considering the follow example: readers could get blocked by a write lock *waiter*. Considering the follow
example::
TASK A: TASK B: TASK A: TASK B:
@@ -448,20 +452,22 @@ There are simply four block conditions:
Block condition matrix, Y means the row blocks the column, and N means otherwise. Block condition matrix, Y means the row blocks the column, and N means otherwise.
| E | r | R |
+---+---+---+---+ +---+---+---+---+
E | Y | Y | Y | | | E | r | R |
+---+---+---+---+ +---+---+---+---+
r | Y | Y | N | | E | Y | Y | Y |
+---+---+---+---+
| r | Y | Y | N |
+---+---+---+---+
| R | Y | Y | N |
+---+---+---+---+ +---+---+---+---+
R | Y | Y | N |
(W: writers, r: non-recursive readers, R: recursive readers) (W: writers, r: non-recursive readers, R: recursive readers)
acquired recursively. Unlike non-recursive read locks, recursive read locks acquired recursively. Unlike non-recursive read locks, recursive read locks
only get blocked by current write lock *holders* other than write lock only get blocked by current write lock *holders* other than write lock
*waiters*, for example: *waiters*, for example::
TASK A: TASK B: TASK A: TASK B:
@@ -491,7 +497,7 @@ Recursive locks don't block each other, while non-recursive locks do (this is
even true for two non-recursive read locks). A non-recursive lock can block the even true for two non-recursive read locks). A non-recursive lock can block the
corresponding recursive lock, and vice versa. corresponding recursive lock, and vice versa.
A deadlock case with recursive locks involved is as follow: A deadlock case with recursive locks involved is as follow::
TASK A: TASK B: TASK A: TASK B:
@@ -510,7 +516,7 @@ because there are 3 types for lockers, there are, in theory, 9 types of lock
dependencies, but we can show that 4 types of lock dependencies are enough for dependencies, but we can show that 4 types of lock dependencies are enough for
deadlock detection. deadlock detection.
For each lock dependency: For each lock dependency::
L1 -> L2 L1 -> L2
@@ -525,20 +531,25 @@ same types).
With the above combination for simplification, there are 4 types of dependency edges With the above combination for simplification, there are 4 types of dependency edges
in the lockdep graph: in the lockdep graph:
1) -(ER)->: exclusive writer to recursive reader dependency, "X -(ER)-> Y" means 1) -(ER)->:
exclusive writer to recursive reader dependency, "X -(ER)-> Y" means
X -> Y and X is a writer and Y is a recursive reader. X -> Y and X is a writer and Y is a recursive reader.
2) -(EN)->: exclusive writer to non-recursive locker dependency, "X -(EN)-> Y" means 2) -(EN)->:
exclusive writer to non-recursive locker dependency, "X -(EN)-> Y" means
X -> Y and X is a writer and Y is either a writer or non-recursive reader. X -> Y and X is a writer and Y is either a writer or non-recursive reader.
3) -(SR)->: shared reader to recursive reader dependency, "X -(SR)-> Y" means 3) -(SR)->:
shared reader to recursive reader dependency, "X -(SR)-> Y" means
X -> Y and X is a reader (recursive or not) and Y is a recursive reader. X -> Y and X is a reader (recursive or not) and Y is a recursive reader.
4) -(SN)->: shared reader to non-recursive locker dependency, "X -(SN)-> Y" means 4) -(SN)->:
shared reader to non-recursive locker dependency, "X -(SN)-> Y" means
X -> Y and X is a reader (recursive or not) and Y is either a writer or X -> Y and X is a reader (recursive or not) and Y is either a writer or
non-recursive reader. non-recursive reader.
Note that given two locks, they may have multiple dependencies between them, for example: Note that given two locks, they may have multiple dependencies between them,
for example::
TASK A: TASK A:
@@ -592,11 +603,11 @@ circles that won't cause deadlocks.
Proof for sufficiency (Lemma 1): Proof for sufficiency (Lemma 1):
Let's say we have a strong circle: Let's say we have a strong circle::
L1 -> L2 ... -> Ln -> L1 L1 -> L2 ... -> Ln -> L1
, which means we have dependencies: , which means we have dependencies::
L1 -> L2 L1 -> L2
L2 -> L3 L2 -> L3
@@ -633,7 +644,7 @@ a lock held by P2, and P2 is waiting for a lock held by P3, ... and Pn is waitin
for a lock held by P1. Let's name the lock Px is waiting as Lx, so since P1 is waiting for a lock held by P1. Let's name the lock Px is waiting as Lx, so since P1 is waiting
for L1 and holding Ln, so we will have Ln -> L1 in the dependency graph. Similarly, for L1 and holding Ln, so we will have Ln -> L1 in the dependency graph. Similarly,
we have L1 -> L2, L2 -> L3, ..., Ln-1 -> Ln in the dependency graph, which means we we have L1 -> L2, L2 -> L3, ..., Ln-1 -> Ln in the dependency graph, which means we
have a circle: have a circle::
Ln -> L1 -> L2 -> ... -> Ln Ln -> L1 -> L2 -> ... -> Ln

View File

@@ -70,6 +70,7 @@ The ``ice`` driver reports the following versions
that both the name (as reported by ``fw.app.name``) and version are that both the name (as reported by ``fw.app.name``) and version are
required to uniquely identify the package. required to uniquely identify the package.
* - ``fw.app.bundle_id`` * - ``fw.app.bundle_id``
- running
- 0xc0000001 - 0xc0000001
- Unique identifier for the DDP package loaded in the device. Also - Unique identifier for the DDP package loaded in the device. Also
referred to as the DDP Track ID. Can be used to uniquely identify referred to as the DDP Track ID. Can be used to uniquely identify

View File

@@ -175,5 +175,4 @@ The following structures are internal to the kernel, their members are
translated to netlink attributes when dumped. Drivers must not overwrite translated to netlink attributes when dumped. Drivers must not overwrite
the statistics they don't report with 0. the statistics they don't report with 0.
.. kernel-doc:: include/linux/ethtool.h - ethtool_pause_stats()
:identifiers: ethtool_pause_stats

View File

@@ -15,6 +15,14 @@ else:
import re import re
from itertools import chain from itertools import chain
#
# Python 2 lacks re.ASCII...
#
try:
ascii_p3 = re.ASCII
except AttributeError:
ascii_p3 = 0
# #
# Regex nastiness. Of course. # Regex nastiness. Of course.
# Try to identify "function()" that's not already marked up some # Try to identify "function()" that's not already marked up some
@@ -22,22 +30,22 @@ from itertools import chain
# :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last # :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last
# bit tries to restrict matches to things that won't create trouble. # bit tries to restrict matches to things that won't create trouble.
# #
RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=re.ASCII) RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=ascii_p3)
# #
# Sphinx 2 uses the same :c:type role for struct, union, enum and typedef # Sphinx 2 uses the same :c:type role for struct, union, enum and typedef
# #
RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)', RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)',
flags=re.ASCII) flags=ascii_p3)
# #
# Sphinx 3 uses a different C role for each one of struct, union, enum and # Sphinx 3 uses a different C role for each one of struct, union, enum and
# typedef # typedef
# #
RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=re.ASCII) RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=re.ASCII) RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=re.ASCII) RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=re.ASCII) RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
# #
# Detects a reference to a documentation page of the form Documentation/... with # Detects a reference to a documentation page of the form Documentation/... with

View File

@@ -22,6 +22,7 @@ place where this information is gathered.
spec_ctrl spec_ctrl
accelerators/ocxl accelerators/ocxl
ioctl/index ioctl/index
iommu
media/index media/index
.. only:: subproject and html .. only:: subproject and html

View File

@@ -978,7 +978,7 @@ M: Michael Hennerich <Michael.Hennerich@analog.com>
L: linux-iio@vger.kernel.org L: linux-iio@vger.kernel.org
S: Supported S: Supported
W: http://ez.analog.com/community/linux-device-drivers W: http://ez.analog.com/community/linux-device-drivers
F: Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt F: Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.yaml
F: drivers/iio/adc/ad7768-1.c F: drivers/iio/adc/ad7768-1.c
ANALOG DEVICES INC AD7780 DRIVER ANALOG DEVICES INC AD7780 DRIVER
@@ -3857,7 +3857,7 @@ M: Roger Quadros <rogerq@ti.com>
L: linux-usb@vger.kernel.org L: linux-usb@vger.kernel.org
S: Maintained S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
F: Documentation/devicetree/bindings/usb/cdns-usb3.txt F: Documentation/devicetree/bindings/usb/cdns,usb3.yaml
F: drivers/usb/cdns3/ F: drivers/usb/cdns3/
CADET FM/AM RADIO RECEIVER DRIVER CADET FM/AM RADIO RECEIVER DRIVER
@@ -7916,7 +7916,7 @@ HISILICON LPC BUS DRIVER
M: john.garry@huawei.com M: john.garry@huawei.com
S: Maintained S: Maintained
W: http://www.hisilicon.com W: http://www.hisilicon.com
F: Documentation/devicetree/bindings/arm/hisilicon/hisilicon-low-pin-count.txt F: Documentation/devicetree/bindings/arm/hisilicon/low-pin-count.yaml
F: drivers/bus/hisi_lpc.c F: drivers/bus/hisi_lpc.c
HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3) HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
@@ -14895,7 +14895,6 @@ RENESAS ETHERNET DRIVERS
R: Sergei Shtylyov <sergei.shtylyov@gmail.com> R: Sergei Shtylyov <sergei.shtylyov@gmail.com>
L: netdev@vger.kernel.org L: netdev@vger.kernel.org
L: linux-renesas-soc@vger.kernel.org L: linux-renesas-soc@vger.kernel.org
F: Documentation/devicetree/bindings/net/renesas,*.txt
F: Documentation/devicetree/bindings/net/renesas,*.yaml F: Documentation/devicetree/bindings/net/renesas,*.yaml
F: drivers/net/ethernet/renesas/ F: drivers/net/ethernet/renesas/
F: include/linux/sh_eth.h F: include/linux/sh_eth.h
@@ -18096,7 +18095,7 @@ M: Yu Chen <chenyu56@huawei.com>
M: Binghui Wang <wangbinghui@hisilicon.com> M: Binghui Wang <wangbinghui@hisilicon.com>
L: linux-usb@vger.kernel.org L: linux-usb@vger.kernel.org
S: Maintained S: Maintained
F: Documentation/devicetree/bindings/phy/phy-hi3660-usb3.txt F: Documentation/devicetree/bindings/phy/hisilicon,hi3660-usb3.yaml
F: drivers/phy/hisilicon/phy-hi3660-usb3.c F: drivers/phy/hisilicon/phy-hi3660-usb3.c
USB ISP116X DRIVER USB ISP116X DRIVER

View File

@@ -164,6 +164,7 @@ void initialize_identity_maps(void *rmode)
add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE); add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE);
/* Load the new page-table. */ /* Load the new page-table. */
sev_verify_cbit(top_level_pgt);
write_cr3(top_level_pgt); write_cr3(top_level_pgt);
} }

View File

@@ -68,6 +68,9 @@ SYM_FUNC_START(get_sev_encryption_bit)
SYM_FUNC_END(get_sev_encryption_bit) SYM_FUNC_END(get_sev_encryption_bit)
.code64 .code64
#include "../../kernel/sev_verify_cbit.S"
SYM_FUNC_START(set_sev_encryption_mask) SYM_FUNC_START(set_sev_encryption_mask)
#ifdef CONFIG_AMD_MEM_ENCRYPT #ifdef CONFIG_AMD_MEM_ENCRYPT
push %rbp push %rbp
@@ -81,6 +84,19 @@ SYM_FUNC_START(set_sev_encryption_mask)
bts %rax, sme_me_mask(%rip) /* Create the encryption mask */ bts %rax, sme_me_mask(%rip) /* Create the encryption mask */
/*
* Read MSR_AMD64_SEV again and store it to sev_status. Can't do this in
* get_sev_encryption_bit() because this function is 32-bit code and
* shared between 64-bit and 32-bit boot path.
*/
movl $MSR_AMD64_SEV, %ecx /* Read the SEV MSR */
rdmsr
/* Store MSR value in sev_status */
shlq $32, %rdx
orq %rdx, %rax
movq %rax, sev_status(%rip)
.Lno_sev_mask: .Lno_sev_mask:
movq %rbp, %rsp /* Restore original stack pointer */ movq %rbp, %rsp /* Restore original stack pointer */
@@ -97,4 +113,6 @@ SYM_FUNC_END(set_sev_encryption_mask)
#ifdef CONFIG_AMD_MEM_ENCRYPT #ifdef CONFIG_AMD_MEM_ENCRYPT
.balign 8 .balign 8
SYM_DATA(sme_me_mask, .quad 0) SYM_DATA(sme_me_mask, .quad 0)
SYM_DATA(sev_status, .quad 0)
SYM_DATA(sev_check_data, .quad 0)
#endif #endif

View File

@@ -159,4 +159,6 @@ void boot_page_fault(void);
void boot_stage1_vc(void); void boot_stage1_vc(void);
void boot_stage2_vc(void); void boot_stage2_vc(void);
unsigned long sev_verify_cbit(unsigned long cr3);
#endif /* BOOT_COMPRESSED_MISC_H */ #endif /* BOOT_COMPRESSED_MISC_H */

View File

@@ -161,6 +161,21 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
/* Setup early boot stage 4-/5-level pagetables. */ /* Setup early boot stage 4-/5-level pagetables. */
addq phys_base(%rip), %rax addq phys_base(%rip), %rax
/*
* For SEV guests: Verify that the C-bit is correct. A malicious
* hypervisor could lie about the C-bit position to perform a ROP
* attack on the guest by writing to the unencrypted stack and wait for
* the next RET instruction.
* %rsi carries pointer to realmode data and is callee-clobbered. Save
* and restore it.
*/
pushq %rsi
movq %rax, %rdi
call sev_verify_cbit
popq %rsi
/* Switch to new page-table */
movq %rax, %cr3 movq %rax, %cr3
/* Ensure I am executing from virtual addresses */ /* Ensure I am executing from virtual addresses */
@@ -279,6 +294,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
SYM_CODE_END(secondary_startup_64) SYM_CODE_END(secondary_startup_64)
#include "verify_cpu.S" #include "verify_cpu.S"
#include "sev_verify_cbit.S"
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
/* /*

View File

@@ -178,6 +178,32 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
goto fail; goto fail;
regs->dx = val >> 32; regs->dx = val >> 32;
/*
* This is a VC handler and the #VC is only raised when SEV-ES is
* active, which means SEV must be active too. Do sanity checks on the
* CPUID results to make sure the hypervisor does not trick the kernel
* into the no-sev path. This could map sensitive data unencrypted and
* make it accessible to the hypervisor.
*
* In particular, check for:
* - Hypervisor CPUID bit
* - Availability of CPUID leaf 0x8000001f
* - SEV CPUID bit.
*
* The hypervisor might still report the wrong C-bit position, but this
* can't be checked here.
*/
if ((fn == 1 && !(regs->cx & BIT(31))))
/* Hypervisor bit */
goto fail;
else if (fn == 0x80000000 && (regs->ax < 0x8000001f))
/* SEV leaf check */
goto fail;
else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
/* SEV bit */
goto fail;
/* Skip over the CPUID two-byte opcode */ /* Skip over the CPUID two-byte opcode */
regs->ip += 2; regs->ip += 2;

View File

@@ -374,7 +374,7 @@ fault:
return ES_EXCEPTION; return ES_EXCEPTION;
} }
static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
unsigned long vaddr, phys_addr_t *paddr) unsigned long vaddr, phys_addr_t *paddr)
{ {
unsigned long va = (unsigned long)vaddr; unsigned long va = (unsigned long)vaddr;
@@ -394,15 +394,19 @@ static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
if (user_mode(ctxt->regs)) if (user_mode(ctxt->regs))
ctxt->fi.error_code |= X86_PF_USER; ctxt->fi.error_code |= X86_PF_USER;
return false; return ES_EXCEPTION;
} }
if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
/* Emulated MMIO to/from encrypted memory not supported */
return ES_UNSUPPORTED;
pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
pa |= va & ~page_level_mask(level); pa |= va & ~page_level_mask(level);
*paddr = pa; *paddr = pa;
return true; return ES_OK;
} }
/* Include code shared with pre-decompression boot stage */ /* Include code shared with pre-decompression boot stage */
@@ -731,6 +735,7 @@ static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
{ {
u64 exit_code, exit_info_1, exit_info_2; u64 exit_code, exit_info_1, exit_info_2;
unsigned long ghcb_pa = __pa(ghcb); unsigned long ghcb_pa = __pa(ghcb);
enum es_result res;
phys_addr_t paddr; phys_addr_t paddr;
void __user *ref; void __user *ref;
@@ -740,11 +745,12 @@ static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE; exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
if (!vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr)) { res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
if (!read) if (res != ES_OK) {
if (res == ES_EXCEPTION && !read)
ctxt->fi.error_code |= X86_PF_WRITE; ctxt->fi.error_code |= X86_PF_WRITE;
return ES_EXCEPTION; return res;
} }
exit_info_1 = paddr; exit_info_1 = paddr;

View File

@@ -0,0 +1,89 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* sev_verify_cbit.S - Code for verification of the C-bit position reported
* by the Hypervisor when running with SEV enabled.
*
* Copyright (c) 2020 Joerg Roedel (jroedel@suse.de)
*
* sev_verify_cbit() is called before switching to a new long-mode page-table
* at boot.
*
* Verify that the C-bit position is correct by writing a random value to
* an encrypted memory location while on the current page-table. Then it
* switches to the new page-table to verify the memory content is still the
* same. After that it switches back to the current page-table and when the
* check succeeded it returns. If the check failed the code invalidates the
* stack pointer and goes into a hlt loop. The stack-pointer is invalidated to
* make sure no interrupt or exception can get the CPU out of the hlt loop.
*
* New page-table pointer is expected in %rdi (first parameter)
*
*/
SYM_FUNC_START(sev_verify_cbit)
#ifdef CONFIG_AMD_MEM_ENCRYPT
/* First check if a C-bit was detected */
movq sme_me_mask(%rip), %rsi
testq %rsi, %rsi
jz 3f
/* sme_me_mask != 0 could mean SME or SEV - Check also for SEV */
movq sev_status(%rip), %rsi
testq %rsi, %rsi
jz 3f
/* Save CR4 in %rsi */
movq %cr4, %rsi
/* Disable Global Pages */
movq %rsi, %rdx
andq $(~X86_CR4_PGE), %rdx
movq %rdx, %cr4
/*
 * Verified that running under SEV - now get a random value using
 * RDRAND. This instruction is mandatory when running as an SEV guest.
 *
 * Don't bail out of the loop if RDRAND returns errors. It is better to
 * prevent forward progress than to work with a non-random value here.
 */
1: rdrand %rdx
jnc 1b
/* Store value to memory and keep it in %rdx */
movq %rdx, sev_check_data(%rip)
/* Backup current %cr3 value to restore it later */
movq %cr3, %rcx
/* Switch to new %cr3 - This might unmap the stack */
movq %rdi, %cr3
/*
 * Compare value in %rdx with memory location. If C-bit is incorrect
 * this would read the encrypted data and make the check fail.
 */
cmpq %rdx, sev_check_data(%rip)
/* Restore old %cr3 */
movq %rcx, %cr3
/* Restore previous CR4 */
movq %rsi, %cr4
/* Check CMPQ result - equal means the reported C-bit position is correct */
je 3f
/*
 * The check failed, prevent any forward progress to prevent ROP
 * attacks, invalidate the stack and go into a hlt loop.
 * (Invalid %rsp ensures no interrupt/exception handler can resume
 * execution off this CPU - see the file header comment above.)
 */
xorq %rsp, %rsp
subq $0x1000, %rsp
2: hlt
jmp 2b
/* Success, or SME/SEV not active: the new page-table is safe to load */
3:
#endif
/* Return page-table pointer */
movq %rdi, %rax
ret
SYM_FUNC_END(sev_verify_cbit)

View File

@@ -39,6 +39,7 @@
*/ */
u64 sme_me_mask __section(".data") = 0; u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0; u64 sev_status __section(".data") = 0;
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask); EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key); DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key); EXPORT_SYMBOL_GPL(sev_enable_key);

View File

@@ -7,7 +7,7 @@
* *
* This file add support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512. * This file add support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
* *
* You could find the datasheet in Documentation/arm/sunxi/README * You could find the datasheet in Documentation/arm/sunxi.rst
*/ */
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>

View File

@@ -7,7 +7,7 @@
* *
* This file handle the PRNG * This file handle the PRNG
* *
* You could find a link for the datasheet in Documentation/arm/sunxi/README * You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/ */
#include "sun8i-ce.h" #include "sun8i-ce.h"
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>

View File

@@ -7,7 +7,7 @@
* *
* This file handle the TRNG * This file handle the TRNG
* *
* You could find a link for the datasheet in Documentation/arm/sunxi/README * You could find a link for the datasheet in Documentation/arm/sunxi.rst
*/ */
#include "sun8i-ce.h" #include "sun8i-ce.h"
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>

View File

@@ -239,9 +239,11 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
return amdgpu_asic_supports_baco(adev); return amdgpu_asic_supports_baco(adev);
} }
/*
* VRAM access helper functions
*/
/** /**
* VRAM access helper functions.
*
* amdgpu_device_vram_access - read/write a buffer in vram * amdgpu_device_vram_access - read/write a buffer in vram
* *
* @adev: amdgpu_device pointer * @adev: amdgpu_device pointer
@@ -705,7 +707,7 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
/** /**
* amdgpu_invalid_rreg - dummy reg read function * amdgpu_invalid_rreg - dummy reg read function
* *
* @adev: amdgpu device pointer * @adev: amdgpu_device pointer
* @reg: offset of register * @reg: offset of register
* *
* Dummy register read function. Used for register blocks * Dummy register read function. Used for register blocks
@@ -722,7 +724,7 @@ static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
/** /**
* amdgpu_invalid_wreg - dummy reg write function * amdgpu_invalid_wreg - dummy reg write function
* *
* @adev: amdgpu device pointer * @adev: amdgpu_device pointer
* @reg: offset of register * @reg: offset of register
* @v: value to write to the register * @v: value to write to the register
* *
@@ -739,7 +741,7 @@ static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32
/** /**
* amdgpu_invalid_rreg64 - dummy 64 bit reg read function * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
* *
* @adev: amdgpu device pointer * @adev: amdgpu_device pointer
* @reg: offset of register * @reg: offset of register
* *
* Dummy register read function. Used for register blocks * Dummy register read function. Used for register blocks
@@ -756,7 +758,7 @@ static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
/** /**
* amdgpu_invalid_wreg64 - dummy reg write function * amdgpu_invalid_wreg64 - dummy reg write function
* *
* @adev: amdgpu device pointer * @adev: amdgpu_device pointer
* @reg: offset of register * @reg: offset of register
* @v: value to write to the register * @v: value to write to the register
* *
@@ -773,7 +775,7 @@ static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint
/** /**
* amdgpu_block_invalid_rreg - dummy reg read function * amdgpu_block_invalid_rreg - dummy reg read function
* *
* @adev: amdgpu device pointer * @adev: amdgpu_device pointer
* @block: offset of instance * @block: offset of instance
* @reg: offset of register * @reg: offset of register
* *
@@ -793,7 +795,7 @@ static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
/** /**
* amdgpu_block_invalid_wreg - dummy reg write function * amdgpu_block_invalid_wreg - dummy reg write function
* *
* @adev: amdgpu device pointer * @adev: amdgpu_device pointer
* @block: offset of instance * @block: offset of instance
* @reg: offset of register * @reg: offset of register
* @v: value to write to the register * @v: value to write to the register
@@ -813,7 +815,7 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
/** /**
* amdgpu_device_asic_init - Wrapper for atom asic_init * amdgpu_device_asic_init - Wrapper for atom asic_init
* *
* @dev: drm_device pointer * @adev: amdgpu_device pointer
* *
* Does any asic specific work and then calls atom asic init. * Does any asic specific work and then calls atom asic init.
*/ */
@@ -827,7 +829,7 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
/** /**
* amdgpu_device_vram_scratch_init - allocate the VRAM scratch page * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
* *
* @adev: amdgpu device pointer * @adev: amdgpu_device pointer
* *
* Allocates a scratch page of VRAM for use by various things in the * Allocates a scratch page of VRAM for use by various things in the
* driver. * driver.
@@ -844,7 +846,7 @@ static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
/** /**
* amdgpu_device_vram_scratch_fini - Free the VRAM scratch page * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
* *
* @adev: amdgpu device pointer * @adev: amdgpu_device pointer
* *
* Frees the VRAM scratch page. * Frees the VRAM scratch page.
*/ */
@@ -3011,7 +3013,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
/** /**
* amdgpu_device_has_dc_support - check if dc is supported * amdgpu_device_has_dc_support - check if dc is supported
* *
* @adev: amdgpu_device_pointer * @adev: amdgpu_device pointer
* *
* Returns true for supported, false for not supported * Returns true for supported, false for not supported
*/ */
@@ -4045,7 +4047,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
/** /**
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
* *
* @adev: amdgpu device pointer * @adev: amdgpu_device pointer
* @from_hypervisor: request from hypervisor * @from_hypervisor: request from hypervisor
* *
* do VF FLR and reinitialize Asic * do VF FLR and reinitialize Asic
@@ -4100,7 +4102,7 @@ error:
/** /**
* amdgpu_device_has_job_running - check if there is any job in mirror list * amdgpu_device_has_job_running - check if there is any job in mirror list
* *
* @adev: amdgpu device pointer * @adev: amdgpu_device pointer
* *
* check if there is any job in mirror list * check if there is any job in mirror list
*/ */
@@ -4128,7 +4130,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
/** /**
* amdgpu_device_should_recover_gpu - check if we should try GPU recovery * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
* *
* @adev: amdgpu device pointer * @adev: amdgpu_device pointer
* *
* Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
* a hung GPU. * a hung GPU.
@@ -4477,7 +4479,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
/** /**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler * amdgpu_device_gpu_recover - reset the asic and recover scheduler
* *
* @adev: amdgpu device pointer * @adev: amdgpu_device pointer
* @job: which job trigger hang * @job: which job trigger hang
* *
* Attempt to reset the GPU if it has hung (all asics). * Attempt to reset the GPU if it has hung (all asics).
@@ -4497,7 +4499,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
bool need_emergency_restart = false; bool need_emergency_restart = false;
bool audio_suspended = false; bool audio_suspended = false;
/** /*
* Special case: RAS triggered and full reset isn't supported * Special case: RAS triggered and full reset isn't supported
*/ */
need_emergency_restart = amdgpu_ras_need_emergency_restart(adev); need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);

View File

@@ -81,8 +81,8 @@ static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func;
/** /**
* amdgpu_gtt_mgr_init - init GTT manager and DRM MM * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
* *
* @man: TTM memory type manager * @adev: amdgpu_device pointer
* @p_size: maximum size of GTT * @gtt_size: maximum size of GTT
* *
* Allocate and initialize the GTT manager. * Allocate and initialize the GTT manager.
*/ */
@@ -123,7 +123,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
/** /**
* amdgpu_gtt_mgr_fini - free and destroy GTT manager * amdgpu_gtt_mgr_fini - free and destroy GTT manager
* *
* @man: TTM memory type manager * @adev: amdgpu_device pointer
* *
* Destroy and free the GTT manager, returns -EBUSY if ranges are still * Destroy and free the GTT manager, returns -EBUSY if ranges are still
* allocated inside it. * allocated inside it.

View File

@@ -168,8 +168,7 @@ static const struct ttm_resource_manager_func amdgpu_vram_mgr_func;
/** /**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
* *
* @man: TTM memory type manager * @adev: amdgpu_device pointer
* @p_size: maximum size of VRAM
* *
* Allocate and initialize the VRAM manager. * Allocate and initialize the VRAM manager.
*/ */
@@ -199,7 +198,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
/** /**
* amdgpu_vram_mgr_fini - free and destroy VRAM manager * amdgpu_vram_mgr_fini - free and destroy VRAM manager
* *
* @man: TTM memory type manager * @adev: amdgpu_device pointer
* *
* Destroy and free the VRAM manager, returns -EBUSY if ranges are still * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
* allocated inside it. * allocated inside it.
@@ -229,7 +228,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
/** /**
* amdgpu_vram_mgr_vis_size - Calculate visible node size * amdgpu_vram_mgr_vis_size - Calculate visible node size
* *
* @adev: amdgpu device structure * @adev: amdgpu_device pointer
* @node: MM node structure * @node: MM node structure
* *
* Calculate how many bytes of the MM node are inside visible VRAM * Calculate how many bytes of the MM node are inside visible VRAM

View File

@@ -583,7 +583,7 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{ {
struct drm_device *dev = connector->dev; struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_device *adev = drm_to_adev(dev);
struct dm_comressor_info *compressor = &adev->dm.compressor; struct dm_compressor_info *compressor = &adev->dm.compressor;
struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector); struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
struct drm_display_mode *mode; struct drm_display_mode *mode;
unsigned long max_size = 0; unsigned long max_size = 0;

View File

@@ -86,7 +86,7 @@ struct irq_list_head {
* @bo_ptr: Pointer to the buffer object * @bo_ptr: Pointer to the buffer object
* @gpu_addr: MMIO gpu addr * @gpu_addr: MMIO gpu addr
*/ */
struct dm_comressor_info { struct dm_compressor_info {
void *cpu_addr; void *cpu_addr;
struct amdgpu_bo *bo_ptr; struct amdgpu_bo *bo_ptr;
uint64_t gpu_addr; uint64_t gpu_addr;
@@ -148,7 +148,7 @@ struct amdgpu_dm_backlight_caps {
* @soc_bounding_box: SOC bounding box values provided by gpu_info FW * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
* @cached_state: Caches device atomic state for suspend/resume * @cached_state: Caches device atomic state for suspend/resume
* @cached_dc_state: Cached state of content streams * @cached_dc_state: Cached state of content streams
* @compressor: Frame buffer compression buffer. See &struct dm_comressor_info * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
* @force_timing_sync: set via debugfs. When set, indicates that all connected * @force_timing_sync: set via debugfs. When set, indicates that all connected
* displays will be forced to synchronize. * displays will be forced to synchronize.
*/ */
@@ -324,7 +324,7 @@ struct amdgpu_display_manager {
struct drm_atomic_state *cached_state; struct drm_atomic_state *cached_state;
struct dc_state *cached_dc_state; struct dc_state *cached_dc_state;
struct dm_comressor_info compressor; struct dm_compressor_info compressor;
const struct firmware *fw_dmcu; const struct firmware *fw_dmcu;
uint32_t dmcu_fw_version; uint32_t dmcu_fw_version;

View File

@@ -256,6 +256,7 @@ enum rdma_ch_state {
* @rdma_cm: See below. * @rdma_cm: See below.
* @rdma_cm.cm_id: RDMA CM ID associated with the channel. * @rdma_cm.cm_id: RDMA CM ID associated with the channel.
* @cq: IB completion queue for this channel. * @cq: IB completion queue for this channel.
* @cq_size: Number of CQEs in @cq.
* @zw_cqe: Zero-length write CQE. * @zw_cqe: Zero-length write CQE.
* @rcu: RCU head. * @rcu: RCU head.
* @kref: kref for this channel. * @kref: kref for this channel.

View File

@@ -112,7 +112,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
u64 dma_end = 0; u64 dma_end = 0;
/* Determine the overall bounds of all DMA regions */ /* Determine the overall bounds of all DMA regions */
for (dma_start = ~0ULL; r->size; r++) { for (dma_start = ~0; r->size; r++) {
/* Take lower and upper limits */ /* Take lower and upper limits */
if (r->dma_start < dma_start) if (r->dma_start < dma_start)
dma_start = r->dma_start; dma_start = r->dma_start;

View File

@@ -24,7 +24,7 @@ description:
In addition, it is recommended to declare a mmc-pwrseq on SDIO host above In addition, it is recommended to declare a mmc-pwrseq on SDIO host above
WFx. Without it, you may encounter issues with warm boot. The mmc-pwrseq WFx. Without it, you may encounter issues with warm boot. The mmc-pwrseq
should be compatible with mmc-pwrseq-simple. Please consult should be compatible with mmc-pwrseq-simple. Please consult
Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt for more Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml for more
information. information.
For SPI':' For SPI':'

View File

@@ -484,7 +484,7 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd,
return 0; return 0;
} }
static inline int do_fontx_ioctl(int cmd, static inline int do_fontx_ioctl(struct vc_data *vc, int cmd,
struct consolefontdesc __user *user_cfd, struct consolefontdesc __user *user_cfd,
struct console_font_op *op) struct console_font_op *op)
{ {
@@ -502,15 +502,16 @@ static inline int do_fontx_ioctl(int cmd,
op->height = cfdarg.charheight; op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount; op->charcount = cfdarg.charcount;
op->data = cfdarg.chardata; op->data = cfdarg.chardata;
return con_font_op(vc_cons[fg_console].d, op); return con_font_op(vc, op);
case GIO_FONTX: {
case GIO_FONTX:
op->op = KD_FONT_OP_GET; op->op = KD_FONT_OP_GET;
op->flags = KD_FONT_FLAG_OLD; op->flags = KD_FONT_FLAG_OLD;
op->width = 8; op->width = 8;
op->height = cfdarg.charheight; op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount; op->charcount = cfdarg.charcount;
op->data = cfdarg.chardata; op->data = cfdarg.chardata;
i = con_font_op(vc_cons[fg_console].d, op); i = con_font_op(vc, op);
if (i) if (i)
return i; return i;
cfdarg.charheight = op->height; cfdarg.charheight = op->height;
@@ -519,11 +520,10 @@ static inline int do_fontx_ioctl(int cmd,
return -EFAULT; return -EFAULT;
return 0; return 0;
} }
}
return -EINVAL; return -EINVAL;
} }
static int vt_io_fontreset(struct console_font_op *op) static int vt_io_fontreset(struct vc_data *vc, struct console_font_op *op)
{ {
int ret; int ret;
@@ -537,12 +537,12 @@ static int vt_io_fontreset(struct console_font_op *op)
op->op = KD_FONT_OP_SET_DEFAULT; op->op = KD_FONT_OP_SET_DEFAULT;
op->data = NULL; op->data = NULL;
ret = con_font_op(vc_cons[fg_console].d, op); ret = con_font_op(vc, op);
if (ret) if (ret)
return ret; return ret;
console_lock(); console_lock();
con_set_default_unimap(vc_cons[fg_console].d); con_set_default_unimap(vc);
console_unlock(); console_unlock();
return 0; return 0;
@@ -584,7 +584,7 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
op.height = 0; op.height = 0;
op.charcount = 256; op.charcount = 256;
op.data = up; op.data = up;
return con_font_op(vc_cons[fg_console].d, &op); return con_font_op(vc, &op);
case GIO_FONT: case GIO_FONT:
op.op = KD_FONT_OP_GET; op.op = KD_FONT_OP_GET;
@@ -593,7 +593,7 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
op.height = 32; op.height = 32;
op.charcount = 256; op.charcount = 256;
op.data = up; op.data = up;
return con_font_op(vc_cons[fg_console].d, &op); return con_font_op(vc, &op);
case PIO_CMAP: case PIO_CMAP:
if (!perm) if (!perm)
@@ -609,13 +609,13 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up,
fallthrough; fallthrough;
case GIO_FONTX: case GIO_FONTX:
return do_fontx_ioctl(cmd, up, &op); return do_fontx_ioctl(vc, cmd, up, &op);
case PIO_FONTRESET: case PIO_FONTRESET:
if (!perm) if (!perm)
return -EPERM; return -EPERM;
return vt_io_fontreset(&op); return vt_io_fontreset(vc, &op);
case PIO_SCRNMAP: case PIO_SCRNMAP:
if (!perm) if (!perm)
@@ -1066,7 +1066,8 @@ struct compat_consolefontdesc {
}; };
static inline int static inline int
compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, compat_fontx_ioctl(struct vc_data *vc, int cmd,
struct compat_consolefontdesc __user *user_cfd,
int perm, struct console_font_op *op) int perm, struct console_font_op *op)
{ {
struct compat_consolefontdesc cfdarg; struct compat_consolefontdesc cfdarg;
@@ -1085,7 +1086,8 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
op->height = cfdarg.charheight; op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount; op->charcount = cfdarg.charcount;
op->data = compat_ptr(cfdarg.chardata); op->data = compat_ptr(cfdarg.chardata);
return con_font_op(vc_cons[fg_console].d, op); return con_font_op(vc, op);
case GIO_FONTX: case GIO_FONTX:
op->op = KD_FONT_OP_GET; op->op = KD_FONT_OP_GET;
op->flags = KD_FONT_FLAG_OLD; op->flags = KD_FONT_FLAG_OLD;
@@ -1093,7 +1095,7 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd,
op->height = cfdarg.charheight; op->height = cfdarg.charheight;
op->charcount = cfdarg.charcount; op->charcount = cfdarg.charcount;
op->data = compat_ptr(cfdarg.chardata); op->data = compat_ptr(cfdarg.chardata);
i = con_font_op(vc_cons[fg_console].d, op); i = con_font_op(vc, op);
if (i) if (i)
return i; return i;
cfdarg.charheight = op->height; cfdarg.charheight = op->height;
@@ -1183,7 +1185,7 @@ long vt_compat_ioctl(struct tty_struct *tty,
*/ */
case PIO_FONTX: case PIO_FONTX:
case GIO_FONTX: case GIO_FONTX:
return compat_fontx_ioctl(cmd, up, perm, &op); return compat_fontx_ioctl(vc, cmd, up, perm, &op);
case KDFONTOP: case KDFONTOP:
return compat_kdfontop_ioctl(up, perm, &op, vc); return compat_kdfontop_ioctl(up, perm, &op, vc);

View File

@@ -148,11 +148,6 @@ static const struct xattr_handler afs_xattr_afs_acl_handler = {
.set = afs_xattr_set_acl, .set = afs_xattr_set_acl,
}; };
static void yfs_acl_put(struct afs_operation *op)
{
yfs_free_opaque_acl(op->yacl);
}
static const struct afs_operation_ops yfs_fetch_opaque_acl_operation = { static const struct afs_operation_ops yfs_fetch_opaque_acl_operation = {
.issue_yfs_rpc = yfs_fs_fetch_opaque_acl, .issue_yfs_rpc = yfs_fs_fetch_opaque_acl,
.success = afs_acl_success, .success = afs_acl_success,
@@ -246,7 +241,7 @@ error:
static const struct afs_operation_ops yfs_store_opaque_acl2_operation = { static const struct afs_operation_ops yfs_store_opaque_acl2_operation = {
.issue_yfs_rpc = yfs_fs_store_opaque_acl2, .issue_yfs_rpc = yfs_fs_store_opaque_acl2,
.success = afs_acl_success, .success = afs_acl_success,
.put = yfs_acl_put, .put = afs_acl_put,
}; };
/* /*

View File

@@ -1990,6 +1990,7 @@ void yfs_fs_store_opaque_acl2(struct afs_operation *op)
memcpy(bp, acl->data, acl->size); memcpy(bp, acl->data, acl->size);
if (acl->size != size) if (acl->size != size)
memset((void *)bp + acl->size, 0, size - acl->size); memset((void *)bp + acl->size, 0, size - acl->size);
bp += size / sizeof(__be32);
yfs_check_req(call, bp); yfs_check_req(call, bp);
trace_afs_make_fs_call(call, &vp->fid); trace_afs_make_fs_call(call, &vp->fid);

View File

@@ -1050,6 +1050,8 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) / oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
OOM_SCORE_ADJ_MAX; OOM_SCORE_ADJ_MAX;
put_task_struct(task); put_task_struct(task);
if (oom_adj > OOM_ADJUST_MAX)
oom_adj = OOM_ADJUST_MAX;
len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj); len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj);
return simple_read_from_buffer(buf, count, ppos, buffer, len); return simple_read_from_buffer(buf, count, ppos, buffer, len);
} }

View File

@@ -235,6 +235,8 @@ enum hctx_type {
* @flags: Zero or more BLK_MQ_F_* flags. * @flags: Zero or more BLK_MQ_F_* flags.
* @driver_data: Pointer to data owned by the block driver that created this * @driver_data: Pointer to data owned by the block driver that created this
* tag set. * tag set.
* @active_queues_shared_sbitmap:
* number of active request queues per tag set.
* @__bitmap_tags: A shared tags sbitmap, used over all hctx's * @__bitmap_tags: A shared tags sbitmap, used over all hctx's
* @__breserved_tags: * @__breserved_tags:
* A shared reserved tags sbitmap, used over all hctx's * A shared reserved tags sbitmap, used over all hctx's

View File

@@ -2760,6 +2760,15 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
return VM_FAULT_NOPAGE; return VM_FAULT_NOPAGE;
} }
#ifndef io_remap_pfn_range
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn,
unsigned long size, pgprot_t prot)
{
return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
}
#endif
static inline vm_fault_t vmf_error(int err) static inline vm_fault_t vmf_error(int err)
{ {
if (err == -ENOMEM) if (err == -ENOMEM)

View File

@@ -344,9 +344,9 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
/** /**
* find_lock_page - locate, pin and lock a pagecache page * find_lock_page - locate, pin and lock a pagecache page
* @mapping: the address_space to search * @mapping: the address_space to search
* @offset: the page index * @index: the page index
* *
* Looks up the page cache entry at @mapping & @offset. If there is a * Looks up the page cache entry at @mapping & @index. If there is a
* page cache page, it is returned locked and with an increased * page cache page, it is returned locked and with an increased
* refcount. * refcount.
* *
@@ -363,9 +363,9 @@ static inline struct page *find_lock_page(struct address_space *mapping,
/** /**
* find_lock_head - Locate, pin and lock a pagecache page. * find_lock_head - Locate, pin and lock a pagecache page.
* @mapping: The address_space to search. * @mapping: The address_space to search.
* @offset: The page index. * @index: The page index.
* *
* Looks up the page cache entry at @mapping & @offset. If there is a * Looks up the page cache entry at @mapping & @index. If there is a
* page cache page, its head page is returned locked and with an increased * page cache page, its head page is returned locked and with an increased
* refcount. * refcount.
* *

View File

@@ -1427,10 +1427,6 @@ typedef unsigned int pgtbl_mod_mask;
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#ifndef io_remap_pfn_range
#define io_remap_pfn_range remap_pfn_range
#endif
#ifndef has_transparent_hugepage #ifndef has_transparent_hugepage
#ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1 #define has_transparent_hugepage() 1

View File

@@ -147,16 +147,8 @@ typedef enum {
PHY_INTERFACE_MODE_MAX, PHY_INTERFACE_MODE_MAX,
} phy_interface_t; } phy_interface_t;
/** /*
* phy_supported_speeds - return all speeds currently supported by a PHY device * phy_supported_speeds - return all speeds currently supported by a PHY device
* @phy: The PHY device to return supported speeds of.
* @speeds: buffer to store supported speeds in.
* @size: size of speeds buffer.
*
* Description: Returns the number of supported speeds, and fills
* the speeds buffer with the supported speeds. If speeds buffer is
* too small to contain all currently supported speeds, will return as
* many speeds as can fit.
*/ */
unsigned int phy_supported_speeds(struct phy_device *phy, unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int *speeds, unsigned int *speeds,
@@ -1022,14 +1014,9 @@ static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum,
regnum, mask, set); regnum, mask, set);
} }
/** /*
* phy_read_mmd - Convenience function for reading a register * phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY. * from an MMD on a given PHY.
* @phydev: The phy_device struct
* @devad: The MMD to read from
* @regnum: The register on the MMD to read
*
* Same rules as for phy_read();
*/ */
int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
@@ -1064,38 +1051,21 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
__ret; \ __ret; \
}) })
/** /*
* __phy_read_mmd - Convenience function for reading a register * __phy_read_mmd - Convenience function for reading a register
* from an MMD on a given PHY. * from an MMD on a given PHY.
* @phydev: The phy_device struct
* @devad: The MMD to read from
* @regnum: The register on the MMD to read
*
* Same rules as for __phy_read();
*/ */
int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
/** /*
* phy_write_mmd - Convenience function for writing a register * phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY. * on an MMD on a given PHY.
* @phydev: The phy_device struct
* @devad: The MMD to write to
* @regnum: The register on the MMD to read
* @val: value to write to @regnum
*
* Same rules as for phy_write();
*/ */
int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
/** /*
* __phy_write_mmd - Convenience function for writing a register * __phy_write_mmd - Convenience function for writing a register
* on an MMD on a given PHY. * on an MMD on a given PHY.
* @phydev: The phy_device struct
* @devad: The MMD to write to
* @regnum: The register on the MMD to read
* @val: value to write to @regnum
*
* Same rules as for __phy_write();
*/ */
int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);

View File

@@ -147,24 +147,6 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs); return atomic_read(&r->refs);
} }
/**
* refcount_add_not_zero - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
* @r: the refcount
*
* Will saturate at REFCOUNT_SATURATED and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See the comment on top.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
* cases, refcount_inc(), or one of its variants, should instead be used to
* increment a reference count.
*
* Return: false if the passed refcount is 0, true otherwise
*/
static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp) static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
{ {
int old = refcount_read(r); int old = refcount_read(r);
@@ -183,11 +165,42 @@ static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, in
return old; return old;
} }
/**
* refcount_add_not_zero - add a value to a refcount unless it is 0
* @i: the value to add to the refcount
* @r: the refcount
*
* Will saturate at REFCOUNT_SATURATED and WARN.
*
* Provides no memory ordering, it is assumed the caller has guaranteed the
* object memory to be stable (RCU, etc.). It does provide a control dependency
* and thereby orders future stores. See the comment on top.
*
* Use of this function is not recommended for the normal reference counting
* use case in which references are taken and released one at a time. In these
* cases, refcount_inc(), or one of its variants, should instead be used to
* increment a reference count.
*
* Return: false if the passed refcount is 0, true otherwise
*/
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r) static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{ {
return __refcount_add_not_zero(i, r, NULL); return __refcount_add_not_zero(i, r, NULL);
} }
static inline void __refcount_add(int i, refcount_t *r, int *oldp)
{
int old = atomic_fetch_add_relaxed(i, &r->refs);
if (oldp)
*oldp = old;
if (unlikely(!old))
refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
else if (unlikely(old < 0 || old + i < 0))
refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}
/** /**
* refcount_add - add a value to a refcount * refcount_add - add a value to a refcount
* @i: the value to add to the refcount * @i: the value to add to the refcount
@@ -204,24 +217,16 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
* cases, refcount_inc(), or one of its variants, should instead be used to * cases, refcount_inc(), or one of its variants, should instead be used to
* increment a reference count. * increment a reference count.
*/ */
static inline void __refcount_add(int i, refcount_t *r, int *oldp)
{
int old = atomic_fetch_add_relaxed(i, &r->refs);
if (oldp)
*oldp = old;
if (unlikely(!old))
refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
else if (unlikely(old < 0 || old + i < 0))
refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}
static inline void refcount_add(int i, refcount_t *r) static inline void refcount_add(int i, refcount_t *r)
{ {
__refcount_add(i, r, NULL); __refcount_add(i, r, NULL);
} }
static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
{
return __refcount_add_not_zero(1, r, oldp);
}
/** /**
* refcount_inc_not_zero - increment a refcount unless it is 0 * refcount_inc_not_zero - increment a refcount unless it is 0
* @r: the refcount to increment * @r: the refcount to increment
@@ -235,16 +240,16 @@ static inline void refcount_add(int i, refcount_t *r)
* *
* Return: true if the increment was successful, false otherwise * Return: true if the increment was successful, false otherwise
*/ */
static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
{
return __refcount_add_not_zero(1, r, oldp);
}
static inline __must_check bool refcount_inc_not_zero(refcount_t *r) static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{ {
return __refcount_inc_not_zero(r, NULL); return __refcount_inc_not_zero(r, NULL);
} }
static inline void __refcount_inc(refcount_t *r, int *oldp)
{
__refcount_add(1, r, oldp);
}
/** /**
* refcount_inc - increment a refcount * refcount_inc - increment a refcount
* @r: the refcount to increment * @r: the refcount to increment
@@ -257,16 +262,29 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
* Will WARN if the refcount is 0, as this represents a possible use-after-free * Will WARN if the refcount is 0, as this represents a possible use-after-free
* condition. * condition.
*/ */
static inline void __refcount_inc(refcount_t *r, int *oldp)
{
__refcount_add(1, r, oldp);
}
static inline void refcount_inc(refcount_t *r) static inline void refcount_inc(refcount_t *r)
{ {
__refcount_inc(r, NULL); __refcount_inc(r, NULL);
} }
static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
int old = atomic_fetch_sub_release(i, &r->refs);
if (oldp)
*oldp = old;
if (old == i) {
smp_acquire__after_ctrl_dep();
return true;
}
if (unlikely(old < 0 || old - i < 0))
refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
return false;
}
/** /**
* refcount_sub_and_test - subtract from a refcount and test if it is 0 * refcount_sub_and_test - subtract from a refcount and test if it is 0
* @i: amount to subtract from the refcount * @i: amount to subtract from the refcount
@@ -287,29 +305,16 @@ static inline void refcount_inc(refcount_t *r)
* *
* Return: true if the resulting refcount is 0, false otherwise * Return: true if the resulting refcount is 0, false otherwise
*/ */
static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
int old = atomic_fetch_sub_release(i, &r->refs);
if (oldp)
*oldp = old;
if (old == i) {
smp_acquire__after_ctrl_dep();
return true;
}
if (unlikely(old < 0 || old - i < 0))
refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
return false;
}
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r) static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{ {
return __refcount_sub_and_test(i, r, NULL); return __refcount_sub_and_test(i, r, NULL);
} }
static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
{
return __refcount_sub_and_test(1, r, oldp);
}
/** /**
* refcount_dec_and_test - decrement a refcount and test if it is 0 * refcount_dec_and_test - decrement a refcount and test if it is 0
* @r: the refcount * @r: the refcount
@@ -323,16 +328,22 @@ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
* *
* Return: true if the resulting refcount is 0, false otherwise * Return: true if the resulting refcount is 0, false otherwise
*/ */
static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
{
return __refcount_sub_and_test(1, r, oldp);
}
static inline __must_check bool refcount_dec_and_test(refcount_t *r) static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{ {
return __refcount_dec_and_test(r, NULL); return __refcount_dec_and_test(r, NULL);
} }
static inline void __refcount_dec(refcount_t *r, int *oldp)
{
int old = atomic_fetch_sub_release(1, &r->refs);
if (oldp)
*oldp = old;
if (unlikely(old <= 1))
refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
/** /**
* refcount_dec - decrement a refcount * refcount_dec - decrement a refcount
* @r: the refcount * @r: the refcount
@@ -343,17 +354,6 @@ static inline __must_check bool refcount_dec_and_test(refcount_t *r)
* Provides release memory ordering, such that prior loads and stores are done * Provides release memory ordering, such that prior loads and stores are done
* before. * before.
*/ */
static inline void __refcount_dec(refcount_t *r, int *oldp)
{
int old = atomic_fetch_sub_release(1, &r->refs);
if (oldp)
*oldp = old;
if (unlikely(old <= 1))
refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
static inline void refcount_dec(refcount_t *r) static inline void refcount_dec(refcount_t *r)
{ {
__refcount_dec(r, NULL); __refcount_dec(r, NULL);

View File

@@ -225,8 +225,7 @@ static long hung_timeout_jiffies(unsigned long last_checked,
* Process updating of timeout sysctl * Process updating of timeout sysctl
*/ */
int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
void __user *buffer, void *buffer, size_t *lenp, loff_t *ppos)
size_t *lenp, loff_t *ppos)
{ {
int ret; int ret;

View File

@@ -897,6 +897,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
/* Move the work from worker->delayed_work_list. */ /* Move the work from worker->delayed_work_list. */
WARN_ON_ONCE(list_empty(&work->node)); WARN_ON_ONCE(list_empty(&work->node));
list_del_init(&work->node); list_del_init(&work->node);
if (!work->canceling)
kthread_insert_work(worker, work, &worker->work_list); kthread_insert_work(worker, work, &worker->work_list);
raw_spin_unlock_irqrestore(&worker->lock, flags); raw_spin_unlock_irqrestore(&worker->lock, flags);

View File

@@ -391,16 +391,17 @@ static bool task_participate_group_stop(struct task_struct *task)
void task_join_group_stop(struct task_struct *task) void task_join_group_stop(struct task_struct *task)
{ {
/* Have the new thread join an on-going signal group stop */ unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
unsigned long jobctl = current->jobctl;
if (jobctl & JOBCTL_STOP_PENDING) {
struct signal_struct *sig = current->signal; struct signal_struct *sig = current->signal;
unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; if (sig->group_stop_count) {
if (task_set_jobctl_pending(task, signr | gstop)) {
sig->group_stop_count++; sig->group_stop_count++;
} mask |= JOBCTL_STOP_CONSUME;
} } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
return;
/* Have the new thread join an on-going signal group stop */
task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
} }
/* /*

View File

@@ -683,7 +683,6 @@ static int __init crc32c_test(void)
/* reduce OS noise */ /* reduce OS noise */
local_irq_save(flags); local_irq_save(flags);
local_irq_disable();
nsec = ktime_get_ns(); nsec = ktime_get_ns();
for (i = 0; i < 100; i++) { for (i = 0; i < 100; i++) {
@@ -694,7 +693,6 @@ static int __init crc32c_test(void)
nsec = ktime_get_ns() - nsec; nsec = ktime_get_ns() - nsec;
local_irq_restore(flags); local_irq_restore(flags);
local_irq_enable();
pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS); pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
@@ -768,7 +766,6 @@ static int __init crc32_test(void)
/* reduce OS noise */ /* reduce OS noise */
local_irq_save(flags); local_irq_save(flags);
local_irq_disable();
nsec = ktime_get_ns(); nsec = ktime_get_ns();
for (i = 0; i < 100; i++) { for (i = 0; i < 100; i++) {
@@ -783,7 +780,6 @@ static int __init crc32_test(void)
nsec = ktime_get_ns() - nsec; nsec = ktime_get_ns() - nsec;
local_irq_restore(flags); local_irq_restore(flags);
local_irq_enable();
pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n", pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
CRC_LE_BITS, CRC_BE_BITS); CRC_LE_BITS, CRC_BE_BITS);

View File

@@ -216,6 +216,12 @@ static void kmalloc_oob_16(struct kunit *test)
u64 words[2]; u64 words[2];
} *ptr1, *ptr2; } *ptr1, *ptr2;
/* This test is specifically crafted for the generic mode. */
if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
return;
}
ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL); ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
@@ -227,6 +233,23 @@ static void kmalloc_oob_16(struct kunit *test)
kfree(ptr2); kfree(ptr2);
} }
static void kmalloc_uaf_16(struct kunit *test)
{
struct {
u64 words[2];
} *ptr1, *ptr2;
ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
kfree(ptr2);
KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
kfree(ptr1);
}
static void kmalloc_oob_memset_2(struct kunit *test) static void kmalloc_oob_memset_2(struct kunit *test)
{ {
char *ptr; char *ptr;
@@ -429,6 +452,12 @@ static void kasan_global_oob(struct kunit *test)
volatile int i = 3; volatile int i = 3;
char *p = &global_array[ARRAY_SIZE(global_array) + i]; char *p = &global_array[ARRAY_SIZE(global_array) + i];
/* Only generic mode instruments globals. */
if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
kunit_info(test, "CONFIG_KASAN_GENERIC required");
return;
}
KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p); KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
} }
@@ -467,6 +496,12 @@ static void kasan_alloca_oob_left(struct kunit *test)
char alloca_array[i]; char alloca_array[i];
char *p = alloca_array - 1; char *p = alloca_array - 1;
/* Only generic mode instruments dynamic allocas. */
if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
kunit_info(test, "CONFIG_KASAN_GENERIC required");
return;
}
if (!IS_ENABLED(CONFIG_KASAN_STACK)) { if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
kunit_info(test, "CONFIG_KASAN_STACK is not enabled"); kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
return; return;
@@ -481,6 +516,12 @@ static void kasan_alloca_oob_right(struct kunit *test)
char alloca_array[i]; char alloca_array[i];
char *p = alloca_array + i; char *p = alloca_array + i;
/* Only generic mode instruments dynamic allocas. */
if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
kunit_info(test, "CONFIG_KASAN_GENERIC required");
return;
}
if (!IS_ENABLED(CONFIG_KASAN_STACK)) { if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
kunit_info(test, "CONFIG_KASAN_STACK is not enabled"); kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
return; return;
@@ -551,6 +592,9 @@ static void kasan_memchr(struct kunit *test)
return; return;
} }
if (OOB_TAG_OFF)
size = round_up(size, OOB_TAG_OFF);
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -573,6 +617,9 @@ static void kasan_memcmp(struct kunit *test)
return; return;
} }
if (OOB_TAG_OFF)
size = round_up(size, OOB_TAG_OFF);
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
memset(arr, 0, sizeof(arr)); memset(arr, 0, sizeof(arr));
@@ -619,13 +666,50 @@ static void kasan_strings(struct kunit *test)
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1)); KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
} }
static void kasan_bitops(struct kunit *test) static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{ {
KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}
static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
#if defined(clear_bit_unlock_is_negative_byte)
KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}
static void kasan_bitops_generic(struct kunit *test)
{
long *bits;
/* This test is specifically crafted for the generic mode. */
if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
return;
}
/* /*
* Allocate 1 more byte, which causes kzalloc to round up to 16-bytes; * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
* this way we do not actually corrupt other memory. * this way we do not actually corrupt other memory.
*/ */
long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL); bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
/* /*
@@ -633,55 +717,34 @@ static void kasan_bitops(struct kunit *test)
* below accesses are still out-of-bounds, since bitops are defined to * below accesses are still out-of-bounds, since bitops are defined to
* operate on the whole long the bit is in. * operate on the whole long the bit is in.
*/ */
KUNIT_EXPECT_KASAN_FAIL(test, set_bit(BITS_PER_LONG, bits)); kasan_bitops_modify(test, BITS_PER_LONG, bits);
KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(BITS_PER_LONG, bits));
KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(BITS_PER_LONG, bits));
KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(BITS_PER_LONG, bits));
KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(BITS_PER_LONG, bits));
KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(BITS_PER_LONG, bits));
KUNIT_EXPECT_KASAN_FAIL(test, change_bit(BITS_PER_LONG, bits));
KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(BITS_PER_LONG, bits));
/* /*
* Below calls try to access bit beyond allocated memory. * Below calls try to access bit beyond allocated memory.
*/ */
KUNIT_EXPECT_KASAN_FAIL(test, kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
KUNIT_EXPECT_KASAN_FAIL(test, kfree(bits);
__test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); }
KUNIT_EXPECT_KASAN_FAIL(test, static void kasan_bitops_tags(struct kunit *test)
test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits)); {
long *bits;
KUNIT_EXPECT_KASAN_FAIL(test, /* This test is specifically crafted for the tag-based mode. */
test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
kunit_info(test, "CONFIG_KASAN_SW_TAGS required\n");
return;
}
KUNIT_EXPECT_KASAN_FAIL(test, /* Allocation size will be rounded to up granule size, which is 16. */
__test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); bits = kzalloc(sizeof(*bits), GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
KUNIT_EXPECT_KASAN_FAIL(test, /* Do the accesses past the 16 allocated bytes. */
test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);
KUNIT_EXPECT_KASAN_FAIL(test,
__test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
KUNIT_EXPECT_KASAN_FAIL(test,
kasan_int_result =
test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
#if defined(clear_bit_unlock_is_negative_byte)
KUNIT_EXPECT_KASAN_FAIL(test,
kasan_int_result = clear_bit_unlock_is_negative_byte(
BITS_PER_LONG + BITS_PER_BYTE, bits));
#endif
kfree(bits); kfree(bits);
} }
@@ -728,6 +791,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_oob_krealloc_more), KUNIT_CASE(kmalloc_oob_krealloc_more),
KUNIT_CASE(kmalloc_oob_krealloc_less), KUNIT_CASE(kmalloc_oob_krealloc_less),
KUNIT_CASE(kmalloc_oob_16), KUNIT_CASE(kmalloc_oob_16),
KUNIT_CASE(kmalloc_uaf_16),
KUNIT_CASE(kmalloc_oob_in_memset), KUNIT_CASE(kmalloc_oob_in_memset),
KUNIT_CASE(kmalloc_oob_memset_2), KUNIT_CASE(kmalloc_oob_memset_2),
KUNIT_CASE(kmalloc_oob_memset_4), KUNIT_CASE(kmalloc_oob_memset_4),
@@ -751,7 +815,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kasan_memchr), KUNIT_CASE(kasan_memchr),
KUNIT_CASE(kasan_memcmp), KUNIT_CASE(kasan_memcmp),
KUNIT_CASE(kasan_strings), KUNIT_CASE(kasan_strings),
KUNIT_CASE(kasan_bitops), KUNIT_CASE(kasan_bitops_generic),
KUNIT_CASE(kasan_bitops_tags),
KUNIT_CASE(kmalloc_double_kzfree), KUNIT_CASE(kmalloc_double_kzfree),
KUNIT_CASE(vmalloc_oob), KUNIT_CASE(vmalloc_oob),
{} {}

View File

@@ -648,6 +648,8 @@ retry:
} }
del += t - f; del += t - f;
hugetlb_cgroup_uncharge_file_region(
resv, rg, t - f);
/* New entry for end of split region */ /* New entry for end of split region */
nrg->from = t; nrg->from = t;
@@ -660,9 +662,6 @@ retry:
/* Original entry is trimmed */ /* Original entry is trimmed */
rg->to = f; rg->to = f;
hugetlb_cgroup_uncharge_file_region(
resv, rg, nrg->to - nrg->from);
list_add(&nrg->link, &rg->link); list_add(&nrg->link, &rg->link);
nrg = NULL; nrg = NULL;
break; break;
@@ -678,17 +677,17 @@ retry:
} }
if (f <= rg->from) { /* Trim beginning of region */ if (f <= rg->from) { /* Trim beginning of region */
del += t - rg->from;
rg->from = t;
hugetlb_cgroup_uncharge_file_region(resv, rg, hugetlb_cgroup_uncharge_file_region(resv, rg,
t - rg->from); t - rg->from);
} else { /* Trim end of region */
del += rg->to - f;
rg->to = f;
del += t - rg->from;
rg->from = t;
} else { /* Trim end of region */
hugetlb_cgroup_uncharge_file_region(resv, rg, hugetlb_cgroup_uncharge_file_region(resv, rg,
rg->to - f); rg->to - f);
del += rg->to - f;
rg->to = f;
} }
} }
@@ -2443,6 +2442,9 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
rsv_adjust = hugepage_subpool_put_pages(spool, 1); rsv_adjust = hugepage_subpool_put_pages(spool, 1);
hugetlb_acct_memory(h, -rsv_adjust); hugetlb_acct_memory(h, -rsv_adjust);
if (deferred_reserve)
hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
pages_per_huge_page(h), page);
} }
return page; return page;

View File

@@ -4110,11 +4110,17 @@ static int memcg_stat_show(struct seq_file *m, void *v)
(u64)memsw * PAGE_SIZE); (u64)memsw * PAGE_SIZE);
for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
unsigned long nr;
if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
continue; continue;
nr = memcg_page_state(memcg, memcg1_stats[i]);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (memcg1_stats[i] == NR_ANON_THPS)
nr *= HPAGE_PMD_NR;
#endif
seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
(u64)memcg_page_state(memcg, memcg1_stats[i]) * (u64)nr * PAGE_SIZE);
PAGE_SIZE);
} }
for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
@@ -5339,17 +5345,22 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
memcg->swappiness = mem_cgroup_swappiness(parent); memcg->swappiness = mem_cgroup_swappiness(parent);
memcg->oom_kill_disable = parent->oom_kill_disable; memcg->oom_kill_disable = parent->oom_kill_disable;
} }
if (parent && parent->use_hierarchy) { if (!parent) {
page_counter_init(&memcg->memory, NULL);
page_counter_init(&memcg->swap, NULL);
page_counter_init(&memcg->kmem, NULL);
page_counter_init(&memcg->tcpmem, NULL);
} else if (parent->use_hierarchy) {
memcg->use_hierarchy = true; memcg->use_hierarchy = true;
page_counter_init(&memcg->memory, &parent->memory); page_counter_init(&memcg->memory, &parent->memory);
page_counter_init(&memcg->swap, &parent->swap); page_counter_init(&memcg->swap, &parent->swap);
page_counter_init(&memcg->kmem, &parent->kmem); page_counter_init(&memcg->kmem, &parent->kmem);
page_counter_init(&memcg->tcpmem, &parent->tcpmem); page_counter_init(&memcg->tcpmem, &parent->tcpmem);
} else { } else {
page_counter_init(&memcg->memory, NULL); page_counter_init(&memcg->memory, &root_mem_cgroup->memory);
page_counter_init(&memcg->swap, NULL); page_counter_init(&memcg->swap, &root_mem_cgroup->swap);
page_counter_init(&memcg->kmem, NULL); page_counter_init(&memcg->kmem, &root_mem_cgroup->kmem);
page_counter_init(&memcg->tcpmem, NULL); page_counter_init(&memcg->tcpmem, &root_mem_cgroup->tcpmem);
/* /*
* Deeper hierachy with use_hierarchy == false doesn't make * Deeper hierachy with use_hierarchy == false doesn't make
* much sense so let cgroup subsystem know about this * much sense so let cgroup subsystem know about this

View File

@@ -525,7 +525,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long flags = qp->flags; unsigned long flags = qp->flags;
int ret; int ret;
bool has_unmovable = false; bool has_unmovable = false;
pte_t *pte; pte_t *pte, *mapped_pte;
spinlock_t *ptl; spinlock_t *ptl;
ptl = pmd_trans_huge_lock(pmd, vma); ptl = pmd_trans_huge_lock(pmd, vma);
@@ -539,7 +539,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
if (pmd_trans_unstable(pmd)) if (pmd_trans_unstable(pmd))
return 0; return 0;
pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) { for (; addr != end; pte++, addr += PAGE_SIZE) {
if (!pte_present(*pte)) if (!pte_present(*pte))
continue; continue;
@@ -571,7 +571,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
} else } else
break; break;
} }
pte_unmap_unlock(pte - 1, ptl); pte_unmap_unlock(mapped_pte, ptl);
cond_resched(); cond_resched();
if (has_unmovable) if (has_unmovable)

View File

@@ -41,28 +41,24 @@ EXPORT_SYMBOL_GPL(memremap_compat_align);
DEFINE_STATIC_KEY_FALSE(devmap_managed_key); DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key); EXPORT_SYMBOL(devmap_managed_key);
static void devmap_managed_enable_put(void) static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{ {
if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
pgmap->type == MEMORY_DEVICE_FS_DAX)
static_branch_dec(&devmap_managed_key); static_branch_dec(&devmap_managed_key);
} }
static int devmap_managed_enable_get(struct dev_pagemap *pgmap) static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{ {
if (pgmap->type == MEMORY_DEVICE_PRIVATE && if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
(!pgmap->ops || !pgmap->ops->page_free)) { pgmap->type == MEMORY_DEVICE_FS_DAX)
WARN(1, "Missing page_free method\n");
return -EINVAL;
}
static_branch_inc(&devmap_managed_key); static_branch_inc(&devmap_managed_key);
return 0;
} }
#else #else
static int devmap_managed_enable_get(struct dev_pagemap *pgmap) static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{ {
return -EINVAL;
} }
static void devmap_managed_enable_put(void) static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{ {
} }
#endif /* CONFIG_DEV_PAGEMAP_OPS */ #endif /* CONFIG_DEV_PAGEMAP_OPS */
@@ -169,7 +165,7 @@ void memunmap_pages(struct dev_pagemap *pgmap)
pageunmap_range(pgmap, i); pageunmap_range(pgmap, i);
WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n"); WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
devmap_managed_enable_put(); devmap_managed_enable_put(pgmap);
} }
EXPORT_SYMBOL_GPL(memunmap_pages); EXPORT_SYMBOL_GPL(memunmap_pages);
@@ -307,7 +303,6 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
.pgprot = PAGE_KERNEL, .pgprot = PAGE_KERNEL,
}; };
const int nr_range = pgmap->nr_range; const int nr_range = pgmap->nr_range;
bool need_devmap_managed = true;
int error, i; int error, i;
if (WARN_ONCE(!nr_range, "nr_range must be specified\n")) if (WARN_ONCE(!nr_range, "nr_range must be specified\n"))
@@ -323,6 +318,10 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
WARN(1, "Missing migrate_to_ram method\n"); WARN(1, "Missing migrate_to_ram method\n");
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
if (!pgmap->ops->page_free) {
WARN(1, "Missing page_free method\n");
return ERR_PTR(-EINVAL);
}
if (!pgmap->owner) { if (!pgmap->owner) {
WARN(1, "Missing owner\n"); WARN(1, "Missing owner\n");
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
@@ -336,11 +335,9 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
} }
break; break;
case MEMORY_DEVICE_GENERIC: case MEMORY_DEVICE_GENERIC:
need_devmap_managed = false;
break; break;
case MEMORY_DEVICE_PCI_P2PDMA: case MEMORY_DEVICE_PCI_P2PDMA:
params.pgprot = pgprot_noncached(params.pgprot); params.pgprot = pgprot_noncached(params.pgprot);
need_devmap_managed = false;
break; break;
default: default:
WARN(1, "Invalid pgmap type %d\n", pgmap->type); WARN(1, "Invalid pgmap type %d\n", pgmap->type);
@@ -364,11 +361,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
} }
} }
if (need_devmap_managed) { devmap_managed_enable_get(pgmap);
error = devmap_managed_enable_get(pgmap);
if (error)
return ERR_PTR(error);
}
/* /*
* Clear the pgmap nr_range as it will be incremented for each * Clear the pgmap nr_range as it will be incremented for each

View File

@@ -528,7 +528,7 @@ void truncate_inode_pages_final(struct address_space *mapping)
} }
EXPORT_SYMBOL(truncate_inode_pages_final); EXPORT_SYMBOL(truncate_inode_pages_final);
unsigned long __invalidate_mapping_pages(struct address_space *mapping, static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end, unsigned long *nr_pagevec) pgoff_t start, pgoff_t end, unsigned long *nr_pagevec)
{ {
pgoff_t indices[PAGEVEC_SIZE]; pgoff_t indices[PAGEVEC_SIZE];

View File

@@ -1092,8 +1092,12 @@ sub output_struct_rst(%) {
print "\n\n.. c:type:: " . $name . "\n\n"; print "\n\n.. c:type:: " . $name . "\n\n";
} else { } else {
my $name = $args{'struct'}; my $name = $args{'struct'};
if ($args{'type'} eq 'union') {
print "\n\n.. c:union:: " . $name . "\n\n";
} else {
print "\n\n.. c:struct:: " . $name . "\n\n"; print "\n\n.. c:struct:: " . $name . "\n\n";
} }
}
print_lineno($declaration_start_line); print_lineno($declaration_start_line);
$lineprefix = " "; $lineprefix = " ";
output_highlight_rst($args{'purpose'}); output_highlight_rst($args{'purpose'});
@@ -1427,20 +1431,25 @@ sub dump_enum($$) {
} }
} }
my $typedef_type = qr { ((?:\s+[\w\*]+){1,8})\s* }x;
my $typedef_ident = qr { \*?\s*(\w\S+)\s* }x;
my $typedef_args = qr { \s*\((.*)\); }x;
my $typedef1 = qr { typedef$typedef_type\($typedef_ident\)$typedef_args }x;
my $typedef2 = qr { typedef$typedef_type$typedef_ident$typedef_args }x;
sub dump_typedef($$) { sub dump_typedef($$) {
my $x = shift; my $x = shift;
my $file = shift; my $file = shift;
$x =~ s@/\*.*?\*/@@gos; # strip comments. $x =~ s@/\*.*?\*/@@gos; # strip comments.
# Parse function prototypes # Parse function typedef prototypes
if ($x =~ /typedef\s+(\w+)\s*\(\*\s*(\w\S+)\s*\)\s*\((.*)\);/ || if ($x =~ $typedef1 || $x =~ $typedef2) {
$x =~ /typedef\s+(\w+)\s*(\w\S+)\s*\s*\((.*)\);/) {
# Function typedefs
$return_type = $1; $return_type = $1;
$declaration_name = $2; $declaration_name = $2;
my $args = $3; my $args = $3;
$return_type =~ s/^\s+//;
create_parameterlist($args, ',', $file, $declaration_name); create_parameterlist($args, ',', $file, $declaration_name);

View File

@@ -159,6 +159,21 @@ struct kvm_sync_regs {
struct kvm_arch_memory_slot { struct kvm_arch_memory_slot {
}; };
/*
* PMU filter structure. Describe a range of events with a particular
* action. To be used with KVM_ARM_VCPU_PMU_V3_FILTER.
*/
struct kvm_pmu_event_filter {
__u16 base_event;
__u16 nevents;
#define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1
__u8 action;
__u8 pad[3];
};
/* for KVM_GET/SET_VCPU_EVENTS */ /* for KVM_GET/SET_VCPU_EVENTS */
struct kvm_vcpu_events { struct kvm_vcpu_events {
struct { struct {
@@ -242,6 +257,15 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
/*
* Only two states can be presented by the host kernel:
* - NOT_REQUIRED: the guest doesn't need to do anything
* - NOT_AVAIL: the guest isn't mitigated (it can still use SSBS if available)
*
* All the other values are deprecated. The host still accepts all
* values (they are ABI), but will narrow them to the above two.
*/
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2) #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
@@ -329,6 +353,7 @@ struct kvm_vcpu_events {
#define KVM_ARM_VCPU_PMU_V3_CTRL 0 #define KVM_ARM_VCPU_PMU_V3_CTRL 0
#define KVM_ARM_VCPU_PMU_V3_IRQ 0 #define KVM_ARM_VCPU_PMU_V3_IRQ 0
#define KVM_ARM_VCPU_PMU_V3_INIT 1 #define KVM_ARM_VCPU_PMU_V3_INIT 1
#define KVM_ARM_VCPU_PMU_V3_FILTER 2
#define KVM_ARM_VCPU_TIMER_CTRL 1 #define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 #define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1

View File

@@ -29,7 +29,7 @@
{ 0x13, "SIGP conditional emergency signal" }, \ { 0x13, "SIGP conditional emergency signal" }, \
{ 0x15, "SIGP sense running" }, \ { 0x15, "SIGP sense running" }, \
{ 0x16, "SIGP set multithreading"}, \ { 0x16, "SIGP set multithreading"}, \
{ 0x17, "SIGP store additional status ait address"} { 0x17, "SIGP store additional status at address"}
#define icpt_prog_codes \ #define icpt_prog_codes \
{ 0x0001, "Prog Operation" }, \ { 0x0001, "Prog Operation" }, \

View File

@@ -96,7 +96,7 @@
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */ #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */ #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */ #define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
/* free ( 3*32+17) */ #define X86_FEATURE_SME_COHERENT ( 3*32+17) /* "" AMD hardware-enforced cache coherency */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */ #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ #define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
@@ -236,6 +236,7 @@
#define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */ #define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
#define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */ #define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
#define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */ #define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
#define X86_FEATURE_SEV_ES ( 8*32+20) /* AMD Secure Encrypted Virtualization - Encrypted State */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
@@ -288,6 +289,7 @@
#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */ #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */ #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
#define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */ #define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */
#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
@@ -353,6 +355,7 @@
#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ #define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */
#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */ #define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */
#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */ #define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */
#define X86_FEATURE_ENQCMD (16*32+29) /* ENQCMD and ENQCMDS instructions */
/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
@@ -368,6 +371,7 @@
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */ #define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
#define X86_FEATURE_TSXLDTRK (18*32+16) /* TSX Suspend Load Address Tracking */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */ #define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */

View File

@@ -56,6 +56,12 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif #endif
#ifdef CONFIG_IOMMU_SUPPORT
# define DISABLE_ENQCMD 0
#else
# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
#endif
/* /*
* Make sure to add features to the correct mask * Make sure to add features to the correct mask
*/ */
@@ -75,7 +81,8 @@
#define DISABLED_MASK13 0 #define DISABLED_MASK13 0
#define DISABLED_MASK14 0 #define DISABLED_MASK14 0
#define DISABLED_MASK15 0 #define DISABLED_MASK15 0
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP) #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP| \
DISABLE_ENQCMD)
#define DISABLED_MASK17 0 #define DISABLED_MASK17 0
#define DISABLED_MASK18 0 #define DISABLED_MASK18 0
#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)

View File

@@ -257,6 +257,9 @@
#define MSR_IA32_LASTINTFROMIP 0x000001dd #define MSR_IA32_LASTINTFROMIP 0x000001dd
#define MSR_IA32_LASTINTTOIP 0x000001de #define MSR_IA32_LASTINTTOIP 0x000001de
#define MSR_IA32_PASID 0x00000d93
#define MSR_IA32_PASID_VALID BIT_ULL(31)
/* DEBUGCTLMSR bits (others vary by model): */ /* DEBUGCTLMSR bits (others vary by model): */
#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ #define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
#define DEBUGCTLMSR_BTF_SHIFT 1 #define DEBUGCTLMSR_BTF_SHIFT 1
@@ -464,11 +467,15 @@
#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1) #define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
#define MSR_AMD64_IBSCTL 0xc001103a #define MSR_AMD64_IBSCTL 0xc001103a
#define MSR_AMD64_IBSBRTARGET 0xc001103b #define MSR_AMD64_IBSBRTARGET 0xc001103b
#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d #define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
#define MSR_AMD64_SEV 0xc0010131 #define MSR_AMD64_SEV 0xc0010131
#define MSR_AMD64_SEV_ENABLED_BIT 0 #define MSR_AMD64_SEV_ENABLED_BIT 0
#define MSR_AMD64_SEV_ES_ENABLED_BIT 1
#define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT) #define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
#define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT)
#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f #define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
@@ -857,11 +864,14 @@
#define MSR_CORE_PERF_FIXED_CTR0 0x00000309 #define MSR_CORE_PERF_FIXED_CTR0 0x00000309
#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a #define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b #define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
#define MSR_CORE_PERF_FIXED_CTR3 0x0000030c
#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e #define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f #define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
#define MSR_PERF_METRICS 0x00000329
/* PERF_GLOBAL_OVF_CTL bits */ /* PERF_GLOBAL_OVF_CTL bits */
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT 55 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT 55
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT) #define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT)

View File

@@ -54,7 +54,7 @@
#endif #endif
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT #ifdef CONFIG_PARAVIRT_XXL
/* Paravirtualized systems may not have PSE or PGE available */ /* Paravirtualized systems may not have PSE or PGE available */
#define NEED_PSE 0 #define NEED_PSE 0
#define NEED_PGE 0 #define NEED_PGE 0

View File

@@ -192,6 +192,26 @@ struct kvm_msr_list {
__u32 indices[0]; __u32 indices[0];
}; };
/* Maximum size of any access bitmap in bytes */
#define KVM_MSR_FILTER_MAX_BITMAP_SIZE 0x600
/* for KVM_X86_SET_MSR_FILTER */
struct kvm_msr_filter_range {
#define KVM_MSR_FILTER_READ (1 << 0)
#define KVM_MSR_FILTER_WRITE (1 << 1)
__u32 flags;
__u32 nmsrs; /* number of msrs in bitmap */
__u32 base; /* MSR index the bitmap starts at */
__u8 *bitmap; /* a 1 bit allows the operations in flags, 0 denies */
};
#define KVM_MSR_FILTER_MAX_RANGES 16
struct kvm_msr_filter {
#define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0)
#define KVM_MSR_FILTER_DEFAULT_DENY (1 << 0)
__u32 flags;
struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES];
};
struct kvm_cpuid_entry { struct kvm_cpuid_entry {
__u32 function; __u32 function;

View File

@@ -29,6 +29,7 @@
#define SVM_EXIT_WRITE_DR6 0x036 #define SVM_EXIT_WRITE_DR6 0x036
#define SVM_EXIT_WRITE_DR7 0x037 #define SVM_EXIT_WRITE_DR7 0x037
#define SVM_EXIT_EXCP_BASE 0x040 #define SVM_EXIT_EXCP_BASE 0x040
#define SVM_EXIT_LAST_EXCP 0x05f
#define SVM_EXIT_INTR 0x060 #define SVM_EXIT_INTR 0x060
#define SVM_EXIT_NMI 0x061 #define SVM_EXIT_NMI 0x061
#define SVM_EXIT_SMI 0x062 #define SVM_EXIT_SMI 0x062
@@ -76,10 +77,21 @@
#define SVM_EXIT_MWAIT_COND 0x08c #define SVM_EXIT_MWAIT_COND 0x08c
#define SVM_EXIT_XSETBV 0x08d #define SVM_EXIT_XSETBV 0x08d
#define SVM_EXIT_RDPRU 0x08e #define SVM_EXIT_RDPRU 0x08e
#define SVM_EXIT_INVPCID 0x0a2
#define SVM_EXIT_NPF 0x400 #define SVM_EXIT_NPF 0x400
#define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401 #define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401
#define SVM_EXIT_AVIC_UNACCELERATED_ACCESS 0x402 #define SVM_EXIT_AVIC_UNACCELERATED_ACCESS 0x402
/* SEV-ES software-defined VMGEXIT events */
#define SVM_VMGEXIT_MMIO_READ 0x80000001
#define SVM_VMGEXIT_MMIO_WRITE 0x80000002
#define SVM_VMGEXIT_NMI_COMPLETE 0x80000003
#define SVM_VMGEXIT_AP_HLT_LOOP 0x80000004
#define SVM_VMGEXIT_AP_JUMP_TABLE 0x80000005
#define SVM_VMGEXIT_SET_AP_JUMP_TABLE 0
#define SVM_VMGEXIT_GET_AP_JUMP_TABLE 1
#define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff
#define SVM_EXIT_ERR -1 #define SVM_EXIT_ERR -1
#define SVM_EXIT_REASONS \ #define SVM_EXIT_REASONS \
@@ -171,6 +183,7 @@
{ SVM_EXIT_MONITOR, "monitor" }, \ { SVM_EXIT_MONITOR, "monitor" }, \
{ SVM_EXIT_MWAIT, "mwait" }, \ { SVM_EXIT_MWAIT, "mwait" }, \
{ SVM_EXIT_XSETBV, "xsetbv" }, \ { SVM_EXIT_XSETBV, "xsetbv" }, \
{ SVM_EXIT_INVPCID, "invpcid" }, \
{ SVM_EXIT_NPF, "npf" }, \ { SVM_EXIT_NPF, "npf" }, \
{ SVM_EXIT_AVIC_INCOMPLETE_IPI, "avic_incomplete_ipi" }, \ { SVM_EXIT_AVIC_INCOMPLETE_IPI, "avic_incomplete_ipi" }, \
{ SVM_EXIT_AVIC_UNACCELERATED_ACCESS, "avic_unaccelerated_access" }, \ { SVM_EXIT_AVIC_UNACCELERATED_ACCESS, "avic_unaccelerated_access" }, \

View File

@@ -185,7 +185,6 @@ int main(int argc, char *argv[])
main_test_libperl(); main_test_libperl();
main_test_hello(); main_test_hello();
main_test_libelf(); main_test_libelf();
main_test_libelf_mmap();
main_test_get_current_dir_name(); main_test_get_current_dir_name();
main_test_gettid(); main_test_gettid();
main_test_glibc(); main_test_glibc();

View File

@@ -27,18 +27,6 @@
#define __pure __attribute__((pure)) #define __pure __attribute__((pure))
#endif #endif
#define noinline __attribute__((noinline)) #define noinline __attribute__((noinline))
#ifdef __has_attribute
#if __has_attribute(disable_tail_calls)
#define __no_tail_call __attribute__((disable_tail_calls))
#endif
#endif
#ifndef __no_tail_call
#if GCC_VERSION > 40201
#define __no_tail_call __attribute__((optimize("no-optimize-sibling-calls")))
#else
#define __no_tail_call
#endif
#endif
#ifndef __packed #ifndef __packed
#define __packed __attribute__((packed)) #define __packed __attribute__((packed))
#endif #endif

View File

@@ -47,9 +47,6 @@
#ifndef noinline #ifndef noinline
#define noinline #define noinline
#endif #endif
#ifndef __no_tail_call
#define __no_tail_call
#endif
/* Are two types/vars the same type (ignoring qualifiers)? */ /* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type #ifndef __same_type

View File

@@ -857,9 +857,11 @@ __SYSCALL(__NR_openat2, sys_openat2)
__SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd) __SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd)
#define __NR_faccessat2 439 #define __NR_faccessat2 439
__SYSCALL(__NR_faccessat2, sys_faccessat2) __SYSCALL(__NR_faccessat2, sys_faccessat2)
#define __NR_process_madvise 440
__SYSCALL(__NR_process_madvise, sys_process_madvise)
#undef __NR_syscalls #undef __NR_syscalls
#define __NR_syscalls 440 #define __NR_syscalls 441
/* /*
* 32 bit systems traditionally used different * 32 bit systems traditionally used different

View File

@@ -619,6 +619,12 @@ typedef struct drm_i915_irq_wait {
*/ */
#define I915_PARAM_PERF_REVISION 54 #define I915_PARAM_PERF_REVISION 54
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
* timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
* I915_EXEC_USE_EXTENSIONS.
*/
#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
/* Must be kept compact -- no holes and well documented */ /* Must be kept compact -- no holes and well documented */
typedef struct drm_i915_getparam { typedef struct drm_i915_getparam {
@@ -1046,6 +1052,38 @@ struct drm_i915_gem_exec_fence {
__u32 flags; __u32 flags;
}; };
/**
* See drm_i915_gem_execbuffer_ext_timeline_fences.
*/
#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
/**
* This structure describes an array of drm_syncobj and associated points for
* timeline variants of drm_syncobj. It is invalid to append this structure to
* the execbuf if I915_EXEC_FENCE_ARRAY is set.
*/
struct drm_i915_gem_execbuffer_ext_timeline_fences {
struct i915_user_extension base;
/**
* Number of element in the handles_ptr & value_ptr arrays.
*/
__u64 fence_count;
/**
* Pointer to an array of struct drm_i915_gem_exec_fence of length
* fence_count.
*/
__u64 handles_ptr;
/**
* Pointer to an array of u64 values of length fence_count. Values
* must be 0 for a binary drm_syncobj. A Value of 0 for a timeline
* drm_syncobj is invalid as it turns a drm_syncobj into a binary one.
*/
__u64 values_ptr;
};
struct drm_i915_gem_execbuffer2 { struct drm_i915_gem_execbuffer2 {
/** /**
* List of gem_exec_object2 structs * List of gem_exec_object2 structs
@@ -1062,8 +1100,14 @@ struct drm_i915_gem_execbuffer2 {
__u32 num_cliprects; __u32 num_cliprects;
/** /**
* This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
* is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a * & I915_EXEC_USE_EXTENSIONS are not set.
* struct drm_i915_gem_exec_fence *fences. *
* If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
* of struct drm_i915_gem_exec_fence and num_cliprects is the length
* of the array.
*
* If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
* single struct i915_user_extension and num_cliprects is 0.
*/ */
__u64 cliprects_ptr; __u64 cliprects_ptr;
#define I915_EXEC_RING_MASK (0x3f) #define I915_EXEC_RING_MASK (0x3f)
@@ -1181,7 +1225,16 @@ struct drm_i915_gem_execbuffer2 {
*/ */
#define I915_EXEC_FENCE_SUBMIT (1 << 20) #define I915_EXEC_FENCE_SUBMIT (1 << 20)
#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1)) /*
* Setting I915_EXEC_USE_EXTENSIONS implies that
* drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to an linked
* list of i915_user_extension. Each i915_user_extension node is the base of a
* larger structure. The list of supported structures are listed in the
* drm_i915_gem_execbuffer_ext enum.
*/
#define I915_EXEC_USE_EXTENSIONS (1 << 21)
#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \ #define i915_execbuffer2_set_context_id(eb2, context) \

View File

@@ -45,7 +45,6 @@ struct fscrypt_policy_v1 {
__u8 flags; __u8 flags;
__u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; __u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
}; };
#define fscrypt_policy fscrypt_policy_v1
/* /*
* Process-subscribed "logon" key description prefix and payload format. * Process-subscribed "logon" key description prefix and payload format.
@@ -156,9 +155,9 @@ struct fscrypt_get_key_status_arg {
__u32 __out_reserved[13]; __u32 __out_reserved[13];
}; };
#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy) #define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy_v1)
#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) #define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy) #define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy_v1)
#define FS_IOC_GET_ENCRYPTION_POLICY_EX _IOWR('f', 22, __u8[9]) /* size + version */ #define FS_IOC_GET_ENCRYPTION_POLICY_EX _IOWR('f', 22, __u8[9]) /* size + version */
#define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg) #define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg)
#define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg) #define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg)
@@ -170,6 +169,7 @@ struct fscrypt_get_key_status_arg {
/* old names; don't add anything new here! */ /* old names; don't add anything new here! */
#ifndef __KERNEL__ #ifndef __KERNEL__
#define fscrypt_policy fscrypt_policy_v1
#define FS_KEY_DESCRIPTOR_SIZE FSCRYPT_KEY_DESCRIPTOR_SIZE #define FS_KEY_DESCRIPTOR_SIZE FSCRYPT_KEY_DESCRIPTOR_SIZE
#define FS_POLICY_FLAGS_PAD_4 FSCRYPT_POLICY_FLAGS_PAD_4 #define FS_POLICY_FLAGS_PAD_4 FSCRYPT_POLICY_FLAGS_PAD_4
#define FS_POLICY_FLAGS_PAD_8 FSCRYPT_POLICY_FLAGS_PAD_8 #define FS_POLICY_FLAGS_PAD_8 FSCRYPT_POLICY_FLAGS_PAD_8

View File

@@ -248,6 +248,8 @@ struct kvm_hyperv_exit {
#define KVM_EXIT_IOAPIC_EOI 26 #define KVM_EXIT_IOAPIC_EOI 26
#define KVM_EXIT_HYPERV 27 #define KVM_EXIT_HYPERV 27
#define KVM_EXIT_ARM_NISV 28 #define KVM_EXIT_ARM_NISV 28
#define KVM_EXIT_X86_RDMSR 29
#define KVM_EXIT_X86_WRMSR 30
/* For KVM_EXIT_INTERNAL_ERROR */ /* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */ /* Emulate instruction failed. */
@@ -413,6 +415,17 @@ struct kvm_run {
__u64 esr_iss; __u64 esr_iss;
__u64 fault_ipa; __u64 fault_ipa;
} arm_nisv; } arm_nisv;
/* KVM_EXIT_X86_RDMSR / KVM_EXIT_X86_WRMSR */
struct {
__u8 error; /* user -> kernel */
__u8 pad[7];
#define KVM_MSR_EXIT_REASON_INVAL (1 << 0)
#define KVM_MSR_EXIT_REASON_UNKNOWN (1 << 1)
#define KVM_MSR_EXIT_REASON_FILTER (1 << 2)
__u32 reason; /* kernel -> user */
__u32 index; /* kernel -> user */
__u64 data; /* kernel <-> user */
} msr;
/* Fix the size of the union. */ /* Fix the size of the union. */
char padding[256]; char padding[256];
}; };
@@ -1037,6 +1050,9 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_SMALLER_MAXPHYADDR 185 #define KVM_CAP_SMALLER_MAXPHYADDR 185
#define KVM_CAP_S390_DIAG318 186 #define KVM_CAP_S390_DIAG318 186
#define KVM_CAP_STEAL_TIME 187 #define KVM_CAP_STEAL_TIME 187
#define KVM_CAP_X86_USER_SPACE_MSR 188
#define KVM_CAP_X86_MSR_FILTER 189
#define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190
#ifdef KVM_CAP_IRQ_ROUTING #ifdef KVM_CAP_IRQ_ROUTING
@@ -1538,6 +1554,9 @@ struct kvm_pv_cmd {
/* Available with KVM_CAP_S390_PROTECTED */ /* Available with KVM_CAP_S390_PROTECTED */
#define KVM_S390_PV_COMMAND _IOWR(KVMIO, 0xc5, struct kvm_pv_cmd) #define KVM_S390_PV_COMMAND _IOWR(KVMIO, 0xc5, struct kvm_pv_cmd)
/* Available with KVM_CAP_X86_MSR_FILTER */
#define KVM_X86_SET_MSR_FILTER _IOW(KVMIO, 0xc6, struct kvm_msr_filter)
/* Secure Encrypted Virtualization command */ /* Secure Encrypted Virtualization command */
enum sev_cmd_id { enum sev_cmd_id {
/* Guest initialization commands */ /* Guest initialization commands */

View File

@@ -27,6 +27,7 @@
#define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT #define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
#define MAP_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK #define MAP_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK
#define MAP_HUGE_16KB HUGETLB_FLAG_ENCODE_16KB
#define MAP_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB #define MAP_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB
#define MAP_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB #define MAP_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB
#define MAP_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB #define MAP_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB

View File

@@ -16,6 +16,7 @@
#define MS_REMOUNT 32 /* Alter flags of a mounted FS */ #define MS_REMOUNT 32 /* Alter flags of a mounted FS */
#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ #define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
#define MS_DIRSYNC 128 /* Directory modifications are synchronous */ #define MS_DIRSYNC 128 /* Directory modifications are synchronous */
#define MS_NOSYMFOLLOW 256 /* Do not follow symlinks */
#define MS_NOATIME 1024 /* Do not update access times. */ #define MS_NOATIME 1024 /* Do not update access times. */
#define MS_NODIRATIME 2048 /* Do not update directory access times */ #define MS_NODIRATIME 2048 /* Do not update directory access times */
#define MS_BIND 4096 #define MS_BIND 4096

View File

@@ -233,6 +233,15 @@ struct prctl_mm_map {
#define PR_SET_TAGGED_ADDR_CTRL 55 #define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56 #define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0) # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
/* MTE tag check fault modes */
# define PR_MTE_TCF_SHIFT 1
# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT)
# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
/* MTE tag inclusion mask */
# define PR_MTE_TAG_SHIFT 3
# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
/* Control reclaim behavior when allocating memory */ /* Control reclaim behavior when allocating memory */
#define PR_SET_IO_FLUSHER 57 #define PR_SET_IO_FLUSHER 57

View File

@@ -146,4 +146,8 @@
/* Set event fd for config interrupt*/ /* Set event fd for config interrupt*/
#define VHOST_VDPA_SET_CONFIG_CALL _IOW(VHOST_VIRTIO, 0x77, int) #define VHOST_VDPA_SET_CONFIG_CALL _IOW(VHOST_VIRTIO, 0x77, int)
/* Get the valid iova range */
#define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \
struct vhost_vdpa_iova_range)
#endif #endif

View File

@@ -749,6 +749,7 @@ else
PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS)) PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null) PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS)) PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
PERL_EMBED_CCOPTS := $(filter-out -flto=auto -ffat-lto-objects, $(PERL_EMBED_CCOPTS))
PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS)) PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS) FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)

View File

@@ -361,12 +361,13 @@
437 common openat2 sys_openat2 437 common openat2 sys_openat2
438 common pidfd_getfd sys_pidfd_getfd 438 common pidfd_getfd sys_pidfd_getfd
439 common faccessat2 sys_faccessat2 439 common faccessat2 sys_faccessat2
440 common process_madvise sys_process_madvise
# #
# x32-specific system call numbers start at 512 to avoid cache impact # Due to a historical design error, certain syscalls are numbered differently
# for native 64-bit operation. The __x32_compat_sys stubs are created # in x32 as compared to native x86_64. These syscalls have numbers 512-547.
# on-the-fly for compat_sys_*() compatibility system calls if X86_X32 # Do not add new syscalls to this range. Numbers 548 and above are available
# is defined. # for non-x32 use.
# #
512 x32 rt_sigaction compat_sys_rt_sigaction 512 x32 rt_sigaction compat_sys_rt_sigaction
513 x32 rt_sigreturn compat_sys_x32_rt_sigreturn 513 x32 rt_sigreturn compat_sys_x32_rt_sigreturn
@@ -404,3 +405,5 @@
545 x32 execveat compat_sys_execveat 545 x32 execveat compat_sys_execveat
546 x32 preadv2 compat_sys_preadv64v2 546 x32 preadv2 compat_sys_preadv64v2
547 x32 pwritev2 compat_sys_pwritev64v2 547 x32 pwritev2 compat_sys_pwritev64v2
# This is the end of the legacy x32 range. Numbers 548 and above are
# not special and are not to be used for x32-specific syscalls.

View File

@@ -4639,9 +4639,9 @@ do_concat:
err = 0; err = 0;
if (lists[0]) { if (lists[0]) {
struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event", struct option o = {
"event selector. use 'perf list' to list available events", .value = &trace->evlist,
parse_events_option); };
err = parse_events_option(&o, lists[0], 0); err = parse_events_option(&o, lists[0], 0);
} }
out: out:
@@ -4655,9 +4655,12 @@ static int trace__parse_cgroups(const struct option *opt, const char *str, int u
{ {
struct trace *trace = opt->value; struct trace *trace = opt->value;
if (!list_empty(&trace->evlist->core.entries)) if (!list_empty(&trace->evlist->core.entries)) {
return parse_cgroups(opt, str, unset); struct option o = {
.value = &trace->evlist,
};
return parse_cgroups(&o, str, unset);
}
trace->cgroup = evlist__findnew_cgroup(trace->evlist, str); trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
return 0; return 0;

View File

@@ -329,7 +329,7 @@
}, },
{ {
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time", "MetricExpr": "( ( ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) * 1048576 ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW;SoC", "MetricGroup": "Memory_BW;SoC",
"MetricName": "DRAM_BW_Use" "MetricName": "DRAM_BW_Use"
}, },

View File

@@ -323,7 +323,7 @@
}, },
{ {
"BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
"MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time", "MetricExpr": "( ( ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) * 1048576 ) / 1000000000 ) / duration_time",
"MetricGroup": "Memory_BW;SoC", "MetricGroup": "Memory_BW;SoC",
"MetricName": "DRAM_BW_Use" "MetricName": "DRAM_BW_Use"
}, },

View File

@@ -95,7 +95,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
return strcmp((const char *) symbol, funcs[idx]); return strcmp((const char *) symbol, funcs[idx]);
} }
__no_tail_call noinline int test_dwarf_unwind__thread(struct thread *thread) noinline int test_dwarf_unwind__thread(struct thread *thread)
{ {
struct perf_sample sample; struct perf_sample sample;
unsigned long cnt = 0; unsigned long cnt = 0;
@@ -126,7 +126,7 @@ __no_tail_call noinline int test_dwarf_unwind__thread(struct thread *thread)
static int global_unwind_retval = -INT_MAX; static int global_unwind_retval = -INT_MAX;
__no_tail_call noinline int test_dwarf_unwind__compare(void *p1, void *p2) noinline int test_dwarf_unwind__compare(void *p1, void *p2)
{ {
/* Any possible value should be 'thread' */ /* Any possible value should be 'thread' */
struct thread *thread = *(struct thread **)p1; struct thread *thread = *(struct thread **)p1;
@@ -145,7 +145,7 @@ __no_tail_call noinline int test_dwarf_unwind__compare(void *p1, void *p2)
return p1 - p2; return p1 - p2;
} }
__no_tail_call noinline int test_dwarf_unwind__krava_3(struct thread *thread) noinline int test_dwarf_unwind__krava_3(struct thread *thread)
{ {
struct thread *array[2] = {thread, thread}; struct thread *array[2] = {thread, thread};
void *fp = &bsearch; void *fp = &bsearch;
@@ -164,12 +164,12 @@ __no_tail_call noinline int test_dwarf_unwind__krava_3(struct thread *thread)
return global_unwind_retval; return global_unwind_retval;
} }
__no_tail_call noinline int test_dwarf_unwind__krava_2(struct thread *thread) noinline int test_dwarf_unwind__krava_2(struct thread *thread)
{ {
return test_dwarf_unwind__krava_3(thread); return test_dwarf_unwind__krava_3(thread);
} }
__no_tail_call noinline int test_dwarf_unwind__krava_1(struct thread *thread) noinline int test_dwarf_unwind__krava_1(struct thread *thread)
{ {
return test_dwarf_unwind__krava_2(thread); return test_dwarf_unwind__krava_2(thread);
} }

View File

@@ -2963,7 +2963,7 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events,
struct popup_action actions[MAX_OPTIONS]; struct popup_action actions[MAX_OPTIONS];
int nr_options = 0; int nr_options = 0;
int key = -1; int key = -1;
char buf[64]; char buf[128];
int delay_secs = hbt ? hbt->refresh : 0; int delay_secs = hbt ? hbt->refresh : 0;
#define HIST_BROWSER_HELP_COMMON \ #define HIST_BROWSER_HELP_COMMON \

View File

@@ -102,6 +102,8 @@ int build_id__sprintf(const struct build_id *build_id, char *bf)
const u8 *raw = build_id->data; const u8 *raw = build_id->data;
size_t i; size_t i;
bf[0] = 0x0;
for (i = 0; i < build_id->size; ++i) { for (i = 0; i < build_id->size; ++i) {
sprintf(bid, "%02x", *raw); sprintf(bid, "%02x", *raw);
++raw; ++raw;

View File

@@ -15,6 +15,9 @@
/* make sure libbpf doesn't use kernel-only integer typedefs */ /* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
/* prevent accidental re-addition of reallocarray() */
#pragma GCC poison reallocarray
/* start with 4 buckets */ /* start with 4 buckets */
#define HASHMAP_MIN_CAP_BITS 2 #define HASHMAP_MIN_CAP_BITS 2

View File

@@ -25,6 +25,18 @@ static inline size_t hash_bits(size_t h, int bits)
#endif #endif
} }
/* generic C-string hashing function */
static inline size_t str_hash(const char *s)
{
size_t h = 0;
while (*s) {
h = h * 31 + *s;
s++;
}
return h;
}
typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx); typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx); typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx);

View File

@@ -786,11 +786,20 @@ static int machine__process_ksymbol_unregister(struct machine *machine,
union perf_event *event, union perf_event *event,
struct perf_sample *sample __maybe_unused) struct perf_sample *sample __maybe_unused)
{ {
struct symbol *sym;
struct map *map; struct map *map;
map = maps__find(&machine->kmaps, event->ksymbol.addr); map = maps__find(&machine->kmaps, event->ksymbol.addr);
if (map) if (!map)
return 0;
if (map != machine->vmlinux_map)
maps__remove(&machine->kmaps, map); maps__remove(&machine->kmaps, map);
else {
sym = dso__find_symbol(map->dso, map->map_ip(map, map->start));
if (sym)
dso__delete_symbol(map->dso, sym);
}
return 0; return 0;
} }

View File

@@ -1592,7 +1592,6 @@ static void _free_command_line(wchar_t **command_line, int num)
static int python_start_script(const char *script, int argc, const char **argv) static int python_start_script(const char *script, int argc, const char **argv)
{ {
struct tables *tables = &tables_global; struct tables *tables = &tables_global;
PyMODINIT_FUNC (*initfunc)(void);
#if PY_MAJOR_VERSION < 3 #if PY_MAJOR_VERSION < 3
const char **command_line; const char **command_line;
#else #else
@@ -1607,20 +1606,18 @@ static int python_start_script(const char *script, int argc, const char **argv)
FILE *fp; FILE *fp;
#if PY_MAJOR_VERSION < 3 #if PY_MAJOR_VERSION < 3
initfunc = initperf_trace_context;
command_line = malloc((argc + 1) * sizeof(const char *)); command_line = malloc((argc + 1) * sizeof(const char *));
command_line[0] = script; command_line[0] = script;
for (i = 1; i < argc + 1; i++) for (i = 1; i < argc + 1; i++)
command_line[i] = argv[i - 1]; command_line[i] = argv[i - 1];
PyImport_AppendInittab(name, initperf_trace_context);
#else #else
initfunc = PyInit_perf_trace_context;
command_line = malloc((argc + 1) * sizeof(wchar_t *)); command_line = malloc((argc + 1) * sizeof(wchar_t *));
command_line[0] = Py_DecodeLocale(script, NULL); command_line[0] = Py_DecodeLocale(script, NULL);
for (i = 1; i < argc + 1; i++) for (i = 1; i < argc + 1; i++)
command_line[i] = Py_DecodeLocale(argv[i - 1], NULL); command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
PyImport_AppendInittab(name, PyInit_perf_trace_context);
#endif #endif
PyImport_AppendInittab(name, initfunc);
Py_Initialize(); Py_Initialize();
#if PY_MAJOR_VERSION < 3 #if PY_MAJOR_VERSION < 3

View File

@@ -595,6 +595,7 @@ static void perf_event__mmap2_swap(union perf_event *event,
event->mmap2.maj = bswap_32(event->mmap2.maj); event->mmap2.maj = bswap_32(event->mmap2.maj);
event->mmap2.min = bswap_32(event->mmap2.min); event->mmap2.min = bswap_32(event->mmap2.min);
event->mmap2.ino = bswap_64(event->mmap2.ino); event->mmap2.ino = bswap_64(event->mmap2.ino);
event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
if (sample_id_all) { if (sample_id_all) {
void *data = &event->mmap2.filename; void *data = &event->mmap2.filename;
@@ -710,6 +711,18 @@ static void perf_event__namespaces_swap(union perf_event *event,
swap_sample_id_all(event, &event->namespaces.link_info[i]); swap_sample_id_all(event, &event->namespaces.link_info[i]);
} }
static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
{
event->cgroup.id = bswap_64(event->cgroup.id);
if (sample_id_all) {
void *data = &event->cgroup.path;
data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
swap_sample_id_all(event, data);
}
}
static u8 revbyte(u8 b) static u8 revbyte(u8 b)
{ {
int rev = (b >> 4) | ((b & 0xf) << 4); int rev = (b >> 4) | ((b & 0xf) << 4);
@@ -952,6 +965,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_SWITCH] = perf_event__switch_swap, [PERF_RECORD_SWITCH] = perf_event__switch_swap,
[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap, [PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap, [PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
[PERF_RECORD_CGROUP] = perf_event__cgroup_swap,
[PERF_RECORD_TEXT_POKE] = perf_event__text_poke_swap, [PERF_RECORD_TEXT_POKE] = perf_event__text_poke_swap,
[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,

View File

@@ -515,6 +515,13 @@ void dso__insert_symbol(struct dso *dso, struct symbol *sym)
} }
} }
void dso__delete_symbol(struct dso *dso, struct symbol *sym)
{
rb_erase_cached(&sym->rb_node, &dso->symbols);
symbol__delete(sym);
dso__reset_find_symbol_cache(dso);
}
struct symbol *dso__find_symbol(struct dso *dso, u64 addr) struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
{ {
if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) { if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {

View File

@@ -131,6 +131,8 @@ int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map);
void dso__insert_symbol(struct dso *dso, void dso__insert_symbol(struct dso *dso,
struct symbol *sym); struct symbol *sym);
void dso__delete_symbol(struct dso *dso,
struct symbol *sym);
struct symbol *dso__find_symbol(struct dso *dso, u64 addr); struct symbol *dso__find_symbol(struct dso *dso, u64 addr);
struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name); struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name);

View File

@@ -3282,4 +3282,99 @@ TEST(epoll60)
close(ctx.epfd); close(ctx.epfd);
} }
struct epoll61_ctx {
int epfd;
int evfd;
};
static void *epoll61_write_eventfd(void *ctx_)
{
struct epoll61_ctx *ctx = ctx_;
int64_t l = 1;
usleep(10950);
write(ctx->evfd, &l, sizeof(l));
return NULL;
}
static void *epoll61_epoll_with_timeout(void *ctx_)
{
struct epoll61_ctx *ctx = ctx_;
struct epoll_event events[1];
int n;
n = epoll_wait(ctx->epfd, events, 1, 11);
/*
* If epoll returned the eventfd, write on the eventfd to wake up the
* blocking poller.
*/
if (n == 1) {
int64_t l = 1;
write(ctx->evfd, &l, sizeof(l));
}
return NULL;
}
static void *epoll61_blocking_epoll(void *ctx_)
{
struct epoll61_ctx *ctx = ctx_;
struct epoll_event events[1];
epoll_wait(ctx->epfd, events, 1, -1);
return NULL;
}
TEST(epoll61)
{
struct epoll61_ctx ctx;
struct epoll_event ev;
int i, r;
ctx.epfd = epoll_create1(0);
ASSERT_GE(ctx.epfd, 0);
ctx.evfd = eventfd(0, EFD_NONBLOCK);
ASSERT_GE(ctx.evfd, 0);
ev.events = EPOLLIN | EPOLLET | EPOLLERR | EPOLLHUP;
ev.data.ptr = NULL;
r = epoll_ctl(ctx.epfd, EPOLL_CTL_ADD, ctx.evfd, &ev);
ASSERT_EQ(r, 0);
/*
* We are testing a race. Repeat the test case 1000 times to make it
* more likely to fail in case of a bug.
*/
for (i = 0; i < 1000; i++) {
pthread_t threads[3];
int n;
/*
* Start 3 threads:
* Thread 1 sleeps for 10.9ms and writes to the evenfd.
* Thread 2 calls epoll with a timeout of 11ms.
* Thread 3 calls epoll with a timeout of -1.
*
* The eventfd write by Thread 1 should either wakeup Thread 2
* or Thread 3. If it wakes up Thread 2, Thread 2 writes on the
* eventfd to wake up Thread 3.
*
* If no events are missed, all three threads should eventually
* be joinable.
*/
ASSERT_EQ(pthread_create(&threads[0], NULL,
epoll61_write_eventfd, &ctx), 0);
ASSERT_EQ(pthread_create(&threads[1], NULL,
epoll61_epoll_with_timeout, &ctx), 0);
ASSERT_EQ(pthread_create(&threads[2], NULL,
epoll61_blocking_epoll, &ctx), 0);
for (n = 0; n < ARRAY_SIZE(threads); ++n)
ASSERT_EQ(pthread_join(threads[n], NULL), 0);
}
close(ctx.epfd);
close(ctx.evfd);
}
TEST_HARNESS_MAIN TEST_HARNESS_MAIN

View File

@@ -432,7 +432,7 @@
*/ */
/** /**
* ASSERT_EQ(expected, seen) * ASSERT_EQ()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -443,7 +443,7 @@
__EXPECT(expected, #expected, seen, #seen, ==, 1) __EXPECT(expected, #expected, seen, #seen, ==, 1)
/** /**
* ASSERT_NE(expected, seen) * ASSERT_NE()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -454,7 +454,7 @@
__EXPECT(expected, #expected, seen, #seen, !=, 1) __EXPECT(expected, #expected, seen, #seen, !=, 1)
/** /**
* ASSERT_LT(expected, seen) * ASSERT_LT()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -465,7 +465,7 @@
__EXPECT(expected, #expected, seen, #seen, <, 1) __EXPECT(expected, #expected, seen, #seen, <, 1)
/** /**
* ASSERT_LE(expected, seen) * ASSERT_LE()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -476,7 +476,7 @@
__EXPECT(expected, #expected, seen, #seen, <=, 1) __EXPECT(expected, #expected, seen, #seen, <=, 1)
/** /**
* ASSERT_GT(expected, seen) * ASSERT_GT()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -487,7 +487,7 @@
__EXPECT(expected, #expected, seen, #seen, >, 1) __EXPECT(expected, #expected, seen, #seen, >, 1)
/** /**
* ASSERT_GE(expected, seen) * ASSERT_GE()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -498,7 +498,7 @@
__EXPECT(expected, #expected, seen, #seen, >=, 1) __EXPECT(expected, #expected, seen, #seen, >=, 1)
/** /**
* ASSERT_NULL(seen) * ASSERT_NULL()
* *
* @seen: measured value * @seen: measured value
* *
@@ -508,7 +508,7 @@
__EXPECT(NULL, "NULL", seen, #seen, ==, 1) __EXPECT(NULL, "NULL", seen, #seen, ==, 1)
/** /**
* ASSERT_TRUE(seen) * ASSERT_TRUE()
* *
* @seen: measured value * @seen: measured value
* *
@@ -518,7 +518,7 @@
__EXPECT(0, "0", seen, #seen, !=, 1) __EXPECT(0, "0", seen, #seen, !=, 1)
/** /**
* ASSERT_FALSE(seen) * ASSERT_FALSE()
* *
* @seen: measured value * @seen: measured value
* *
@@ -528,7 +528,7 @@
__EXPECT(0, "0", seen, #seen, ==, 1) __EXPECT(0, "0", seen, #seen, ==, 1)
/** /**
* ASSERT_STREQ(expected, seen) * ASSERT_STREQ()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -539,7 +539,7 @@
__EXPECT_STR(expected, seen, ==, 1) __EXPECT_STR(expected, seen, ==, 1)
/** /**
* ASSERT_STRNE(expected, seen) * ASSERT_STRNE()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -550,7 +550,7 @@
__EXPECT_STR(expected, seen, !=, 1) __EXPECT_STR(expected, seen, !=, 1)
/** /**
* EXPECT_EQ(expected, seen) * EXPECT_EQ()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -561,7 +561,7 @@
__EXPECT(expected, #expected, seen, #seen, ==, 0) __EXPECT(expected, #expected, seen, #seen, ==, 0)
/** /**
* EXPECT_NE(expected, seen) * EXPECT_NE()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -572,7 +572,7 @@
__EXPECT(expected, #expected, seen, #seen, !=, 0) __EXPECT(expected, #expected, seen, #seen, !=, 0)
/** /**
* EXPECT_LT(expected, seen) * EXPECT_LT()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -583,7 +583,7 @@
__EXPECT(expected, #expected, seen, #seen, <, 0) __EXPECT(expected, #expected, seen, #seen, <, 0)
/** /**
* EXPECT_LE(expected, seen) * EXPECT_LE()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -594,7 +594,7 @@
__EXPECT(expected, #expected, seen, #seen, <=, 0) __EXPECT(expected, #expected, seen, #seen, <=, 0)
/** /**
* EXPECT_GT(expected, seen) * EXPECT_GT()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -605,7 +605,7 @@
__EXPECT(expected, #expected, seen, #seen, >, 0) __EXPECT(expected, #expected, seen, #seen, >, 0)
/** /**
* EXPECT_GE(expected, seen) * EXPECT_GE()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -616,7 +616,7 @@
__EXPECT(expected, #expected, seen, #seen, >=, 0) __EXPECT(expected, #expected, seen, #seen, >=, 0)
/** /**
* EXPECT_NULL(seen) * EXPECT_NULL()
* *
* @seen: measured value * @seen: measured value
* *
@@ -626,7 +626,7 @@
__EXPECT(NULL, "NULL", seen, #seen, ==, 0) __EXPECT(NULL, "NULL", seen, #seen, ==, 0)
/** /**
* EXPECT_TRUE(seen) * EXPECT_TRUE()
* *
* @seen: measured value * @seen: measured value
* *
@@ -636,7 +636,7 @@
__EXPECT(0, "0", seen, #seen, !=, 0) __EXPECT(0, "0", seen, #seen, !=, 0)
/** /**
* EXPECT_FALSE(seen) * EXPECT_FALSE()
* *
* @seen: measured value * @seen: measured value
* *
@@ -646,7 +646,7 @@
__EXPECT(0, "0", seen, #seen, ==, 0) __EXPECT(0, "0", seen, #seen, ==, 0)
/** /**
* EXPECT_STREQ(expected, seen) * EXPECT_STREQ()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value
@@ -657,7 +657,7 @@
__EXPECT_STR(expected, seen, ==, 0) __EXPECT_STR(expected, seen, ==, 0)
/** /**
* EXPECT_STRNE(expected, seen) * EXPECT_STRNE()
* *
* @expected: expected value * @expected: expected value
* @seen: measured value * @seen: measured value