Merge 5.10-rc3 into android-mainline
Linux 5.10-rc3

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I7884051ea7b86204b2685b51462368e122ad0772
@@ -1,29 +1,29 @@
-What:           sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/cap
+What:           /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/cap
 Date:           December 3, 2009
 KernelVersion:  2.6.32
 Contact:        dmaengine@vger.kernel.org
 Description:    Capabilities the DMA supports.Currently there are DMA_PQ, DMA_PQ_VAL,
                 DMA_XOR,DMA_XOR_VAL,DMA_INTERRUPT.
 
-What:           sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_active
+What:           /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_active
 Date:           December 3, 2009
 KernelVersion:  2.6.32
 Contact:        dmaengine@vger.kernel.org
 Description:    The number of descriptors active in the ring.
 
-What:           sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_size
+What:           /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_size
 Date:           December 3, 2009
 KernelVersion:  2.6.32
 Contact:        dmaengine@vger.kernel.org
 Description:    Descriptor ring size, total number of descriptors available.
 
-What:           sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/version
+What:           /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/version
 Date:           December 3, 2009
 KernelVersion:  2.6.32
 Contact:        dmaengine@vger.kernel.org
 Description:    Version of ioatdma device.
 
-What:           sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/intr_coalesce
+What:           /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/intr_coalesce
 Date:           August 8, 2017
 KernelVersion:  4.14
 Contact:        dmaengine@vger.kernel.org
@@ -152,7 +152,7 @@ Description:
                 When an interface is under test, it cannot be expected
                 to pass packets as normal.
 
-What:           /sys/clas/net/<iface>/duplex
+What:           /sys/class/net/<iface>/duplex
 Date:           October 2009
 KernelVersion:  2.6.33
 Contact:        netdev@vger.kernel.org
@@ -32,6 +32,11 @@ description: |
  | | vint | bit | | 0 |.....|63| vintx |
  | +--------------+ +------------+ |
  | |
+ | Unmap |
+ | +--------------+ |
+ Unmapped events ---->| | umapidx |-------------------------> Globalevents
+ | +--------------+ |
+ | |
  +-----------------------------------------+
 
  Configuration of these Intmap registers that maps global events to vint is
@@ -70,6 +75,11 @@ properties:
       - description: |
           "limit" specifies the limit for translation
 
+  ti,unmapped-event-sources:
+    $ref: /schemas/types.yaml#definitions/phandle-array
+    description:
+      Array of phandles to DMA controllers where the unmapped events originate.
+
 required:
   - compatible
   - reg
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/can/can-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: CAN Controller Generic Binding
+
+maintainers:
+  - Marc Kleine-Budde <mkl@pengutronix.de>
+
+properties:
+  $nodename:
+    pattern: "^can(@.*)?$"
+
+additionalProperties: true
+
+...
Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml (new file, 135 lines)
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/can/fsl,flexcan.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title:
+  Flexcan CAN controller on Freescale's ARM and PowerPC system-on-a-chip (SOC).
+
+maintainers:
+  - Marc Kleine-Budde <mkl@pengutronix.de>
+
+allOf:
+  - $ref: can-controller.yaml#
+
+properties:
+  compatible:
+    oneOf:
+      - enum:
+          - fsl,imx8qm-flexcan
+          - fsl,imx8mp-flexcan
+          - fsl,imx6q-flexcan
+          - fsl,imx53-flexcan
+          - fsl,imx35-flexcan
+          - fsl,imx28-flexcan
+          - fsl,imx25-flexcan
+          - fsl,p1010-flexcan
+          - fsl,vf610-flexcan
+          - fsl,ls1021ar2-flexcan
+          - fsl,lx2160ar1-flexcan
+      - items:
+          - enum:
+              - fsl,imx7d-flexcan
+              - fsl,imx6ul-flexcan
+              - fsl,imx6sx-flexcan
+          - const: fsl,imx6q-flexcan
+      - items:
+          - enum:
+              - fsl,ls1028ar1-flexcan
+          - const: fsl,lx2160ar1-flexcan
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 2
+
+  clock-names:
+    items:
+      - const: ipg
+      - const: per
+
+  clock-frequency:
+    description: |
+      The oscillator frequency driving the flexcan device, filled in by the
+      boot loader. This property should only be used the used operating system
+      doesn't support the clocks and clock-names property.
+
+  xceiver-supply:
+    description: Regulator that powers the CAN transceiver.
+
+  big-endian:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description: |
+      This means the registers of FlexCAN controller are big endian. This is
+      optional property.i.e. if this property is not present in device tree
+      node then controller is assumed to be little endian. If this property is
+      present then controller is assumed to be big endian.
+
+  fsl,stop-mode:
+    description: |
+      Register bits of stop mode control.
+
+      The format should be as follows:
+      <gpr req_gpr req_bit>
+      gpr is the phandle to general purpose register node.
+      req_gpr is the gpr register offset of CAN stop request.
+      req_bit is the bit offset of CAN stop request.
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    items:
+      - description: The 'gpr' is the phandle to general purpose register node.
+      - description: The 'req_gpr' is the gpr register offset of CAN stop request.
+        maximum: 0xff
+      - description: The 'req_bit' is the bit offset of CAN stop request.
+        maximum: 0x1f
+
+  fsl,clk-source:
+    description: |
+      Select the clock source to the CAN Protocol Engine (PE). It's SoC
+      implementation dependent. Refer to RM for detailed definition. If this
+      property is not set in device tree node then driver selects clock source 1
+      by default.
+      0: clock source 0 (oscillator clock)
+      1: clock source 1 (peripheral clock)
+    $ref: /schemas/types.yaml#/definitions/uint32
+    default: 1
+    minimum: 0
+    maximum: 1
+
+  wakeup-source:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      Enable CAN remote wakeup.
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+additionalProperties: false
+
+examples:
+  - |
+    can@1c000 {
+        compatible = "fsl,p1010-flexcan";
+        reg = <0x1c000 0x1000>;
+        interrupts = <48 0x2>;
+        interrupt-parent = <&mpic>;
+        clock-frequency = <200000000>;
+        fsl,clk-source = <0>;
+    };
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    can@2090000 {
+        compatible = "fsl,imx6q-flexcan";
+        reg = <0x02090000 0x4000>;
+        interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&clks 1>, <&clks 2>;
+        clock-names = "ipg", "per";
+        fsl,stop-mode = <&gpr 0x34 28>;
+    };
@@ -1,57 +0,0 @@
-Flexcan CAN controller on Freescale's ARM and PowerPC system-on-a-chip (SOC).
-
-Required properties:
-
-- compatible : Should be "fsl,<processor>-flexcan"
-
-  where <processor> is imx8qm, imx6q, imx28, imx53, imx35, imx25, p1010,
-  vf610, ls1021ar2, lx2160ar1, ls1028ar1.
-
-  The ls1028ar1 must be followed by lx2160ar1, e.g.
-   - "fsl,ls1028ar1-flexcan", "fsl,lx2160ar1-flexcan"
-
-  An implementation should also claim any of the following compatibles
-  that it is fully backwards compatible with:
-
-  - fsl,p1010-flexcan
-
-- reg : Offset and length of the register set for this device
-- interrupts : Interrupt tuple for this device
-
-Optional properties:
-
-- clock-frequency : The oscillator frequency driving the flexcan device
-
-- xceiver-supply: Regulator that powers the CAN transceiver
-
-- big-endian: This means the registers of FlexCAN controller are big endian.
-              This is optional property.i.e. if this property is not present in
-              device tree node then controller is assumed to be little endian.
-              if this property is present then controller is assumed to be big
-              endian.
-
-- fsl,stop-mode: register bits of stop mode control, the format is
-                 <&gpr req_gpr req_bit>.
-                 gpr is the phandle to general purpose register node.
-                 req_gpr is the gpr register offset of CAN stop request.
-                 req_bit is the bit offset of CAN stop request.
-
-- fsl,clk-source: Select the clock source to the CAN Protocol Engine (PE).
-                  It's SoC Implementation dependent. Refer to RM for detailed
-                  definition. If this property is not set in device tree node
-                  then driver selects clock source 1 by default.
-                  0: clock source 0 (oscillator clock)
-                  1: clock source 1 (peripheral clock)
-
-- wakeup-source: enable CAN remote wakeup
-
-Example:
-
-	can@1c000 {
-		compatible = "fsl,p1010-flexcan";
-		reg = <0x1c000 0x1000>;
-		interrupts = <48 0x2>;
-		interrupt-parent = <&mpic>;
-		clock-frequency = <200000000>; // filled in by bootloader
-		fsl,clk-source = <0>; // select clock source 0 for PE
-	};
@@ -25,3 +25,4 @@ LEDs
    leds-lp5562
+   leds-lp55xx
    leds-mlxcpld
    leds-sc27xx
@@ -24,7 +24,6 @@ fit into other categories.
    isl29003
    lis3lv02d
    max6875
-   mic/index
    pci-endpoint-test
    spear-pcie-gadget
    uacce
@@ -10,9 +10,9 @@ Overview / What Is J1939
 SAE J1939 defines a higher layer protocol on CAN. It implements a more
 sophisticated addressing scheme and extends the maximum packet size above 8
 bytes. Several derived specifications exist, which differ from the original
-J1939 on the application level, like MilCAN A, NMEA2000 and especially
+J1939 on the application level, like MilCAN A, NMEA2000, and especially
 ISO-11783 (ISOBUS). This last one specifies the so-called ETP (Extended
-Transport Protocol) which is has been included in this implementation. This
+Transport Protocol), which has been included in this implementation. This
 results in a maximum packet size of ((2 ^ 24) - 1) * 7 bytes == 111 MiB.
 
 Specifications used
@@ -32,15 +32,15 @@ sockets, we found some reasons to justify a kernel implementation for the
 addressing and transport methods used by J1939.
 
 * **Addressing:** when a process on an ECU communicates via J1939, it should
-  not necessarily know its source address. Although at least one process per
+  not necessarily know its source address. Although, at least one process per
   ECU should know the source address. Other processes should be able to reuse
   that address. This way, address parameters for different processes
   cooperating for the same ECU, are not duplicated. This way of working is
-  closely related to the UNIX concept where programs do just one thing, and do
+  closely related to the UNIX concept, where programs do just one thing and do
   it well.
 
 * **Dynamic addressing:** Address Claiming in J1939 is time critical.
-  Furthermore data transport should be handled properly during the address
+  Furthermore, data transport should be handled properly during the address
   negotiation. Putting this functionality in the kernel eliminates it as a
   requirement for _every_ user space process that communicates via J1939. This
   results in a consistent J1939 bus with proper addressing.
@@ -58,7 +58,7 @@ Therefore, these parts are left to user space.
 
 The J1939 sockets operate on CAN network devices (see SocketCAN). Any J1939
 user space library operating on CAN raw sockets will still operate properly.
-Since such library does not communicate with the in-kernel implementation, care
+Since such a library does not communicate with the in-kernel implementation, care
 must be taken that these two do not interfere. In practice, this means they
 cannot share ECU addresses. A single ECU (or virtual ECU) address is used by
 the library exclusively, or by the in-kernel system exclusively.
@@ -77,13 +77,13 @@ is composed as follows:
   8 bits : PS (PDU Specific)
 
 In J1939-21 distinction is made between PDU1 format (where PF < 240) and PDU2
-format (where PF >= 240). Furthermore, when using PDU2 format, the PS-field
+format (where PF >= 240). Furthermore, when using the PDU2 format, the PS-field
 contains a so-called Group Extension, which is part of the PGN. When using PDU2
 format, the Group Extension is set in the PS-field.
 
 On the other hand, when using PDU1 format, the PS-field contains a so-called
 Destination Address, which is _not_ part of the PGN. When communicating a PGN
-from user space to kernel (or visa versa) and PDU2 format is used, the PS-field
+from user space to kernel (or vice versa) and PDU2 format is used, the PS-field
 of the PGN shall be set to zero. The Destination Address shall be set
 elsewhere.
 
@@ -96,15 +96,15 @@ Addressing
 
 Both static and dynamic addressing methods can be used.
 
-For static addresses, no extra checks are made by the kernel, and provided
+For static addresses, no extra checks are made by the kernel and provided
 addresses are considered right. This responsibility is for the OEM or system
 integrator.
 
 For dynamic addressing, so-called Address Claiming, extra support is foreseen
-in the kernel. In J1939 any ECU is known by it's 64-bit NAME. At the moment of
+in the kernel. In J1939 any ECU is known by its 64-bit NAME. At the moment of
 a successful address claim, the kernel keeps track of both NAME and source
 address being claimed. This serves as a base for filter schemes. By default,
-packets with a destination that is not locally, will be rejected.
+packets with a destination that is not locally will be rejected.
 
 Mixed mode packets (from a static to a dynamic address or vice versa) are
 allowed. The BSD sockets define separate API calls for getting/setting the
@@ -131,31 +131,31 @@ API Calls
 ---------
 
 On CAN, you first need to open a socket for communicating over a CAN network.
-To use J1939, #include <linux/can/j1939.h>. From there, <linux/can.h> will be
+To use J1939, ``#include <linux/can/j1939.h>``. From there, ``<linux/can.h>`` will be
 included too. To open a socket, use:
 
 .. code-block:: C
 
     s = socket(PF_CAN, SOCK_DGRAM, CAN_J1939);
 
-J1939 does use SOCK_DGRAM sockets. In the J1939 specification, connections are
+J1939 does use ``SOCK_DGRAM`` sockets. In the J1939 specification, connections are
 mentioned in the context of transport protocol sessions. These still deliver
-packets to the other end (using several CAN packets). SOCK_STREAM is not
+packets to the other end (using several CAN packets). ``SOCK_STREAM`` is not
 supported.
 
-After the successful creation of the socket, you would normally use the bind(2)
-and/or connect(2) system call to bind the socket to a CAN interface. After
-binding and/or connecting the socket, you can read(2) and write(2) from/to the
-socket or use send(2), sendto(2), sendmsg(2) and the recv*() counterpart
+After the successful creation of the socket, you would normally use the ``bind(2)``
+and/or ``connect(2)`` system call to bind the socket to a CAN interface. After
+binding and/or connecting the socket, you can ``read(2)`` and ``write(2)`` from/to the
+socket or use ``send(2)``, ``sendto(2)``, ``sendmsg(2)`` and the ``recv*()`` counterpart
 operations on the socket as usual. There are also J1939 specific socket options
 described below.
 
-In order to send data, a bind(2) must have been successful. bind(2) assigns a
+In order to send data, a ``bind(2)`` must have been successful. ``bind(2)`` assigns a
 local address to a socket.
 
-Different from CAN is that the payload data is just the data that get send,
-without it's header info. The header info is derived from the sockaddr supplied
-to bind(2), connect(2), sendto(2) and recvfrom(2). A write(2) with size 4 will
+Different from CAN is that the payload data is just the data that get sends,
+without its header info. The header info is derived from the sockaddr supplied
+to ``bind(2)``, ``connect(2)``, ``sendto(2)`` and ``recvfrom(2)``. A ``write(2)`` with size 4 will
 result in a packet with 4 bytes.
 
 The sockaddr structure has extensions for use with J1939 as specified below:
@@ -180,47 +180,47 @@ The sockaddr structure has extensions for use with J1939 as specified below:
       } can_addr;
    }
 
-can_family & can_ifindex serve the same purpose as for other SocketCAN sockets.
+``can_family`` & ``can_ifindex`` serve the same purpose as for other SocketCAN sockets.
 
-can_addr.j1939.pgn specifies the PGN (max 0x3ffff). Individual bits are
+``can_addr.j1939.pgn`` specifies the PGN (max 0x3ffff). Individual bits are
 specified above.
 
-can_addr.j1939.name contains the 64-bit J1939 NAME.
+``can_addr.j1939.name`` contains the 64-bit J1939 NAME.
 
-can_addr.j1939.addr contains the address.
+``can_addr.j1939.addr`` contains the address.
 
-The bind(2) system call assigns the local address, i.e. the source address when
-sending packages. If a PGN during bind(2) is set, it's used as a RX filter.
+The ``bind(2)`` system call assigns the local address, i.e. the source address when
+sending packages. If a PGN during ``bind(2)`` is set, it's used as a RX filter.
 I.e. only packets with a matching PGN are received. If an ADDR or NAME is set
 it is used as a receive filter, too. It will match the destination NAME or ADDR
 of the incoming packet. The NAME filter will work only if appropriate Address
 Claiming for this name was done on the CAN bus and registered/cached by the
 kernel.
 
-On the other hand connect(2) assigns the remote address, i.e. the destination
-address. The PGN from connect(2) is used as the default PGN when sending
+On the other hand ``connect(2)`` assigns the remote address, i.e. the destination
+address. The PGN from ``connect(2)`` is used as the default PGN when sending
 packets. If ADDR or NAME is set it will be used as the default destination ADDR
-or NAME. Further a set ADDR or NAME during connect(2) is used as a receive
+or NAME. Further a set ADDR or NAME during ``connect(2)`` is used as a receive
 filter. It will match the source NAME or ADDR of the incoming packet.
 
-Both write(2) and send(2) will send a packet with local address from bind(2) and
-the remote address from connect(2). Use sendto(2) to overwrite the destination
+Both ``write(2)`` and ``send(2)`` will send a packet with local address from ``bind(2)`` and the
+remote address from ``connect(2)``. Use ``sendto(2)`` to overwrite the destination
 address.
 
-If can_addr.j1939.name is set (!= 0) the NAME is looked up by the kernel and
-the corresponding ADDR is used. If can_addr.j1939.name is not set (== 0),
-can_addr.j1939.addr is used.
+If ``can_addr.j1939.name`` is set (!= 0) the NAME is looked up by the kernel and
+the corresponding ADDR is used. If ``can_addr.j1939.name`` is not set (== 0),
+``can_addr.j1939.addr`` is used.
 
 When creating a socket, reasonable defaults are set. Some options can be
-modified with setsockopt(2) & getsockopt(2).
+modified with ``setsockopt(2)`` & ``getsockopt(2)``.
 
 RX path related options:
 
-- SO_J1939_FILTER - configure array of filters
-- SO_J1939_PROMISC - disable filters set by bind(2) and connect(2)
+- ``SO_J1939_FILTER`` - configure array of filters
+- ``SO_J1939_PROMISC`` - disable filters set by ``bind(2)`` and ``connect(2)``
 
 By default no broadcast packets can be send or received. To enable sending or
-receiving broadcast packets use the socket option SO_BROADCAST:
+receiving broadcast packets use the socket option ``SO_BROADCAST``:
 
 .. code-block:: C
 
@@ -261,26 +261,26 @@ The following diagram illustrates the RX path:
                     +---------------------------+
 
 TX path related options:
-SO_J1939_SEND_PRIO - change default send priority for the socket
+``SO_J1939_SEND_PRIO`` - change default send priority for the socket
 
 Message Flags during send() and Related System Calls
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-send(2), sendto(2) and sendmsg(2) take a 'flags' argument. Currently
+``send(2)``, ``sendto(2)`` and ``sendmsg(2)`` take a 'flags' argument. Currently
 supported flags are:
 
-* MSG_DONTWAIT, i.e. non-blocking operation.
+* ``MSG_DONTWAIT``, i.e. non-blocking operation.
 
 recvmsg(2)
 ^^^^^^^^^^
 
-In most cases recvmsg(2) is needed if you want to extract more information than
-recvfrom(2) can provide. For example package priority and timestamp. The
+In most cases ``recvmsg(2)`` is needed if you want to extract more information than
+``recvfrom(2)`` can provide. For example package priority and timestamp. The
 Destination Address, name and packet priority (if applicable) are attached to
-the msghdr in the recvmsg(2) call. They can be extracted using cmsg(3) macros,
-with cmsg_level == SOL_J1939 && cmsg_type == SCM_J1939_DEST_ADDR,
-SCM_J1939_DEST_NAME or SCM_J1939_PRIO. The returned data is a uint8_t for
-priority and dst_addr, and uint64_t for dst_name.
+the msghdr in the ``recvmsg(2)`` call. They can be extracted using ``cmsg(3)`` macros,
+with ``cmsg_level == SOL_J1939 && cmsg_type == SCM_J1939_DEST_ADDR``,
+``SCM_J1939_DEST_NAME`` or ``SCM_J1939_PRIO``. The returned data is a ``uint8_t`` for
+``priority`` and ``dst_addr``, and ``uint64_t`` for ``dst_name``.
 
 .. code-block:: C
 
@@ -305,12 +305,12 @@ Dynamic Addressing
 
 Distinction has to be made between using the claimed address and doing an
 address claim. To use an already claimed address, one has to fill in the
-j1939.name member and provide it to bind(2). If the name had claimed an address
+``j1939.name`` member and provide it to ``bind(2)``. If the name had claimed an address
 earlier, all further messages being sent will use that address. And the
-j1939.addr member will be ignored.
+``j1939.addr`` member will be ignored.
 
 An exception on this is PGN 0x0ee00. This is the "Address Claim/Cannot Claim
-Address" message and the kernel will use the j1939.addr member for that PGN if
+Address" message and the kernel will use the ``j1939.addr`` member for that PGN if
 necessary.
 
 To claim an address following code example can be used:
@@ -371,12 +371,12 @@ NAME can send packets.
 
 If another ECU claims the address, the kernel will mark the NAME-SA expired.
 No socket bound to the NAME can send packets (other than address claims). To
-claim another address, some socket bound to NAME, must bind(2) again, but with
-only j1939.addr changed to the new SA, and must then send a valid address claim
+claim another address, some socket bound to NAME, must ``bind(2)`` again, but with
+only ``j1939.addr`` changed to the new SA, and must then send a valid address claim
 packet. This restarts the state machine in the kernel (and any other
 participant on the bus) for this NAME.
 
-can-utils also include the jacd tool, so it can be used as code example or as
+``can-utils`` also include the ``j1939acd`` tool, so it can be used as code example or as
 default Address Claiming daemon.
 
 Send Examples
@@ -403,8 +403,8 @@ Bind:
 
     bind(sock, (struct sockaddr *)&baddr, sizeof(baddr));
 
-Now, the socket 'sock' is bound to the SA 0x20. Since no connect(2) was called,
-at this point we can use only sendto(2) or sendmsg(2).
+Now, the socket 'sock' is bound to the SA 0x20. Since no ``connect(2)`` was called,
+at this point we can use only ``sendto(2)`` or ``sendmsg(2)``.
 
 Send:
 
@@ -414,8 +414,8 @@ Send:
 		.can_family = AF_CAN,
 		.can_addr.j1939 = {
 			.name = J1939_NO_NAME;
-			.pgn = 0x30,
-			.addr = 0x12300,
+			.addr = 0x30,
+			.pgn = 0x12300,
 		},
 	};
 
@@ -934,7 +934,7 @@ M:	Evan Quan <evan.quan@amd.com>
 L:	amd-gfx@lists.freedesktop.org
 S:	Supported
 T:	git git://people.freedesktop.org/~agd5f/linux
-F:	drivers/gpu/drm/amd/powerplay/
+F:	drivers/gpu/drm/amd/pm/powerplay/
 
 AMD SEATTLE DEVICE TREE SUPPORT
 M:	Brijesh Singh <brijeshkumar.singh@amd.com>
@@ -11170,7 +11170,7 @@ F:	Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt
 F:	drivers/input/touchscreen/melfas_mip4.c
 
 MELLANOX BLUEFIELD I2C DRIVER
-M:	Khalil Blaiech <kblaiech@mellanox.com>
+M:	Khalil Blaiech <kblaiech@nvidia.com>
 L:	linux-i2c@vger.kernel.org
 S:	Supported
 F:	drivers/i2c/busses/i2c-mlxbf.c
Makefile (2 lines changed)
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -67,7 +67,22 @@
 	sr	r5, [ARC_REG_LPB_CTRL]
 1:
 #endif /* CONFIG_ARC_LPB_DISABLE */
-#endif
+
+	/* On HSDK, CCMs need to remapped super early */
+#ifdef CONFIG_ARC_SOC_HSDK
+	mov	r6, 0x60000000
+	lr	r5, [ARC_REG_ICCM_BUILD]
+	breq	r5, 0, 1f
+	sr	r6, [ARC_REG_AUX_ICCM]
+1:
+	lr	r5, [ARC_REG_DCCM_BUILD]
+	breq	r5, 0, 2f
+	sr	r6, [ARC_REG_AUX_DCCM]
+2:
+#endif /* CONFIG_ARC_SOC_HSDK */
+
+#endif /* CONFIG_ISA_ARCV2 */
 
 	; Config DSP_CTRL properly, so kernel may use integer multiply,
 	; multiply-accumulate, and divide operations
 	DSP_EARLY_INIT
@@ -112,7 +112,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
 	       int (*consumer_fn) (unsigned int, void *), void *arg)
 {
 #ifdef CONFIG_ARC_DW2_UNWIND
-	int ret = 0;
+	int ret = 0, cnt = 0;
 	unsigned int address;
 	struct unwind_frame_info frame_info;
 
@@ -132,6 +132,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
 			break;
 
 		frame_info.regs.r63 = frame_info.regs.r31;
+
+		if (cnt++ > 128) {
+			printk("unwinder looping too long, aborting !\n");
+			return 0;
+		}
 	}
 
 	return address;		/* return the last address it saw */
@@ -17,22 +17,6 @@ int arc_hsdk_axi_dmac_coherent __section(".data") = 0;
 
 #define ARC_CCM_UNUSED_ADDR	0x60000000
 
-static void __init hsdk_init_per_cpu(unsigned int cpu)
-{
-	/*
-	 * By default ICCM is mapped to 0x7z while this area is used for
-	 * kernel virtual mappings, so move it to currently unused area.
-	 */
-	if (cpuinfo_arc700[cpu].iccm.sz)
-		write_aux_reg(ARC_REG_AUX_ICCM, ARC_CCM_UNUSED_ADDR);
-
-	/*
-	 * By default DCCM is mapped to 0x8z while this area is used by kernel,
-	 * so move it to currently unused area.
-	 */
-	if (cpuinfo_arc700[cpu].dccm.sz)
-		write_aux_reg(ARC_REG_AUX_DCCM, ARC_CCM_UNUSED_ADDR);
-}
-
 #define ARC_PERIPHERAL_BASE	0xf0000000
 #define CREG_BASE		(ARC_PERIPHERAL_BASE + 0x1000)
 
@@ -339,5 +323,4 @@ static const char *hsdk_compat[] __initconst = {
 MACHINE_START(SIMULATION, "hsdk")
 	.dt_compat	= hsdk_compat,
 	.init_early	= hsdk_init_early,
-	.init_per_cpu	= hsdk_init_per_cpu,
 MACHINE_END
@@ -1002,7 +1002,7 @@ config NUMA
 config NODES_SHIFT
 	int "Maximum NUMA Nodes (as a power of 2)"
 	range 1 10
-	default "2"
+	default "4"
 	depends on NEED_MULTIPLE_NODES
 	help
 	  Specify the maximum number of NUMA Nodes available on the target
@@ -10,6 +10,7 @@
  * #imm16 values used for BRK instruction generation
  * 0x004: for installing kprobes
  * 0x005: for installing uprobes
+ * 0x006: for kprobe software single-step
  * Allowed values for kgdb are 0x400 - 0x7ff
  * 0x100: for triggering a fault on purpose (reserved)
  * 0x400: for dynamic BRK instruction
@@ -19,6 +20,7 @@
  */
 #define KPROBES_BRK_IMM			0x004
 #define UPROBES_BRK_IMM			0x005
+#define KPROBES_BRK_SS_IMM		0x006
 #define FAULT_BRK_IMM			0x100
 #define KGDB_DYN_DBG_BRK_IMM		0x400
 #define KGDB_COMPILED_DBG_BRK_IMM	0x401
@@ -53,6 +53,7 @@
 
 /* kprobes BRK opcodes with ESR encoding */
 #define BRK64_OPCODE_KPROBES	(AARCH64_BREAK_MON | (KPROBES_BRK_IMM << 5))
+#define BRK64_OPCODE_KPROBES_SS	(AARCH64_BREAK_MON | (KPROBES_BRK_SS_IMM << 5))
 /* uprobes BRK opcodes with ESR encoding */
 #define BRK64_OPCODE_UPROBES	(AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5))
 
@@ -16,7 +16,7 @@
 #include <linux/percpu.h>
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
-#define MAX_INSN_SIZE			1
+#define MAX_INSN_SIZE			2
 
 #define flush_insn_slot(p)		do { } while (0)
 #define kretprobe_blacklist_size	0
@@ -43,7 +43,7 @@ static void *image_load(struct kimage *image,
 	u64 flags, value;
 	bool be_image, be_kernel;
 	struct kexec_buf kbuf;
-	unsigned long text_offset;
+	unsigned long text_offset, kernel_segment_number;
 	struct kexec_segment *kernel_segment;
 	int ret;
 
@@ -88,11 +88,37 @@ static void *image_load(struct kimage *image,
 	/* Adjust kernel segment with TEXT_OFFSET */
 	kbuf.memsz += text_offset;
 
-	ret = kexec_add_buffer(&kbuf);
-	if (ret)
-		return ERR_PTR(ret);
+	kernel_segment_number = image->nr_segments;
 
-	kernel_segment = &image->segment[image->nr_segments - 1];
+	/*
+	 * The location of the kernel segment may make it impossible to satisfy
+	 * the other segment requirements, so we try repeatedly to find a
+	 * location that will work.
+	 */
+	while ((ret = kexec_add_buffer(&kbuf)) == 0) {
+		/* Try to load additional data */
+		kernel_segment = &image->segment[kernel_segment_number];
+		ret = load_other_segments(image, kernel_segment->mem,
+					  kernel_segment->memsz, initrd,
+					  initrd_len, cmdline);
+		if (!ret)
+			break;
+
+		/*
+		 * We couldn't find space for the other segments; erase the
+		 * kernel segment and try the next available hole.
+		 */
+		image->nr_segments -= 1;
+		kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz;
+		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+	}
+
+	if (ret) {
+		pr_err("Could not find any suitable kernel location!");
+		return ERR_PTR(ret);
+	}
+
+	kernel_segment = &image->segment[kernel_segment_number];
 	kernel_segment->mem += text_offset;
 	kernel_segment->memsz -= text_offset;
 	image->start = kernel_segment->mem;
@@ -101,12 +127,7 @@ static void *image_load(struct kimage *image,
 			      kernel_segment->mem, kbuf.bufsz,
 			      kernel_segment->memsz);
 
-	/* Load additional data */
-	ret = load_other_segments(image,
-				  kernel_segment->mem, kernel_segment->memsz,
-				  initrd, initrd_len, cmdline);
-
-	return ERR_PTR(ret);
+	return 0;
 }
 
 #ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
@@ -240,6 +240,11 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
 	return ret;
 }
 
+/*
+ * Tries to add the initrd and DTB to the image. If it is not possible to find
+ * valid locations, this function will undo changes to the image and return non
+ * zero.
+ */
 int load_other_segments(struct kimage *image,
 			unsigned long kernel_load_addr,
 			unsigned long kernel_size,
@@ -248,7 +253,8 @@ int load_other_segments(struct kimage *image,
 {
 	struct kexec_buf kbuf;
 	void *headers, *dtb = NULL;
-	unsigned long headers_sz, initrd_load_addr = 0, dtb_len;
+	unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
+		      orig_segments = image->nr_segments;
 	int ret = 0;
 
 	kbuf.image = image;
@@ -334,6 +340,7 @@ int load_other_segments(struct kimage *image,
 	return 0;
 
 out_err:
+	image->nr_segments = orig_segments;
 	vfree(dtb);
 	return ret;
 }
@@ -36,25 +36,16 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
-static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
-{
-	void *addrs[1];
-	u32 insns[1];
-
-	addrs[0] = addr;
-	insns[0] = opcode;
-
-	return aarch64_insn_patch_text(addrs, insns, 1);
-}
-
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
-	/* prepare insn slot */
-	patch_text(p->ainsn.api.insn, p->opcode);
-
-	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
-			   (uintptr_t) (p->ainsn.api.insn) +
-			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	kprobe_opcode_t *addr = p->ainsn.api.insn;
+	void *addrs[] = {addr, addr + 1};
+	u32 insns[] = {p->opcode, BRK64_OPCODE_KPROBES_SS};
+
+	/* prepare insn slot */
+	aarch64_insn_patch_text(addrs, insns, 2);
+
+	flush_icache_range((uintptr_t)addr, (uintptr_t)(addr + MAX_INSN_SIZE));
 
 	/*
 	 * Needs restoring of return address after stepping xol.
@@ -128,13 +119,18 @@ void *alloc_insn_page(void)
 /* arm kprobe: install breakpoint in text */
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
-	patch_text(p->addr, BRK64_OPCODE_KPROBES);
+	void *addr = p->addr;
+	u32 insn = BRK64_OPCODE_KPROBES;
+
+	aarch64_insn_patch_text(&addr, &insn, 1);
 }
 
 /* disarm kprobe: remove breakpoint from text */
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	patch_text(p->addr, p->opcode);
+	void *addr = p->addr;
+
+	aarch64_insn_patch_text(&addr, &p->opcode, 1);
 }
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
@@ -163,20 +159,15 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
 }
 
 /*
- * Interrupts need to be disabled before single-step mode is set, and not
- * reenabled until after single-step mode ends.
- * Without disabling interrupt on local CPU, there is a chance of
- * interrupt occurrence in the period of exception return and start of
- * out-of-line single-step, that result in wrongly single stepping
- * into the interrupt handler.
+ * Mask all of DAIF while executing the instruction out-of-line, to keep things
+ * simple and avoid nesting exceptions. Interrupts do have to be disabled since
+ * the kprobe state is per-CPU and doesn't get migrated.
  */
 static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
 						struct pt_regs *regs)
 {
 	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
-	regs->pstate |= PSR_I_BIT;
-	/* Unmask PSTATE.D for enabling software step exceptions. */
-	regs->pstate &= ~PSR_D_BIT;
+	regs->pstate |= DAIF_MASK;
 }
 
 static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
@@ -219,10 +210,7 @@ static void __kprobes setup_singlestep(struct kprobe *p,
 		slot = (unsigned long)p->ainsn.api.insn;
 
 		set_ss_context(kcb, slot);	/* mark pending ss */
-
-		/* IRQs and single stepping do not mix well. */
 		kprobes_save_local_irqflag(kcb, regs);
-		kernel_enable_single_step(regs);
 		instruction_pointer_set(regs, slot);
 	} else {
 		/* insn simulation */
@@ -273,12 +261,8 @@ post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
 	}
 	/* call post handler */
 	kcb->kprobe_status = KPROBE_HIT_SSDONE;
-	if (cur->post_handler)	{
-		/* post_handler can hit breakpoint and single step
-		 * again, so we enable D-flag for recursive exception.
-		 */
+	if (cur->post_handler)
 		cur->post_handler(cur, regs, 0);
-	}
 
 	reset_current_kprobe();
 }
@@ -302,8 +286,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
 		if (!instruction_pointer(regs))
 			BUG();
 
-		kernel_disable_single_step();
-
 		if (kcb->kprobe_status == KPROBE_REENTER)
 			restore_previous_kprobe(kcb);
 		else
@@ -365,10 +347,6 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
 		 * pre-handler and it returned non-zero, it will
 		 * modify the execution path and no need to single
 		 * stepping. Let's just reset current kprobe and exit.
-		 *
-		 * pre_handler can hit a breakpoint and can step thru
-		 * before return, keep PSTATE D-flag enabled until
-		 * pre_handler return back.
 		 */
 		if (!p->pre_handler || !p->pre_handler(p, regs)) {
 			setup_singlestep(p, regs, kcb, 0);
@@ -399,7 +377,7 @@ kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
 }
 
 static int __kprobes
-kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
+kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
 {
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 	int retval;
@@ -409,16 +387,15 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
 
 	if (retval == DBG_HOOK_HANDLED) {
 		kprobes_restore_local_irqflag(kcb, regs);
-		kernel_disable_single_step();
-
 		post_kprobe_handler(kcb, regs);
 	}
 
 	return retval;
 }
 
-static struct step_hook kprobes_step_hook = {
-	.fn = kprobe_single_step_handler,
+static struct break_hook kprobes_break_ss_hook = {
+	.imm = KPROBES_BRK_SS_IMM,
+	.fn = kprobe_breakpoint_ss_handler,
 };
 
 static int __kprobes
@@ -486,7 +463,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
 int __init arch_init_kprobes(void)
 {
 	register_kernel_break_hook(&kprobes_break_hook);
-	register_kernel_step_hook(&kprobes_step_hook);
+	register_kernel_break_hook(&kprobes_break_ss_hook);
 
 	return 0;
 }
@@ -63,7 +63,7 @@ static inline void restore_user_access(unsigned long flags)
 static inline bool
 bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
-	return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000),
+	return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xff000000),
 		    "Bug: fault blocked by AP register !");
 }
 
@@ -33,19 +33,18 @@
  * respectively NA for All or X for Supervisor and no access for User.
  * Then we use the APG to say whether accesses are according to Page rules or
  * "all Supervisor" rules (Access to all)
- * Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-15 => Not Used
+ * _PAGE_ACCESSED is also managed via APG. When _PAGE_ACCESSED is not set, say
+ * "all User" rules, that will lead to NA for all.
+ * Therefore, we define 4 APG groups. lsb is _PAGE_ACCESSED
+ * 0 => Kernel => 11 (all accesses performed according as user iaw page definition)
+ * 1 => Kernel+Accessed => 01 (all accesses performed according to page definition)
+ * 2 => User => 11 (all accesses performed according as user iaw page definition)
+ * 3 => User+Accessed => 00 (all accesses performed as supervisor iaw page definition) for INIT
+ *      => 10 (all accesses performed according to swaped page definition) for KUEP
+ * 4-15 => Not Used
  */
-#define MI_APG_INIT	0x40000000
-
-/*
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 10 (all accesses performed according to swaped page definition)
- * 2-15 => Not Used
- */
-#define MI_APG_KUEP	0x60000000
+#define MI_APG_INIT	0xdc000000
+#define MI_APG_KUEP	0xde000000
 
 /* The effective page number register. When read, contains the information
  * about the last instruction TLB miss. When MI_RPN is written, bits in
@@ -106,25 +105,9 @@
 #define MD_Ks		0x80000000	/* Should not be set */
 #define MD_Kp		0x40000000	/* Should always be set */
 
-/*
- * All pages' PP data bits are set to either 000 or 011 or 001, which means
- * respectively RW for Supervisor and no access for User, or RO for
- * Supervisor and no access for user and NA for ALL.
- * Then we use the APG to say whether accesses are according to Page rules or
- * "all Supervisor" rules (Access to all)
- * Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-15 => Not Used
- */
-#define MD_APG_INIT	0x40000000
-
-/*
- * 0 => No user => 01 (all accesses performed according to page definition)
- * 1 => User => 10 (all accesses performed according to swaped page definition)
- * 2-15 => Not Used
- */
-#define MD_APG_KUAP	0x60000000
+/* See explanation above at the definition of MI_APG_INIT */
+#define MD_APG_INIT	0xdc000000
+#define MD_APG_KUAP	0xde000000
 
 /* The effective page number register. When read, contains the information
  * about the last instruction TLB miss. When MD_RPN is written, bits in
@@ -39,9 +39,9 @@
  * into the TLB.
  */
 #define _PAGE_GUARDED	0x0010	/* Copied to L1 G entry in DTLB */
-#define _PAGE_SPECIAL	0x0020	/* SW entry */
+#define _PAGE_ACCESSED	0x0020	/* Copied to L1 APG 1 entry in I/DTLB */
 #define _PAGE_EXEC	0x0040	/* Copied to PP (bit 21) in ITLB */
-#define _PAGE_ACCESSED	0x0080	/* software: page referenced */
+#define _PAGE_SPECIAL	0x0080	/* SW entry */
 
 #define _PAGE_NA	0x0200	/* Supervisor NA, User no access */
 #define _PAGE_RO	0x0600	/* Supervisor RO, User no access */
@@ -59,11 +59,12 @@
 
 #define _PMD_PRESENT	0x0001
 #define _PMD_PRESENT_MASK	_PMD_PRESENT
-#define _PMD_BAD	0x0fd0
+#define _PMD_BAD	0x0f90
 #define _PMD_PAGE_MASK	0x000c
 #define _PMD_PAGE_8M	0x000c
 #define _PMD_PAGE_512K	0x0004
-#define _PMD_USER	0x0020	/* APG 1 */
+#define _PMD_ACCESSED	0x0020	/* APG 1 */
+#define _PMD_USER	0x0040	/* APG 2 */
 
 #define _PTE_NONE_MASK	0
 
@@ -6,6 +6,7 @@
 
 struct device;
 struct device_node;
+struct drmem_lmb;
 
 #ifdef CONFIG_NUMA
 
@@ -61,6 +62,9 @@ static inline int early_cpu_to_node(int cpu)
 	 */
 	return (nid < 0) ? 0 : nid;
 }
+
+int of_drconf_to_nid_single(struct drmem_lmb *lmb);
+
 #else
 
 static inline int early_cpu_to_node(int cpu) { return 0; }
@@ -84,10 +88,12 @@ static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
 	return 0;
 }
 
-#endif /* CONFIG_NUMA */
+static inline int of_drconf_to_nid_single(struct drmem_lmb *lmb)
+{
+	return first_online_node;
+}
 
-struct drmem_lmb;
-int of_drconf_to_nid_single(struct drmem_lmb *lmb);
+#endif /* CONFIG_NUMA */
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
 extern int find_and_online_cpu_nid(int cpu);
@@ -178,7 +178,7 @@ do {								\
  * are no aliasing issues.
  */
 #define __put_user_asm_goto(x, addr, label, op)			\
-	asm volatile goto(					\
+	asm_volatile_goto(					\
 		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
 		EX_TABLE(1b, %l2)				\
 		:						\
@@ -191,7 +191,7 @@ do {								\
 	__put_user_asm_goto(x, ptr, label, "std")
 #else /* __powerpc64__ */
 #define __put_user_asm2_goto(x, addr, label)			\
-	asm volatile goto(					\
+	asm_volatile_goto(					\
 		"1:	stw%X1 %0, %1\n"			\
 		"2:	stw%X1 %L0, %L1\n"			\
 		EX_TABLE(1b, %l2)				\
@@ -264,8 +264,9 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
 {
 	struct pci_io_addr_range *piar;
 	struct rb_node *n;
+	unsigned long flags;
 
-	spin_lock(&pci_io_addr_cache_root.piar_lock);
+	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
 	for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {
 		piar = rb_entry(n, struct pci_io_addr_range, rb_node);
 
@@ -273,7 +274,7 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
 			(piar->flags & IORESOURCE_IO) ? "i/o" : "mem",
 			&piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
 	}
-	spin_unlock(&pci_io_addr_cache_root.piar_lock);
+	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
 
 	return 0;
 }
@@ -284,11 +284,7 @@ _ENTRY(saved_ksp_limit)
 
 	rlwimi	r11, r10, 22, 20, 29	/* Compute PTE address */
 	lwz	r11, 0(r11)		/* Get Linux PTE */
-#ifdef CONFIG_SWAP
 	li	r9, _PAGE_PRESENT | _PAGE_ACCESSED
-#else
-	li	r9, _PAGE_PRESENT
-#endif
 	andc.	r9, r9, r11		/* Check permission */
 	bne	5f
 
@@ -369,11 +365,7 @@ _ENTRY(saved_ksp_limit)
 
 	rlwimi	r11, r10, 22, 20, 29	/* Compute PTE address */
 	lwz	r11, 0(r11)		/* Get Linux PTE */
-#ifdef CONFIG_SWAP
 	li	r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
-#else
-	li	r9, _PAGE_PRESENT | _PAGE_EXEC
-#endif
 	andc.	r9, r9, r11		/* Check permission */
 	bne	5f
 
@@ -202,9 +202,7 @@ SystemCall:
 
 InstructionTLBMiss:
 	mtspr	SPRN_SPRG_SCRATCH0, r10
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS)
 	mtspr	SPRN_SPRG_SCRATCH1, r11
-#endif
 
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
@@ -224,25 +222,13 @@ InstructionTLBMiss:
 3:
 	mtcr	r11
 #endif
-#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
 	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10)	/* Get level 1 entry */
 	mtspr	SPRN_MD_TWC, r11
-#else
-	lwz	r10, (swapper_pg_dir-PAGE_OFFSET)@l(r10)	/* Get level 1 entry */
-	mtspr	SPRN_MI_TWC, r10	/* Set segment attributes */
-	mtspr	SPRN_MD_TWC, r10
-#endif
 	mfspr	r10, SPRN_MD_TWC
 	lwz	r10, 0(r10)	/* Get the pte */
-#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
+	rlwimi	r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED
 	rlwimi	r11, r10, 32 - 9, _PMD_PAGE_512K
 	mtspr	SPRN_MI_TWC, r11
-#endif
-#ifdef CONFIG_SWAP
-	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
-	and	r11, r11, r10
-	rlwimi	r10, r11, 0, _PAGE_PRESENT
-#endif
 	/* The Linux PTE won't go exactly into the MMU TLB.
 	 * Software indicator bits 20 and 23 must be clear.
 	 * Software indicator bits 22, 24, 25, 26, and 27 must be
@@ -256,9 +242,7 @@ InstructionTLBMiss:
 
 	/* Restore registers */
 0:	mfspr	r10, SPRN_SPRG_SCRATCH0
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS)
 	mfspr	r11, SPRN_SPRG_SCRATCH1
-#endif
 	rfi
 	patch_site	0b, patch__itlbmiss_exit_1
 
@@ -268,9 +252,7 @@ InstructionTLBMiss:
 	addi	r10, r10, 1
 	stw	r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
 	mfspr	r10, SPRN_SPRG_SCRATCH0
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP)
 	mfspr	r11, SPRN_SPRG_SCRATCH1
-#endif
 	rfi
 #endif
 
@@ -297,30 +279,16 @@ DataStoreTLBMiss:
 	mfspr	r10, SPRN_MD_TWC
 	lwz	r10, 0(r10)	/* Get the pte */
 
-	/* Insert the Guarded flag into the TWC from the Linux PTE.
+	/* Insert Guarded and Accessed flags into the TWC from the Linux PTE.
 	 * It is bit 27 of both the Linux PTE and the TWC (at least
 	 * I got that right :-). It will be better when we can put
 	 * this into the Linux pgd/pmd and load it in the operation
 	 * above.
 	 */
-	rlwimi	r11, r10, 0, _PAGE_GUARDED
+	rlwimi	r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED
 	rlwimi	r11, r10, 32 - 9, _PMD_PAGE_512K
 	mtspr	SPRN_MD_TWC, r11
 
-	/* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
-	 * We also need to know if the insn is a load/store, so:
-	 * Clear _PAGE_PRESENT and load that which will
-	 * trap into DTLB Error with store bit set accordinly.
-	 */
-	/* PRESENT=0x1, ACCESSED=0x20
-	 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
-	 * r10 = (r10 & ~PRESENT) | r11;
-	 */
-#ifdef CONFIG_SWAP
-	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
-	and	r11, r11, r10
-	rlwimi	r10, r11, 0, _PAGE_PRESENT
-#endif
 	/* The Linux PTE won't go exactly into the MMU TLB.
 	 * Software indicator bits 24, 25, 26, and 27 must be
 	 * set. All other Linux PTE bits control the behavior
@@ -711,7 +679,7 @@ initial_mmu:
 	li	r9, 4				/* up to 4 pages of 8M */
 	mtctr	r9
 	lis	r9, KERNELBASE@h		/* Create vaddr for TLB */
-	li	r10, MI_PS8MEG | MI_SVALID	/* Set 8M byte page */
+	li	r10, MI_PS8MEG | _PMD_ACCESSED | MI_SVALID
 	li	r11, MI_BOOTINIT		/* Create RPN for address 0 */
 1:
 	mtspr	SPRN_MI_CTR, r8	/* Set instruction MMU control */
@@ -775,7 +743,7 @@ _GLOBAL(mmu_pin_tlb)
 #ifdef CONFIG_PIN_TLB_TEXT
 	LOAD_REG_IMMEDIATE(r5, 28 << 8)
 	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
-	LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG)
+	LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
 	LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
 	LOAD_REG_ADDR(r9, _sinittext)
 	li	r0, 4
@@ -797,7 +765,7 @@ _GLOBAL(mmu_pin_tlb)
 	LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
 #ifdef CONFIG_PIN_TLB_DATA
 	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
-	LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG)
+	LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
 #ifdef CONFIG_PIN_TLB_IMMR
 	li	r0, 3
 #else
@@ -834,7 +802,7 @@ _GLOBAL(mmu_pin_tlb)
 #endif
 #ifdef CONFIG_PIN_TLB_IMMR
 	LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
-	LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED)
+	LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED | _PMD_ACCESSED)
 	mfspr	r8, SPRN_IMMR
 	rlwinm	r8, r8, 0, 0xfff80000
 	ori	r8, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
@@ -457,11 +457,7 @@ InstructionTLBMiss:
 	cmplw	0,r1,r3
 #endif
 	mfspr	r2, SPRN_SPRG_PGDIR
-#ifdef CONFIG_SWAP
 	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
-#else
-	li	r1,_PAGE_PRESENT | _PAGE_EXEC
-#endif
 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
 	bgt-	112f
 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
@@ -523,11 +519,7 @@ DataLoadTLBMiss:
 	lis	r1, TASK_SIZE@h		/* check if kernel address */
 	cmplw	0,r1,r3
 	mfspr	r2, SPRN_SPRG_PGDIR
-#ifdef CONFIG_SWAP
 	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
-#else
-	li	r1, _PAGE_PRESENT
-#endif
 	bgt-	112f
 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
 	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
@@ -603,11 +595,7 @@ DataStoreTLBMiss:
 	lis	r1, TASK_SIZE@h		/* check if kernel address */
 	cmplw	0,r1,r3
 	mfspr	r2, SPRN_SPRG_PGDIR
-#ifdef CONFIG_SWAP
 	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
-#else
-	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
-#endif
 	bgt-	112f
 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
 	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
|
@@ -1393,13 +1393,14 @@ static void add_cpu_to_masks(int cpu)
|
||||
/* Activate a secondary processor. */
|
||||
void start_secondary(void *unused)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
unsigned int cpu = raw_smp_processor_id();
|
||||
|
||||
mmgrab(&init_mm);
|
||||
current->active_mm = &init_mm;
|
||||
|
||||
smp_store_cpu_info(cpu);
|
||||
set_dec(tb_ticks_per_jiffy);
|
||||
rcu_cpu_starting(cpu);
|
||||
preempt_disable();
|
||||
cpu_callin_map[cpu] = 1;
|
||||
|
||||
|
@@ -476,7 +476,7 @@ do {								\
 do {									\
 	long __kr_err;							\
 									\
-	__put_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err);	\
+	__put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err);	\
 	if (unlikely(__kr_err))						\
 		goto err_label;						\
 } while (0)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2013 Linaro Limited
  * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
@@ -35,12 +35,17 @@ ENTRY(_start)
 	.word 0
 #endif
 	.balign 8
+#ifdef CONFIG_RISCV_M_MODE
+	/* Image load offset (0MB) from start of RAM for M-mode */
+	.dword 0
+#else
 #if __riscv_xlen == 64
 	/* Image load offset(2MB) from start of RAM */
 	.dword 0x200000
 #else
 	/* Image load offset(4MB) from start of RAM */
 	.dword 0x400000
 #endif
+#endif
 	/* Effective size of kernel image */
 	.dword _end - _start
arch/riscv/kernel/vdso/.gitignore (vendored, 1 line changed)
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 vdso.lds
 *.tmp
+vdso-syms.S
@@ -43,19 +43,14 @@ $(obj)/vdso.o: $(obj)/vdso.so
 SYSCFLAGS_vdso.so.dbg = $(c_flags)
 $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
 	$(call if_changed,vdsold)
+SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+	-Wl,--build-id -Wl,--hash-style=both
 
-# We also create a special relocatable object that should mirror the symbol
-# table and layout of the linked DSO. With ld --just-symbols we can then
-# refer to these symbols in the kernel code rather than hand-coded addresses.
-
-SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-	-Wl,--build-id=sha1 -Wl,--hash-style=both
-$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
-	$(call if_changed,vdsold)
-
-LDFLAGS_vdso-syms.o := -r --just-symbols
-$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
-	$(call if_changed,ld)
+$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE
+	$(call if_changed,so2s)
 
 # strip rule for the .so file
 $(obj)/%.so: OBJCOPYFLAGS := -S
@@ -73,6 +68,11 @@ quiet_cmd_vdsold = VDSOLD  $@
                    $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
                    rm $@.tmp
 
+# Extracts symbol offsets from the VDSO, converting them into an assembly file
+# that contains the same symbols at the same offsets.
+quiet_cmd_so2s = SO2S    $@
+      cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
+
 # install commands for the unstripped file
 quiet_cmd_vdso_install = INSTALL $@
       cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
6
arch/riscv/kernel/vdso/so2s.sh
Executable file
6
arch/riscv/kernel/vdso/so2s.sh
Executable file
@@ -0,0 +1,6 @@
|
||||
#!/bin/sh
|
||||
# SPDX-License-Identifier: GPL-2.0+
|
||||
# Copyright 2020 Palmer Dabbelt <palmerdabbelt@google.com>
|
||||
|
||||
sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_4.15\)*!.global \2\n.set \2,0x\1!' \
|
||||
| grep '^\.'
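
To make the new so2s.sh concrete: assuming `nm -D` emitted a line such as

    0000000000000800 T __vdso_rt_sigreturn@@LINUX_4.15

(address and symbol invented for illustration), the sed expression rewrites it into the two assembler directives

    .global __vdso_rt_sigreturn
    .set __vdso_rt_sigreturn,0x0000000000000800

and the trailing grep keeps only lines starting with a dot, discarding any nm output the pattern left untouched.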

@@ -86,6 +86,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
int index;
unsigned long pfn;

/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs))
@@ -100,7 +101,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
* of a task switch.
*/
index = pgd_index(addr);
pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
pfn = csr_read(CSR_SATP) & SATP_PPN;
pgd = (pgd_t *)pfn_to_virt(pfn) + index;
pgd_k = init_mm.pgd + index;

if (!pgd_present(*pgd_k)) {
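
The two-line fix above is about register layout: on RV64 the satp CSR packs MODE and ASID fields above the PPN, so using the raw CSR value as a page-frame number computes a bogus pgd pointer. A self-contained sketch with the assumed RV64 layout (MODE[63:60], ASID[59:44], PPN[43:0]):

    #include <stdio.h>
    #include <stdint.h>

    #define SATP_PPN 0x00000FFFFFFFFFFFULL /* assumed RV64 PPN field mask */

    int main(void)
    {
            uint64_t satp = (8ULL << 60) | 0x80042ULL; /* Sv39 mode + a PPN */

            /* Without the mask, the MODE bits corrupt the computed address. */
            printf("raw=0x%016llx ppn=0x%llx\n",
                   (unsigned long long)satp,
                   (unsigned long long)(satp & SATP_PPN));
            return 0;
    }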

@@ -154,9 +154,8 @@ disable:

void __init setup_bootmem(void)
{
phys_addr_t mem_size = 0;
phys_addr_t total_mem = 0;
phys_addr_t mem_start, start, end = 0;
phys_addr_t mem_start = 0;
phys_addr_t start, end = 0;
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
u64 i;
@@ -164,21 +163,18 @@ void __init setup_bootmem(void)
/* Find the memory region containing the kernel */
for_each_mem_range(i, &start, &end) {
phys_addr_t size = end - start;
if (!total_mem)
if (!mem_start)
mem_start = start;
if (start <= vmlinux_start && vmlinux_end <= end)
BUG_ON(size == 0);
total_mem = total_mem + size;
}

/*
* Remove memblock from the end of usable area to the
* end of region
* The maximal physical memory size is -PAGE_OFFSET.
* Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is removed
* as it is unusable by kernel.
*/
mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
if (mem_start + mem_size < end)
memblock_remove(mem_start + mem_size,
end - mem_start - mem_size);
memblock_enforce_memory_limit(mem_start - PAGE_OFFSET);

/* Reserve from the start of the kernel to the end of the kernel */
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
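
The rewritten hunk delegates the trimming to memblock_enforce_memory_limit(). The magic quantity is -PAGE_OFFSET: under unsigned negation it is the number of bytes the kernel's linear map can cover, as the new comment notes. A small demonstration with an assumed Sv39 PAGE_OFFSET:

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_offset = 0xffffffe000000000UL; /* assumed Sv39 */

            /* Unsigned negation: the distance from PAGE_OFFSET to the top of
             * the address space, i.e. the maximum the linear map can cover. */
            printf("max linear map = 0x%lx bytes (%lu GiB)\n",
                   -page_offset, -page_offset >> 30);
            return 0;
    }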
@@ -297,6 +293,7 @@ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
#define NUM_EARLY_PMDS (1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
@@ -494,6 +491,18 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
load_pa + (va - PAGE_OFFSET),
map_size, PAGE_KERNEL_EXEC);

#ifndef __PAGETABLE_PMD_FOLDED
/* Setup early PMD for DTB */
create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
(uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
/* Create two consecutive PMD mappings for FDT early scan */
pa = dtb_pa & ~(PMD_SIZE - 1);
create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
pa, PMD_SIZE, PAGE_KERNEL);
create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else
/* Create two consecutive PGD mappings for FDT early scan */
pa = dtb_pa & ~(PGDIR_SIZE - 1);
create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
@@ -501,6 +510,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
#endif
dtb_early_pa = dtb_pa;
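
Why the hunk maps two consecutive PMD (or PGD) windows for the FDT: the blob is mapped from the block-aligned base below it, so a device tree that starts near the end of one 2 MiB block can spill into the next. A self-contained sketch of the arithmetic (addresses invented):

    #include <stdio.h>

    #define PMD_SIZE (2UL << 20) /* 2 MiB block, as on riscv64 */

    int main(void)
    {
            unsigned long dtb_pa = 0x81ff0000UL;             /* near a block end  */
            unsigned long pa     = dtb_pa & ~(PMD_SIZE - 1); /* aligned-down base */
            unsigned long off    = dtb_pa & (PMD_SIZE - 1);  /* offset inside it  */

            /* With one block mapped, anything past pa + PMD_SIZE would be
             * unmapped; the second consecutive mapping covers the spill. */
            printf("base=0x%lx offset=0x%lx mapped=[0x%lx, 0x%lx)\n",
                   pa, off, pa, pa + 2 * PMD_SIZE);
            return 0;
    }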

/*

@@ -93,9 +93,10 @@ CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
@@ -378,7 +379,6 @@ CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
# CONFIG_NET_DROP_MONITOR is not set
CONFIG_PCI=y
# CONFIG_PCIEASPM is not set
CONFIG_PCI_DEBUG=y
@@ -386,7 +386,7 @@ CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=y
CONFIG_ZRAM=m
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
@@ -689,6 +689,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CHACHA20POLY1305=m
@@ -709,7 +710,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -753,6 +753,7 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m
CONFIG_CRC32_SELFTEST=y
CONFIG_CRC4=m
@@ -829,6 +830,7 @@ CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
CONFIG_FAILSLAB=y
CONFIG_FAIL_PAGE_ALLOC=y
CONFIG_FAULT_INJECTION_USERCOPY=y
CONFIG_FAIL_MAKE_REQUEST=y
CONFIG_FAIL_IO_TIMEOUT=y
CONFIG_FAIL_FUTEX=y

@@ -87,9 +87,10 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
@@ -371,7 +372,6 @@ CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
# CONFIG_NET_DROP_MONITOR is not set
CONFIG_PCI=y
# CONFIG_PCIEASPM is not set
CONFIG_HOTPLUG_PCI=y
@@ -379,7 +379,7 @@ CONFIG_HOTPLUG_PCI_S390=y
CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=y
CONFIG_ZRAM=m
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
@@ -680,6 +680,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_CHACHA20POLY1305=m
@@ -701,7 +702,6 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
@@ -745,6 +745,7 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m
CONFIG_PRIME_NUMBERS=m
CONFIG_CRC4=m

@@ -17,11 +17,11 @@ CONFIG_HZ_100=y
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
CONFIG_CRASH_DUMP=y
# CONFIG_SECCOMP is not set
# CONFIG_PFAULT is not set
# CONFIG_S390_HYPFS_FS is not set
# CONFIG_VIRTUALIZATION is not set
# CONFIG_S390_GUEST is not set
# CONFIG_SECCOMP is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set

@@ -692,16 +692,6 @@ static inline int pud_large(pud_t pud)
return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
unsigned long origin_mask;

origin_mask = _REGION_ENTRY_ORIGIN;
if (pud_large(pud))
origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_leaf pmd_large
static inline int pmd_large(pmd_t pmd)
{
@@ -747,16 +737,6 @@ static inline int pmd_none(pmd_t pmd)
return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
unsigned long origin_mask;

origin_mask = _SEGMENT_ENTRY_ORIGIN;
if (pmd_large(pmd))
origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
@@ -1238,11 +1218,39 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline unsigned long pmd_deref(pmd_t pmd)
{
unsigned long origin_mask;

origin_mask = _SEGMENT_ENTRY_ORIGIN;
if (pmd_large(pmd))
origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
return pmd_val(pmd) & origin_mask;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
return pmd_deref(pmd) >> PAGE_SHIFT;
}

static inline unsigned long pud_deref(pud_t pud)
{
unsigned long origin_mask;

origin_mask = _REGION_ENTRY_ORIGIN;
if (pud_large(pud))
origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
return pud_val(pud) & origin_mask;
}

static inline unsigned long pud_pfn(pud_t pud)
{
return pud_deref(pud) >> PAGE_SHIFT;
}
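
The reshuffle above is more than deduplication: the old one-line pmd_deref()/pud_deref() macros always masked with the small-entry origin mask, which keeps bits that are flag bits in a large (segment/region-third) entry. The new helpers select the mask by entry type, and pmd_pfn()/pud_pfn() become thin wrappers around them. A contrived, self-contained demonstration with invented mask values:

    #include <stdio.h>

    #define ORIGIN_MASK       (~0xfffUL)   /* 4 KiB-entry origin (illustrative) */
    #define ORIGIN_MASK_LARGE (~0xfffffUL) /* large-entry origin (illustrative) */

    int main(void)
    {
            /* A large entry: origin plus a flag bit living below bit 20. */
            unsigned long entry = 0x40000000UL | 0x2000UL;

            printf("small mask keeps the flag: 0x%lx\n", entry & ORIGIN_MASK);
            printf("large mask strips it:      0x%lx\n", entry & ORIGIN_MASK_LARGE);
            return 0;
    }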

/*
* The pgd_offset function *always* adds the index for the top-level
* region/segment table. This is done to get a sequence like the

@@ -61,14 +61,6 @@ int main(void)
BLANK();
OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val);
BLANK();
/* constants used by the vdso */
DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
BLANK();
/* idle data offsets */
OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
OFFSET(__CLOCK_IDLE_EXIT, s390_idle_data, clock_idle_exit);

@@ -855,13 +855,14 @@ void __init smp_detect_cpus(void)

static void smp_init_secondary(void)
{
int cpu = smp_processor_id();
int cpu = raw_smp_processor_id();

S390_lowcore.last_update_clock = get_tod_clock();
restore_access_regs(S390_lowcore.access_regs_save_area);
set_cpu_flag(CIF_ASCE_PRIMARY);
set_cpu_flag(CIF_ASCE_SECONDARY);
cpu_init();
rcu_cpu_starting(cpu);
preempt_disable();
init_cpu_timer();
vtime_init();

@@ -101,6 +101,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
if (ret)
break;

/* the PCI function will be scanned once function 0 appears */
if (!zdev->zbus->bus)
break;

pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn);
if (!pdev)
break;

@@ -290,6 +290,9 @@ static void __init uv_stringify(int len, char *to, char *from)
{
/* Relies on 'to' being NULL chars so result will be NULL terminated */
strncpy(to, from, len-1);

/* Trim trailing spaces */
(void)strim(to);
}

/* Find UV arch type entry in UVsystab */
@@ -366,7 +369,7 @@ static int __init early_get_arch_type(void)
return ret;
}

static int __init uv_set_system_type(char *_oem_id)
static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id)
{
/* Save OEM_ID passed from ACPI MADT */
uv_stringify(sizeof(oem_id), oem_id, _oem_id);
@@ -386,13 +389,23 @@ static int __init uv_set_system_type(char *_oem_id)
/* (Not hubless), not a UV */
return 0;

/* Is UV hubless system */
uv_hubless_system = 0x01;

/* UV5 Hubless */
if (strncmp(uv_archtype, "NSGI5", 5) == 0)
uv_hubless_system |= 0x20;

/* UV4 Hubless: CH */
if (strncmp(uv_archtype, "NSGI4", 5) == 0)
uv_hubless_system = 0x11;
else if (strncmp(uv_archtype, "NSGI4", 5) == 0)
uv_hubless_system |= 0x10;

/* UV3 Hubless: UV300/MC990X w/o hub */
else
uv_hubless_system = 0x9;
uv_hubless_system |= 0x8;

/* Copy APIC type */
uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id);

pr_info("UV: OEM IDs %s/%s, SystemType %d, HUBLESS ID %x\n",
oem_id, oem_table_id, uv_system_type, uv_hubless_system);
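
The pattern in this hunk: uv_hubless_system starts at 0x01 ("system is hubless") and the UV3/UV4/UV5 class bits are now OR-ed on top with |=, where the old code overwrote the whole value with constants like 0x11 and 0x9. A tiny model:

    #include <stdio.h>

    int main(void)
    {
            int uv_hubless_system = 0x01; /* base: hubless */

            uv_hubless_system |= 0x10;    /* e.g. the UV4 class bit */
            printf("0x%x\n", uv_hubless_system); /* 0x11: base bit kept */
            return 0;
    }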
@@ -456,7 +469,7 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0;

/* If not UV, return. */
if (likely(uv_set_system_type(_oem_id) == 0))
if (uv_set_system_type(_oem_id, _oem_table_id) == 0)
return 0;

/* Save and Decode OEM Table ID */

@@ -1254,6 +1254,14 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
return 0;
}

static bool is_spec_ib_user_controlled(void)
{
return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
}

static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
switch (ctrl) {
@@ -1261,16 +1269,26 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return 0;

/*
* Indirect branch speculation is always disabled in strict
* mode. It can neither be enabled if it was force-disabled
* by a previous prctl call.
* With strict mode for both IBPB and STIBP, the instruction
* code paths avoid checking this task flag and instead,
* unconditionally run the instruction. However, STIBP and IBPB
* are independent and either can be set to conditionally
* enabled regardless of the mode of the other.
*
* If either is set to conditional, allow the task flag to be
* updated, unless it was force-disabled by a previous prctl
* call. Currently, this is possible on an AMD CPU which has the
* feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
* kernel is booted with 'spectre_v2_user=seccomp', then
* spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
* spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
*/
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
if (!is_spec_ib_user_controlled() ||
task_spec_ib_force_disable(task))
return -EPERM;

task_clear_spec_ib_disable(task);
task_update_spec_tif(task);
break;
@@ -1283,10 +1301,10 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return -EPERM;
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)

if (!is_spec_ib_user_controlled())
return 0;

task_set_spec_ib_disable(task);
if (ctrl == PR_SPEC_FORCE_DISABLE)
task_set_spec_ib_force_disable(task);
@@ -1351,20 +1369,17 @@ static int ib_prctl_get(struct task_struct *task)
if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
return PR_SPEC_ENABLE;
else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
return PR_SPEC_DISABLE;
else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
else if (is_spec_ib_user_controlled()) {
if (task_spec_ib_force_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
if (task_spec_ib_disable(task))
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
} else
} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
return PR_SPEC_DISABLE;
else
return PR_SPEC_NOT_AFFECTED;
}
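
Condensing the three hunks above: the repeated four-way PRCTL/SECCOMP comparison now lives in is_spec_ib_user_controlled(), and PR_SPEC_DISABLE on a strict-mode kernel becomes a successful no-op instead of -EPERM (the task is already protected). A self-contained model of the resulting decision logic; the enum names abbreviate the kernel's:

    #include <stdio.h>
    #include <errno.h>

    enum mode { NONE, STRICT, STRICT_PREFERRED, PRCTL, SECCOMP };

    static enum mode ibpb = STRICT, stibp = STRICT; /* e.g. spectre_v2_user=on */

    static int user_controlled(void)
    {
            return ibpb == PRCTL || ibpb == SECCOMP ||
                   stibp == PRCTL || stibp == SECCOMP;
    }

    int main(void)
    {
            /* PR_SPEC_ENABLE: strict mode cannot be relaxed per task. */
            printf("enable  -> %d\n", user_controlled() ? 0 : -EPERM);
            /* PR_SPEC_DISABLE: already force-disabled; report success
             * (0) rather than setting a per-task flag. */
            printf("disable -> %d\n", user_controlled() ? 1 : 0);
            return 0;
    }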

@@ -16,8 +16,6 @@
* to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
*/

.weak memcpy

/*
* memcpy - Copy a memory block.
*
@@ -30,7 +28,7 @@
* rax original destination
*/
SYM_FUNC_START_ALIAS(__memcpy)
SYM_FUNC_START_LOCAL(memcpy)
SYM_FUNC_START_WEAK(memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memcpy_erms", X86_FEATURE_ERMS

@@ -24,9 +24,7 @@
* Output:
* rax: dest
*/
.weak memmove

SYM_FUNC_START_ALIAS(memmove)
SYM_FUNC_START_WEAK(memmove)
SYM_FUNC_START(__memmove)

mov %rdi, %rax

@@ -6,8 +6,6 @@
#include <asm/alternative-asm.h>
#include <asm/export.h>

.weak memset

/*
* ISO C memset - set a memory block to a byte value. This function uses fast
* string to get better performance than the original function. The code is
@@ -19,7 +17,7 @@
*
* rax original destination
*/
SYM_FUNC_START_ALIAS(memset)
SYM_FUNC_START_WEAK(memset)
SYM_FUNC_START(__memset)
/*
* Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended

@@ -47,7 +47,7 @@ struct nullb_device {
unsigned int nr_zones_closed;
struct blk_zone *zones;
sector_t zone_size_sects;
spinlock_t zone_dev_lock;
spinlock_t zone_lock;
unsigned long *zone_locks;

unsigned long size; /* device size in MB */

@@ -46,12 +46,21 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
if (!dev->zones)
return -ENOMEM;

spin_lock_init(&dev->zone_dev_lock);
/*
* With memory backing, the zone_lock spinlock needs to be temporarily
* released to avoid scheduling in atomic context. To guarantee zone
* information protection, use a bitmap to lock zones with
* wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing
* implies that the queue is marked with BLK_MQ_F_BLOCKING.
*/
spin_lock_init(&dev->zone_lock);
if (dev->memory_backed) {
dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
if (!dev->zone_locks) {
kvfree(dev->zones);
return -ENOMEM;
}
}

if (dev->zone_nr_conv >= dev->nr_zones) {
dev->zone_nr_conv = dev->nr_zones - 1;
@@ -137,11 +146,16 @@ void null_free_zoned_dev(struct nullb_device *dev)

static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
{
if (dev->memory_backed)
wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
spin_lock_irq(&dev->zone_lock);
}

static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
{
spin_unlock_irq(&dev->zone_lock);

if (dev->memory_backed)
clear_and_wake_up_bit(zno, dev->zone_locks);
}
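
A user-space model of the locking scheme introduced above may help: one bit per zone, taken before the zone spinlock, so the memory-backed path can drop the spinlock (to sleep in the backing store) while still holding the zone. The kernel sleeps via wait_on_bit_lock_io()/clear_and_wake_up_bit(); this sketch busy-waits instead:

    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static void lock_zone_bit(unsigned long *bits, unsigned int zno)
    {
            unsigned long mask = 1UL << (zno % BITS_PER_LONG);

            /* wait_on_bit_lock_io() sleeps here; a spin stands in for it */
            while (__atomic_fetch_or(&bits[zno / BITS_PER_LONG], mask,
                                     __ATOMIC_ACQUIRE) & mask)
                    ;
    }

    static void unlock_zone_bit(unsigned long *bits, unsigned int zno)
    {
            unsigned long mask = 1UL << (zno % BITS_PER_LONG);

            __atomic_fetch_and(&bits[zno / BITS_PER_LONG], ~mask,
                               __ATOMIC_RELEASE);
    }

    int main(void)
    {
            unsigned long zone_locks[2] = { 0, 0 }; /* bitmap_zalloc() analogue */

            lock_zone_bit(zone_locks, 70);
            printf("word1=0x%lx\n", zone_locks[1]); /* bit 6 of second word */
            unlock_zone_bit(zone_locks, 70);
            return 0;
    }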

@@ -322,7 +336,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);

null_lock_zone(dev, zno);
spin_lock(&dev->zone_dev_lock);

switch (zone->cond) {
case BLK_ZONE_COND_FULL:
@@ -375,9 +388,17 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
zone->cond = BLK_ZONE_COND_IMP_OPEN;

spin_unlock(&dev->zone_dev_lock);
/*
* Memory backing allocation may sleep: release the zone_lock spinlock
* to avoid scheduling in atomic context. Zone operation atomicity is
* still guaranteed through the zone_locks bitmap.
*/
if (dev->memory_backed)
spin_unlock_irq(&dev->zone_lock);
ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
spin_lock(&dev->zone_dev_lock);
if (dev->memory_backed)
spin_lock_irq(&dev->zone_lock);

if (ret != BLK_STS_OK)
goto unlock;

@@ -392,7 +413,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
ret = BLK_STS_OK;

unlock:
spin_unlock(&dev->zone_dev_lock);
null_unlock_zone(dev, zno);

return ret;
@@ -516,9 +536,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
null_lock_zone(dev, i);
zone = &dev->zones[i];
if (zone->cond != BLK_ZONE_COND_EMPTY) {
spin_lock(&dev->zone_dev_lock);
null_reset_zone(dev, zone);
spin_unlock(&dev->zone_dev_lock);
trace_nullb_zone_op(cmd, i, zone->cond);
}
null_unlock_zone(dev, i);
@@ -530,7 +548,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
zone = &dev->zones[zone_no];

null_lock_zone(dev, zone_no);
spin_lock(&dev->zone_dev_lock);

switch (op) {
case REQ_OP_ZONE_RESET:
@@ -550,8 +567,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
break;
}

spin_unlock(&dev->zone_dev_lock);

if (ret == BLK_STS_OK)
trace_nullb_zone_op(cmd, zone_no, zone->cond);

@@ -41,6 +41,11 @@ int tpm_read_log_efi(struct tpm_chip *chip)
log_size = log_tbl->size;
memunmap(log_tbl);

if (!log_size) {
pr_warn("UEFI TPM log area empty\n");
return -EIO;
}

log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
MEMREMAP_WB);
if (!log_tbl) {

@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/kernel.h>
#include <linux/dmi.h>
#include "tpm.h"
#include "tpm_tis_core.h"

@@ -49,8 +50,8 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
return container_of(data, struct tpm_tis_tcg_phy, priv);
}

static bool interrupts = true;
module_param(interrupts, bool, 0444);
static int interrupts = -1;
module_param(interrupts, int, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");

static bool itpm;
@@ -63,6 +64,28 @@ module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
#endif

static int tpm_tis_disable_irq(const struct dmi_system_id *d)
{
if (interrupts == -1) {
pr_notice("tpm_tis: %s detected: disabling interrupts.\n", d->ident);
interrupts = 0;
}

return 0;
}

static const struct dmi_system_id tpm_tis_dmi_table[] = {
{
.callback = tpm_tis_disable_irq,
.ident = "ThinkPad T490s",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
},
},
{}
};

#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int has_hid(struct acpi_device *dev, const char *hid)
{
@@ -192,6 +215,8 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
int irq = -1;
int rc;

dmi_check_system(tpm_tis_dmi_table);

rc = check_acpi_tpm2(dev);
if (rc)
return rc;
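
The interrupts parameter change above is the usual tri-state trick: -1 means "user did not say", so the DMI quirk may flip it to 0, while an explicit 0 or 1 on the command line wins. A minimal model:

    #include <stdio.h>

    static int interrupts = -1; /* module_param default: unset */

    static void dmi_quirk(void)
    {
            if (interrupts == -1) /* only override an untouched default */
                    interrupts = 0;
    }

    int main(void)
    {
            dmi_quirk();
            printf("interrupts=%d\n", interrupts); /* 0 on a quirked machine */
            return 0;
    }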

@@ -80,6 +80,7 @@ MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");

#define AMDGPU_RESUME_MS 2000

@@ -1805,7 +1806,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_RENOIR:
if (adev->apu_flags & AMD_APU_IS_RENOIR)
chip_name = "renoir";
else
chip_name = "green_sardine";
break;
case CHIP_NAVI10:
chip_name = "navi10";

@@ -2524,6 +2524,7 @@ int parse_ta_bin_descriptor(struct psp_context *psp,
psp->asd_feature_version = le32_to_cpu(desc->fw_version);
psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
psp->asd_start_addr = ucode_start_addr;
psp->asd_fw = psp->ta_fw;
break;
case TA_FW_TYPE_PSP_XGMI:
psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);

@@ -39,6 +39,7 @@
#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
@@ -50,6 +51,7 @@ MODULE_FIRMWARE(FIRMWARE_PICASSO);
MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
@@ -89,7 +91,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
adev->vcn.indirect_sram = true;
break;
case CHIP_RENOIR:
if (adev->apu_flags & AMD_APU_IS_RENOIR)
fw_name = FIRMWARE_RENOIR;
else
fw_name = FIRMWARE_GREEN_SARDINE;

if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;

@@ -1336,11 +1336,13 @@ cik_asic_reset_method(struct amdgpu_device *adev)

switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
/* disable baco reset until it works */
/* smu7_asic_get_baco_capability(adev, &baco_reset); */
baco_reset = false;
break;
case CHIP_HAWAII:
baco_reset = cik_asic_supports_baco(adev);
break;
default:
baco_reset = false;
break;

@@ -1071,22 +1071,19 @@ static int cik_sdma_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 tmp = RREG32(mmSRBM_STATUS2);
u32 tmp;

if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
/* sdma0 */
tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
tmp |= SDMA0_F32_CNTL__HALT_MASK;
WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
}
if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {

/* sdma1 */
tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
tmp |= SDMA0_F32_CNTL__HALT_MASK;
WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
}

if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);

@@ -128,6 +128,9 @@
#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO__SHIFT 0x3
#define PA_SC_ENHANCE_3__FORCE_PBB_WORKLOAD_MODE_TO_ZERO_MASK 0x00000008L

#define mmCGTT_SPI_CS_CLK_CTRL 0x507c
#define mmCGTT_SPI_CS_CLK_CTRL_BASE_IDX 1

MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3094,6 +3097,7 @@ static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =

static const struct soc15_reg_golden golden_settings_gc_10_3[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_PS_CLK_CTRL, 0xff7f0fff, 0x78000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100),

@@ -117,6 +117,13 @@ MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");

MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");

#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_1_ARCT 0x0b04
@@ -1630,7 +1637,10 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_RENOIR:
if (adev->apu_flags & AMD_APU_IS_RENOIR)
chip_name = "renoir";
else
chip_name = "green_sardine";
break;
default:
BUG();

@@ -455,10 +455,11 @@ void nv_set_virt_ops(struct amdgpu_device *adev)
adev->virt.ops = &xgpu_nv_virt_ops;
}

static bool nv_is_blockchain_sku(struct pci_dev *pdev)
static bool nv_is_headless_sku(struct pci_dev *pdev)
{
if (pdev->device == 0x731E &&
(pdev->revision == 0xC6 || pdev->revision == 0xC7))
if ((pdev->device == 0x731E &&
(pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
(pdev->device == 0x7340 && pdev->revision == 0xC9))
return true;
return false;
}
@@ -492,7 +493,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev) &&
!nv_is_blockchain_sku(adev->pdev))
!nv_is_headless_sku(adev->pdev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
@@ -500,7 +501,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (!nv_is_blockchain_sku(adev->pdev))
if (!nv_is_headless_sku(adev->pdev))
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
if (adev->enable_mes)

@@ -39,6 +39,7 @@

MODULE_FIRMWARE("amdgpu/renoir_asd.bin");
MODULE_FIRMWARE("amdgpu/renoir_ta.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_asd.bin");

/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -54,7 +55,10 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)

switch (adev->asic_type) {
case CHIP_RENOIR:
if (adev->apu_flags & AMD_APU_IS_RENOIR)
chip_name = "renoir";
else
chip_name = "green_sardine";
break;
default:
BUG();

@@ -69,6 +69,7 @@ MODULE_FIRMWARE("amdgpu/picasso_sdma.bin");
MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin");
MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin");

#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
@@ -619,7 +620,10 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_RENOIR:
if (adev->apu_flags & AMD_APU_IS_RENOIR)
chip_name = "renoir";
else
chip_name = "green_sardine";
break;
default:
BUG();

@@ -1195,8 +1195,7 @@ static int soc15_common_early_init(void *handle)

adev->pg_flags = AMD_PG_SUPPORT_SDMA |
AMD_PG_SUPPORT_MMHUB |
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG;
AMD_PG_SUPPORT_VCN;
} else {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1243,7 +1242,15 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
if (adev->pdev->device == 0x1636)
adev->apu_flags |= AMD_APU_IS_RENOIR;
else
adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;

if (adev->apu_flags & AMD_APU_IS_RENOIR)
adev->external_rev_id = adev->rev_id + 0x91;
else
adev->external_rev_id = adev->rev_id + 0xa1;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
@@ -1268,7 +1275,6 @@ static int soc15_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 0x91;
break;
default:
/* FIXME: not supported yet */

@@ -798,10 +798,10 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
}

pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
memcpy(pcrat_image, crat_table, crat_table->length);
if (!pcrat_image)
return -ENOMEM;

memcpy(pcrat_image, crat_table, crat_table->length);
*crat_image = pcrat_image;
*size = crat_table->length;
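
The kfd hunk above is a classic ordering fix: the old code memcpy()'d into the buffer before checking that kvmalloc() succeeded. The safe sequence, in miniature:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            const char src[] = "crat";
            char *image = malloc(sizeof(src)); /* kvmalloc() analogue */

            if (!image)        /* check BEFORE touching the buffer */
                    return 1;  /* -ENOMEM in the kernel */
            memcpy(image, src, sizeof(src));
            printf("%s\n", image);
            free(image);
            return 0;
    }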

@@ -100,6 +100,8 @@ MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
#endif
#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -973,6 +975,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
case CHIP_RAVEN:
case CHIP_RENOIR:
init_data.flags.gpu_vm_support = true;
if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
init_data.flags.disable_dmcu = true;
break;
default:
break;
@@ -1267,6 +1271,8 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
case CHIP_RENOIR:
dmub_asic = DMUB_ASIC_DCN21;
fw_name_dmub = FIRMWARE_RENOIR_DMUB;
if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
break;
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
case CHIP_SIENNA_CICHLID:

@@ -166,6 +166,11 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
}

if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
}
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
break;

@@ -120,6 +120,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
dc_version = DCN_VERSION_1_01;
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_2_1;
if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_2_1;
break;
#endif

@@ -205,6 +205,10 @@ enum {
#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
#define ASICREV_IS_SIENNA_CICHLID_P(eChipRev) ((eChipRev >= NV_SIENNA_CICHLID_P_A0))
#endif
#define GREEN_SARDINE_A0 0xA1
#ifndef ASICREV_IS_GREEN_SARDINE
#define ASICREV_IS_GREEN_SARDINE(eChipRev) ((eChipRev >= GREEN_SARDINE_A0) && (eChipRev < 0xFF))
#endif

/*
* ASIC chip ID

@@ -45,6 +45,7 @@ enum amd_apu_flags {
AMD_APU_IS_RAVEN2 = 0x00000002UL,
AMD_APU_IS_PICASSO = 0x00000004UL,
AMD_APU_IS_RENOIR = 0x00000008UL,
AMD_APU_IS_GREEN_SARDINE = 0x00000010UL,
};

/**

@@ -229,6 +229,7 @@ struct pp_smumgr_func {
bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting);
int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */
int (*stop_smc)(struct pp_hwmgr *hwmgr);
};

struct pp_hwmgr_func {

@@ -113,4 +113,6 @@ extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_settin

extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);

extern int smum_stop_smc(struct pp_hwmgr *hwmgr);

#endif

@@ -142,12 +142,12 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 },
{ CMD_DELAY_MS, 0, 0, 0, 20, 0 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 },
{ CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 },
{ CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 }
};
@@ -155,6 +155,7 @@ static const struct baco_cmd_entry exit_baco_tbl[] =
static const struct baco_cmd_entry clean_baco_tbl[] =
{
{ CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
{ CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 },
{ CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 }
};

@@ -1541,6 +1541,10 @@ static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to reset to default!", result = tmp_result);

tmp_result = smum_stop_smc(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to stop smc!", result = tmp_result);

tmp_result = smu7_force_switch_to_arbf0(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to force to switch arbf0!", result = tmp_result);
@@ -1585,6 +1589,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->current_profile_setting.sclk_down_hyst = 100;
data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
data->current_profile_setting.bupdate_mclk = 1;
if (hwmgr->chip_id >= CHIP_POLARIS10) {
if (adev->gmc.vram_width == 256) {
data->current_profile_setting.mclk_up_hyst = 10;
data->current_profile_setting.mclk_down_hyst = 60;
@@ -1598,6 +1603,11 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->current_profile_setting.mclk_down_hyst = 16;
data->current_profile_setting.mclk_activity = 20;
}
} else {
data->current_profile_setting.mclk_up_hyst = 0;
data->current_profile_setting.mclk_down_hyst = 100;
data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
}
hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;

@@ -2726,10 +2726,7 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)

static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
{
return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS,
VOLTAGE_CONTROLLER_ON))
? true : false;
return ci_is_smc_ram_running(hwmgr);
}

static int ci_smu_init(struct pp_hwmgr *hwmgr)
@@ -2939,6 +2936,29 @@ static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
return 0;
}

static void ci_reset_smc(struct pp_hwmgr *hwmgr)
{
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL,
rst_reg, 1);
}


static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr)
{
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0,
ck_disable, 1);
}

static int ci_stop_smc(struct pp_hwmgr *hwmgr)
{
ci_reset_smc(hwmgr);
ci_stop_smc_clock(hwmgr);

return 0;
}

const struct pp_smumgr_func ci_smu_funcs = {
.name = "ci_smu",
.smu_init = ci_smu_init,
@@ -2964,4 +2984,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
.is_dpm_running = ci_is_dpm_running,
.update_dpm_settings = ci_update_dpm_settings,
.update_smc_table = ci_update_smc_table,
.stop_smc = ci_stop_smc,
};

@@ -245,3 +245,11 @@ int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t tabl

return -EINVAL;
}

int smum_stop_smc(struct pp_hwmgr *hwmgr)
{
if (hwmgr->smumgr_funcs->stop_smc)
return hwmgr->smumgr_funcs->stop_smc(hwmgr);

return 0;
}
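
smum_stop_smc() above follows the powerplay convention for optional backend hooks: a missing implementation is a successful no-op, so only the ci smumgr needs to provide .stop_smc. The pattern in isolation:

    #include <stdio.h>

    struct smumgr_funcs {
            int (*stop_smc)(void *hwmgr); /* optional hook */
    };

    static int dispatch_stop_smc(const struct smumgr_funcs *funcs, void *hwmgr)
    {
            if (funcs->stop_smc)
                    return funcs->stop_smc(hwmgr);

            return 0; /* absent hook: succeed silently */
    }

    int main(void)
    {
            struct smumgr_funcs none = { 0 };

            printf("%d\n", dispatch_stop_smc(&none, NULL)); /* 0 */
            return 0;
    }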
|
||||
|
@@ -1029,17 +1029,6 @@ static int smu_smc_hw_setup(struct smu_context *smu)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set initialized values (get from vbios) to dpm tables context such as
|
||||
* gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
|
||||
* type of clks.
|
||||
*/
|
||||
ret = smu_set_default_dpm_table(smu);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = smu_notify_display_change(smu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@@ -1754,7 +1754,7 @@ void intel_psr_atomic_check(struct drm_connector *connector,
|
||||
return;
|
||||
|
||||
intel_connector = to_intel_connector(connector);
|
||||
dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector));
|
||||
dig_port = enc_to_dig_port(to_intel_encoder(new_state->best_encoder));
|
||||
if (dev_priv->psr.dp != &dig_port->dp)
|
||||
return;
|
||||
|
||||
|
@@ -508,21 +508,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
|
||||
if (!obj)
|
||||
return -ENOENT;
|
||||
|
||||
/*
|
||||
* Already in the desired write domain? Nothing for us to do!
|
||||
*
|
||||
* We apply a little bit of cunning here to catch a broader set of
|
||||
* no-ops. If obj->write_domain is set, we must be in the same
|
||||
* obj->read_domains, and only that domain. Therefore, if that
|
||||
* obj->write_domain matches the request read_domains, we are
|
||||
* already in the same read/write domain and can skip the operation,
|
||||
* without having to further check the requested write_domain.
|
||||
*/
|
||||
if (READ_ONCE(obj->write_domain) == read_domains) {
|
||||
err = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to flush the object off the GPU without holding the lock.
|
||||
* We will repeat the flush holding the lock in the normal manner
|
||||
@@ -560,6 +545,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Already in the desired write domain? Nothing for us to do!
|
||||
*
|
||||
* We apply a little bit of cunning here to catch a broader set of
|
||||
* no-ops. If obj->write_domain is set, we must be in the same
|
||||
* obj->read_domains, and only that domain. Therefore, if that
|
||||
* obj->write_domain matches the request read_domains, we are
|
||||
* already in the same read/write domain and can skip the operation,
|
||||
* without having to further check the requested write_domain.
|
||||
*/
|
||||
if (READ_ONCE(obj->write_domain) == read_domains)
|
||||
goto out_unpin;
|
||||
|
||||
err = i915_gem_object_lock_interruptible(obj, NULL);
|
||||
if (err)
|
||||
goto out_unpin;
|
||||
|
@@ -245,22 +245,14 @@ static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u
|
||||
}
|
||||
|
||||
static inline u32 *
|
||||
__gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
|
||||
__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1)
|
||||
{
|
||||
/* We're using qword write, offset should be aligned to 8 bytes. */
|
||||
GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
|
||||
|
||||
/* w/a for post sync ops following a GPGPU operation we
|
||||
* need a prior CS_STALL, which is emitted by the flush
|
||||
* following the batch.
|
||||
*/
|
||||
*cs++ = GFX_OP_PIPE_CONTROL(6) | flags0;
|
||||
*cs++ = flags1 | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
|
||||
*cs++ = gtt_offset;
|
||||
*cs++ = flags1 | PIPE_CONTROL_QW_WRITE;
|
||||
*cs++ = offset;
|
||||
*cs++ = 0;
|
||||
*cs++ = value;
|
||||
/* We're thrashing one dword of HWS. */
|
||||
*cs++ = 0;
|
||||
*cs++ = 0; /* We're thrashing one extra dword. */
|
||||
|
||||
return cs;
|
||||
}
|
||||
@@ -268,13 +260,38 @@ __gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 f
|
||||
static inline u32*
|
||||
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
|
||||
{
|
||||
return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, 0, flags);
|
||||
/* We're using qword write, offset should be aligned to 8 bytes. */
|
||||
GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
|
||||
|
||||
return __gen8_emit_write_rcs(cs,
|
||||
value,
|
||||
gtt_offset,
|
||||
0,
|
||||
flags | PIPE_CONTROL_GLOBAL_GTT_IVB);
|
||||
}
|
||||
|
||||
static inline u32*
|
||||
gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1)
|
||||
{
|
||||
return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, flags0, flags1);
|
||||
/* We're using qword write, offset should be aligned to 8 bytes. */
|
||||
GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
|
||||
|
||||
return __gen8_emit_write_rcs(cs,
|
||||
value,
|
||||
gtt_offset,
|
||||
flags0,
|
||||
flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB);
|
||||
}
|
||||
|
||||
static inline u32 *
|
||||
__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
|
||||
{
|
||||
*cs++ = (MI_FLUSH_DW + 1) | flags;
|
||||
*cs++ = gtt_offset;
|
||||
*cs++ = 0;
|
||||
*cs++ = value;
|
||||
|
||||
return cs;
|
||||
}
|
||||
|
||||
static inline u32 *
|
||||
@@ -285,12 +302,10 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
|
||||
/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
|
||||
GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
|
||||
|
||||
*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
|
||||
*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
|
||||
*cs++ = 0;
|
||||
*cs++ = value;
|
||||
|
||||
return cs;
|
||||
return __gen8_emit_flush_dw(cs,
|
||||
value,
|
||||
gtt_offset | MI_FLUSH_DW_USE_GTT,
|
||||
flags | MI_FLUSH_DW_OP_STOREDW);
|
||||
}
|
||||
|
||||
static inline void __intel_engine_reset(struct intel_engine_cs *engine,
|
||||
|
@@ -3547,6 +3547,19 @@ static const struct intel_context_ops execlists_context_ops = {
|
||||
.destroy = execlists_context_destroy,
|
||||
};
|
||||
|
||||
static u32 hwsp_offset(const struct i915_request *rq)
|
||||
{
|
||||
const struct intel_timeline_cacheline *cl;
|
||||
|
||||
/* Before the request is executed, the timeline/cachline is fixed */
|
||||
|
||||
cl = rcu_dereference_protected(rq->hwsp_cacheline, 1);
|
||||
if (cl)
|
||||
return cl->ggtt_offset;
|
||||
|
||||
return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset;
|
||||
}
|
||||
|
||||
static int gen8_emit_init_breadcrumb(struct i915_request *rq)
|
||||
{
|
||||
u32 *cs;
|
||||
@@ -3569,7 +3582,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
|
||||
*cs++ = MI_NOOP;
|
||||
|
||||
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
|
||||
*cs++ = i915_request_timeline(rq)->hwsp_offset;
|
||||
*cs++ = hwsp_offset(rq);
|
||||
*cs++ = 0;
|
||||
*cs++ = rq->fence.seqno - 1;
|
||||
|
||||
@@ -4886,11 +4899,9 @@ gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
|
||||
return gen8_emit_wa_tail(request, cs);
|
||||
}
|
||||
|
||||
static u32 *emit_xcs_breadcrumb(struct i915_request *request, u32 *cs)
|
||||
static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
|
||||
{
|
||||
u32 addr = i915_request_active_timeline(request)->hwsp_offset;
|
||||
|
||||
return gen8_emit_ggtt_write(cs, request->fence.seqno, addr, 0);
|
||||
return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
|
||||
}
|
||||
|
||||
static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
|
||||
@@ -4909,7 +4920,7 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
|
||||
/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
|
||||
cs = gen8_emit_ggtt_write_rcs(cs,
|
||||
request->fence.seqno,
|
||||
i915_request_active_timeline(request)->hwsp_offset,
|
||||
hwsp_offset(request),
|
||||
PIPE_CONTROL_FLUSH_ENABLE |
|
||||
PIPE_CONTROL_CS_STALL);
|
||||
|
||||
@@ -4921,7 +4932,7 @@ gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
|
||||
{
|
||||
cs = gen8_emit_ggtt_write_rcs(cs,
|
||||
request->fence.seqno,
|
||||
i915_request_active_timeline(request)->hwsp_offset,
|
||||
hwsp_offset(request),
|
||||
PIPE_CONTROL_CS_STALL |
|
||||
PIPE_CONTROL_TILE_CACHE_FLUSH |
|
||||
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
|
||||
@@ -4983,7 +4994,9 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs)
|
||||
|
||||
static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs)
|
||||
{
|
||||
return gen12_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
|
||||
/* XXX Stalling flush before seqno write; post-sync not */
|
||||
cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
|
||||
return gen12_emit_fini_breadcrumb_tail(rq, cs);
|
||||
}
|
||||
|
||||
static u32 *
|
||||
@@ -4991,7 +5004,7 @@ gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
|
||||
{
|
||||
cs = gen12_emit_ggtt_write_rcs(cs,
|
||||
request->fence.seqno,
|
||||
i915_request_active_timeline(request)->hwsp_offset,
|
||||
hwsp_offset(request),
|
||||
PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
|
||||
PIPE_CONTROL_CS_STALL |
|
||||
PIPE_CONTROL_TILE_CACHE_FLUSH |
|
||||
|
@@ -188,9 +188,13 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
	return cl;
}

static void cacheline_acquire(struct intel_timeline_cacheline *cl)
static void cacheline_acquire(struct intel_timeline_cacheline *cl,
			      u32 ggtt_offset)
{
	if (cl)
	if (!cl)
		return;

	cl->ggtt_offset = ggtt_offset;
	i915_active_acquire(&cl->active);
}

@@ -340,7 +344,7 @@ int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww)
	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
		 tl->fence_context, tl->hwsp_offset);

	cacheline_acquire(tl->hwsp_cacheline);
	cacheline_acquire(tl->hwsp_cacheline, tl->hwsp_offset);
	if (atomic_fetch_inc(&tl->pin_count)) {
		cacheline_release(tl->hwsp_cacheline);
		__i915_vma_unpin(tl->hwsp_ggtt);
@@ -515,7 +519,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
	GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n",
		 tl->fence_context, tl->hwsp_offset);

	cacheline_acquire(cl);
	cacheline_acquire(cl, tl->hwsp_offset);
	tl->hwsp_cacheline = cl;

	*seqno = timeline_advance(tl);
@@ -573,9 +577,7 @@ int intel_timeline_read_hwsp(struct i915_request *from,
	if (err)
		goto out;

	*hwsp = i915_ggtt_offset(cl->hwsp->vma) +
		ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES;

	*hwsp = cl->ggtt_offset;
out:
	i915_active_release(&cl->active);
	return err;
@@ -94,6 +94,8 @@ struct intel_timeline_cacheline {
	struct intel_timeline_hwsp *hwsp;
	void *vaddr;

	u32 ggtt_offset;

	struct rcu_head rcu;
};
@@ -1489,7 +1489,8 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
	const struct intel_engine_cs *engine =
		intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);

	if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
	if (value != 0 &&
	    !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
			     offset, value);
		return -EINVAL;
@@ -1650,6 +1651,34 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
	return 0;
}

/**
 * FixMe:
 * If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did:
 * 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.)
 * Due to the missing flush of bb filled by VM vCPU, host GPU hangs on executing
 * these MI_BATCH_BUFFER.
 * Temporarily workaround this by setting SNOOP bit for PAT3 used by PPGTT
 * PML4 PTE: PAT(0) PCD(1) PWT(1).
 * The performance is still expected to be low, will need further improvement.
 */
static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset,
			      void *p_data, unsigned int bytes)
{
	u64 pat =
		GEN8_PPAT(0, CHV_PPAT_SNOOP) |
		GEN8_PPAT(1, 0) |
		GEN8_PPAT(2, 0) |
		GEN8_PPAT(3, CHV_PPAT_SNOOP) |
		GEN8_PPAT(4, CHV_PPAT_SNOOP) |
		GEN8_PPAT(5, CHV_PPAT_SNOOP) |
		GEN8_PPAT(6, CHV_PPAT_SNOOP) |
		GEN8_PPAT(7, CHV_PPAT_SNOOP);

	vgpu_vreg(vgpu, offset) = lower_32_bits(pat);

	return 0;
}

static int guc_status_read(struct intel_vgpu *vgpu,
			   unsigned int offset, void *p_data,
			   unsigned int bytes)
@@ -2812,7 +2841,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)

	MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);

	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
	MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT);
	MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);

	MMIO_D(GAMTARBMODE, D_BDW_PLUS);
@@ -3139,7 +3168,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
		 NULL, NULL);

	MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL);
	MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
	MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT);

	return 0;
}
@@ -3313,9 +3342,21 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
	MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
	MMIO_D(GEN6_GFXPAUSE, D_BXT);
	MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL);
	MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL);
	MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);
	MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);
	MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);
	MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS,
	       0, 0, D_BXT, NULL, NULL);

	MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);

	MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write);

	return 0;
}
@@ -1277,7 +1277,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)

	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
	for_each_engine(engine, vgpu->gvt->gt, id)
		intel_context_unpin(s->shadow[id]);
		intel_context_put(s->shadow[id]);

	kmem_cache_destroy(s->workloads);
}
@@ -1369,11 +1369,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
			ce->ring = __intel_context_ring_size(ring_size);
		}

		ret = intel_context_pin(ce);
		intel_context_put(ce);
		if (ret)
			goto out_shadow_ctx;

		s->shadow[i] = ce;
	}

@@ -1405,7 +1400,6 @@ out_shadow_ctx:
		if (IS_ERR(s->shadow[i]))
			break;

		intel_context_unpin(s->shadow[i]);
		intel_context_put(s->shadow[i]);
	}
	i915_vm_put(&ppgtt->vm);
@@ -1479,6 +1473,7 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	intel_context_unpin(s->shadow[workload->engine->id]);
	release_shadow_batch_buffer(workload);
	release_shadow_wa_ctx(&workload->wa_ctx);

@@ -1724,6 +1719,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
		return ERR_PTR(ret);
	}

	ret = intel_context_pin(s->shadow[engine->id]);
	if (ret) {
		intel_vgpu_destroy_workload(workload);
		return ERR_PTR(ret);
	}

	return workload;
}
@@ -314,8 +314,10 @@ static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned)
	if (vw->pinned) {
		__i915_gem_object_unpin_pages(vw->pinned);
		i915_gem_object_put(vw->pinned);
	}

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	i915_vm_put(vw->vm);
@@ -431,7 +433,7 @@ int i915_vma_bind(struct i915_vma *vma,

		if (vma->obj) {
			__i915_gem_object_pin_pages(vma->obj);
			work->pinned = vma->obj;
			work->pinned = i915_gem_object_get(vma->obj);
		}
	} else {
		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
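The i915_vma hunk above is the usual fix for an async worker outliving its caller: the bind work now takes its own reference on the GEM object alongside the page pin, and drops both when the work is released. A minimal sketch of that pairing, assuming i915's object helpers; the work_ctx/queue_bind/release_bind names are illustrative, not part of this diff:

/* Sketch only: pairs a pages pin with an object reference owned by the
 * worker; assumes i915's gem/i915_gem_object.h helpers. */
struct work_ctx {
	struct drm_i915_gem_object *pinned;	/* NULL until a bind is queued */
};

static void queue_bind(struct work_ctx *w, struct drm_i915_gem_object *obj)
{
	__i915_gem_object_pin_pages(obj);
	/* Keep the object alive for the async worker; the caller may drop
	 * its own reference before the work has run. */
	w->pinned = i915_gem_object_get(obj);
}

static void release_bind(struct work_ctx *w)
{
	if (w->pinned) {
		__i915_gem_object_unpin_pages(w->pinned);
		i915_gem_object_put(w->pinned);	/* balances the get above */
	}
}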
@@ -111,10 +111,6 @@ static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi)
	return 0;
}

static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder)
{
}

static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
{
	struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
@@ -140,7 +136,6 @@ static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder,

static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
	.enable = dw_hdmi_imx_encoder_enable,
	.disable = dw_hdmi_imx_encoder_disable,
	.atomic_check = dw_hdmi_imx_atomic_check,
};

@@ -219,15 +214,9 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
	hdmi->dev = &pdev->dev;
	encoder = &hdmi->encoder;

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
	/*
	 * If we failed to find the CRTC(s) which this encoder is
	 * supposed to be connected to, it's because the CRTC has
	 * not been registered yet. Defer probing, and hope that
	 * the required CRTC is added later.
	 */
	if (encoder->possible_crtcs == 0)
		return -EPROBE_DEFER;
	ret = imx_drm_encoder_parse_of(drm, encoder, dev->of_node);
	if (ret)
		return ret;

	ret = dw_hdmi_imx_parse_dt(hdmi);
	if (ret < 0)
@@ -20,6 +20,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -212,7 +213,9 @@ static int imx_drm_bind(struct device *dev)
	drm->mode_config.allow_fb_modifiers = true;
	drm->mode_config.normalize_zpos = true;

	drm_mode_config_init(drm);
	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	ret = drm_vblank_init(drm, MAX_CRTC);
	if (ret)
@@ -251,7 +254,6 @@ err_poll_fini:
	drm_kms_helper_poll_fini(drm);
	component_unbind_all(drm->dev, drm);
err_kms:
	drm_mode_config_cleanup(drm);
	drm_dev_put(drm);

	return ret;
@@ -267,11 +269,9 @@ static void imx_drm_unbind(struct device *dev)

	component_unbind_all(drm->dev, drm);

	drm_mode_config_cleanup(drm);
	drm_dev_put(drm);

	dev_set_drvdata(dev, NULL);

	drm_dev_put(drm);
}

static const struct component_master_ops imx_drm_ops = {
@@ -62,7 +62,6 @@ struct imx_ldb_channel {
	struct i2c_adapter *ddc;
	int chno;
	void *edid;
	int edid_len;
	struct drm_display_mode mode;
	int mode_valid;
	u32 bus_format;
@@ -536,15 +535,14 @@ static int imx_ldb_panel_ddc(struct device *dev,
	}

	if (!channel->ddc) {
		int edid_len;

		/* if no DDC available, fallback to hardcoded EDID */
		dev_dbg(dev, "no ddc available\n");

		edidp = of_get_property(child, "edid",
					&channel->edid_len);
		edidp = of_get_property(child, "edid", &edid_len);
		if (edidp) {
			channel->edid = kmemdup(edidp,
						channel->edid_len,
						GFP_KERNEL);
			channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL);
		} else if (!channel->panel) {
			/* fallback to display-timings node */
			ret = of_get_drm_display_mode(child,
@@ -13,7 +13,6 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <linux/videodev2.h>

#include <video/imx-ipu-v3.h>
@@ -104,8 +103,6 @@ struct imx_tve {
	struct drm_connector connector;
	struct drm_encoder encoder;
	struct device *dev;
	spinlock_t lock; /* register lock */
	bool enabled;
	int mode;
	int di_hsync_pin;
	int di_vsync_pin;
@@ -129,30 +126,10 @@ static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
	return container_of(e, struct imx_tve, encoder);
}

static void tve_lock(void *__tve)
	__acquires(&tve->lock)
{
	struct imx_tve *tve = __tve;

	spin_lock(&tve->lock);
}

static void tve_unlock(void *__tve)
	__releases(&tve->lock)
{
	struct imx_tve *tve = __tve;

	spin_unlock(&tve->lock);
}

static void tve_enable(struct imx_tve *tve)
{
	if (!tve->enabled) {
		tve->enabled = true;
		clk_prepare_enable(tve->clk);
		regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
				   TVE_EN, TVE_EN);
	}
	regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, TVE_EN);

	/* clear interrupt status register */
	regmap_write(tve->regmap, TVE_STAT_REG, 0xffffffff);
@@ -169,12 +146,9 @@ static void tve_enable(struct imx_tve *tve)

static void tve_disable(struct imx_tve *tve)
{
	if (tve->enabled) {
		tve->enabled = false;
		regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0);
		clk_disable_unprepare(tve->clk);
	}
}

static int tve_setup_tvout(struct imx_tve *tve)
{
@@ -500,8 +474,7 @@ static struct regmap_config tve_regmap_config = {

	.readable_reg = imx_tve_readable_reg,

	.lock = tve_lock,
	.unlock = tve_unlock,
	.fast_io = true,

	.max_register = 0xdc,
};
@@ -511,7 +484,7 @@ static const char * const imx_tve_modes[] = {
	[TVE_MODE_VGA] = "vga",
};

static const int of_get_tve_mode(struct device_node *np)
static int of_get_tve_mode(struct device_node *np)
{
	const char *bm;
	int ret, i;
@@ -544,7 +517,6 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
	memset(tve, 0, sizeof(*tve));

	tve->dev = dev;
	spin_lock_init(&tve->lock);

	ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
	if (ddc_node) {
@@ -28,7 +28,6 @@ struct imx_parallel_display {
	struct drm_bridge bridge;
	struct device *dev;
	void *edid;
	int edid_len;
	u32 bus_format;
	u32 bus_flags;
	struct drm_display_mode mode;
@@ -41,11 +40,6 @@ static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
	return container_of(c, struct imx_parallel_display, connector);
}

static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
{
	return container_of(e, struct imx_parallel_display, encoder);
}

static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
{
	return container_of(b, struct imx_parallel_display, bridge);
@@ -310,6 +304,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
	struct device_node *np = dev->of_node;
	const u8 *edidp;
	struct imx_parallel_display *imxpd;
	int edid_len;
	int ret;
	u32 bus_format = 0;
	const char *fmt;
@@ -323,9 +318,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
	if (ret && ret != -ENODEV)
		return ret;

	edidp = of_get_property(np, "edid", &imxpd->edid_len);
	edidp = of_get_property(np, "edid", &edid_len);
	if (edidp)
		imxpd->edid = kmemdup(edidp, imxpd->edid_len, GFP_KERNEL);
		imxpd->edid = devm_kmemdup(dev, edidp, edid_len, GFP_KERNEL);

	ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
	if (!ret) {
@@ -349,17 +344,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
	return 0;
}

static void imx_pd_unbind(struct device *dev, struct device *master,
			  void *data)
{
	struct imx_parallel_display *imxpd = dev_get_drvdata(dev);

	kfree(imxpd->edid);
}

static const struct component_ops imx_pd_ops = {
	.bind = imx_pd_bind,
	.unbind = imx_pd_unbind,
};

static int imx_pd_probe(struct platform_device *pdev)
@@ -626,6 +626,7 @@ static int panfrost_probe(struct platform_device *pdev)
err_out1:
	pm_runtime_disable(pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->dev);
err_out0:
	drm_dev_put(ddev);
	return err;
@@ -640,9 +641,9 @@ static int panfrost_remove(struct platform_device *pdev)
	panfrost_gem_shrinker_cleanup(ddev);

	pm_runtime_get_sync(pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_put_sync_suspend(pfdev->dev);
	pm_runtime_disable(pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->dev);

	drm_dev_put(ddev);
	return 0;
@@ -105,14 +105,12 @@ void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}

void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo)
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
	struct panfrost_gem_mapping *mapping;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(mapping, &bo->mappings.list, node)
		panfrost_gem_teardown_mapping(mapping);
	mutex_unlock(&bo->mappings.lock);
}

int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
@@ -82,7 +82,7 @@ struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv);
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo);
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);

void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
@@ -40,18 +40,26 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	bool ret = false;

	if (atomic_read(&bo->gpu_usecount))
		return false;

	if (!mutex_trylock(&shmem->pages_lock))
	if (!mutex_trylock(&bo->mappings.lock))
		return false;

	panfrost_gem_teardown_mappings(bo);
	if (!mutex_trylock(&shmem->pages_lock))
		goto unlock_mappings;

	panfrost_gem_teardown_mappings_locked(bo);
	drm_gem_shmem_purge_locked(obj);
	ret = true;

	mutex_unlock(&shmem->pages_lock);
	return true;

unlock_mappings:
	mutex_unlock(&bo->mappings.lock);
	return ret;
}

static unsigned long
@@ -449,7 +449,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
	}

	if (IS_ERR(cma_obj)) {
		struct drm_printer p = drm_info_printer(vc4->dev->dev);
		struct drm_printer p = drm_info_printer(vc4->base.dev);
		DRM_ERROR("Failed to allocate from CMA:\n");
		vc4_bo_stats_print(&p, vc4);
		return ERR_PTR(-ENOMEM);
@@ -590,7 +590,7 @@ static void vc4_bo_cache_time_work(struct work_struct *work)
{
	struct vc4_dev *vc4 =
		container_of(work, struct vc4_dev, bo_cache.time_work);
	struct drm_device *dev = vc4->dev;
	struct drm_device *dev = &vc4->base;

	mutex_lock(&vc4->bo_lock);
	vc4_bo_cache_free_old(dev);
@@ -1005,6 +1005,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
	return 0;
}

static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused);
int vc4_bo_cache_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -1033,10 +1034,10 @@ int vc4_bo_cache_init(struct drm_device *dev)
	INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
	timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);

	return 0;
	return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL);
}

void vc4_bo_cache_destroy(struct drm_device *dev)
static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;
@@ -257,37 +257,37 @@ static int vc4_drm_bind(struct device *dev)

	dev->coherent_dma_mask = DMA_BIT_MASK(32);

	vc4 = devm_kzalloc(dev, sizeof(*vc4), GFP_KERNEL);
	if (!vc4)
		return -ENOMEM;

	/* If VC4 V3D is missing, don't advertise render nodes. */
	node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
	if (!node || !of_device_is_available(node))
		vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
	of_node_put(node);

	drm = drm_dev_alloc(&vc4_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);
	vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base);
	if (IS_ERR(vc4))
		return PTR_ERR(vc4);

	drm = &vc4->base;
	platform_set_drvdata(pdev, drm);
	vc4->dev = drm;
	drm->dev_private = vc4;
	INIT_LIST_HEAD(&vc4->debugfs_list);

	mutex_init(&vc4->bin_bo_lock);

	ret = vc4_bo_cache_init(drm);
	if (ret)
		goto dev_put;
		return ret;

	drm_mode_config_init(drm);
	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	vc4_gem_init(drm);
	ret = vc4_gem_init(drm);
	if (ret)
		return ret;

	ret = component_bind_all(dev, drm);
	if (ret)
		goto gem_destroy;
		return ret;

	ret = vc4_plane_create_additional_planes(drm);
	if (ret)
@@ -312,30 +312,17 @@ static int vc4_drm_bind(struct device *dev)

unbind_all:
	component_unbind_all(dev, drm);
gem_destroy:
	vc4_gem_destroy(drm);
	drm_mode_config_cleanup(drm);
	vc4_bo_cache_destroy(drm);
dev_put:
	drm_dev_put(drm);

	return ret;
}

static void vc4_drm_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct vc4_dev *vc4 = to_vc4_dev(drm);

	drm_dev_unregister(drm);

	drm_atomic_helper_shutdown(drm);

	drm_mode_config_cleanup(drm);

	drm_atomic_private_obj_fini(&vc4->load_tracker);
	drm_atomic_private_obj_fini(&vc4->ctm_manager);

	drm_dev_put(drm);
}

static const struct component_master_ops vc4_drm_ops = {
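The vc4 conversion above is an instance of the managed DRM device pattern: the drm_device is embedded in the driver struct, allocated with devm_drm_dev_alloc(), and cleanup is registered through drmm_* helpers so the error-unwind labels collapse into plain returns. A minimal sketch under those assumptions; struct foo_dev, foo_drm_driver and foo_bind are illustrative names, not part of this diff:

#include <linux/err.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

/* Sketch only; a real driver fills in .name, .fops, feature flags, etc. */
static struct drm_driver foo_drm_driver;

struct foo_dev {
	struct drm_device base;	/* must be embedded for devm_drm_dev_alloc() */
};

static int foo_bind(struct device *dev)
{
	struct foo_dev *foo;
	int ret;

	/* Allocates foo and initializes foo->base; the structure stays
	 * alive until the last drm_dev_put(). */
	foo = devm_drm_dev_alloc(dev, &foo_drm_driver, struct foo_dev, base);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	/* Managed init: the matching cleanup is queued on the drm_device,
	 * so every error path below can simply return. */
	ret = drmm_mode_config_init(&foo->base);
	if (ret)
		return ret;

	return drm_dev_register(&foo->base, 0);
}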
Some files were not shown because too many files have changed in this diff.