Merge 7e63420847 ("Merge tag 'acpi-5.7-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm") into android-mainline

Baby steps for 5.7-rc1

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ib1579a254ae38651d8d61541dfc18fb7051b1226
Greg Kroah-Hartman
2020-04-09 12:54:31 +02:00
874 changed files with 40602 additions and 15004 deletions


@@ -0,0 +1,9 @@
This ABI is renamed and moved to a new location /sys/kernel/fadump/enabled.
What: /sys/kernel/fadump_enabled
Date: Feb 2012
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Primarily used to identify whether FADump is enabled in
the kernel or not.
User: Kdump service


@@ -0,0 +1,10 @@
This ABI is renamed and moved to a new location /sys/kernel/fadump/registered.
What: /sys/kernel/fadump_registered
Date: Feb 2012
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
Helps to control the dump collection feature from userspace.
Writing 1 to this file enables the system to collect the
dump and writing 0 disables it.
User: Kdump service


@@ -0,0 +1,10 @@
This ABI is renamed and moved to a new location /sys/kernel/fadump/release_mem.
What: /sys/kernel/fadump_release_mem
Date: Feb 2012
Contact: linuxppc-dev@lists.ozlabs.org
Description: write only
This is a special sysfs file and only available when
the system is booted to capture the vmcore using FADump.
It is used to release the memory reserved by FADump to
save the crash dump.


@@ -0,0 +1,9 @@
This ABI is moved to /sys/firmware/opal/mpipl/release_core.
What: /sys/kernel/fadump_release_opalcore
Date: Sep 2019
Contact: linuxppc-dev@lists.ozlabs.org
Description: write only
This sysfs file is available when the system is booted to
collect the dump on OPAL-based machines. It is used to release
the memory used to collect the opalcore.


@@ -0,0 +1,21 @@
What: /sys/firmware/opal/sensor_groups
Date: August 2017
Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
Description: Sensor groups directory for POWER9 powernv servers
Each folder in this directory contains a sensor group,
classified based on the type of sensor, such as power,
temperature, frequency, current, etc. A group can also
indicate the set of sensors belonging to a particular
owner, such as CSM, Profiler or Job-Scheduler.
What: /sys/firmware/opal/sensor_groups/<sensor_group_name>/clear
Date: August 2017
Contact: Linux for PowerPC mailing list <linuxppc-dev@ozlabs.org>
Description: Sysfs file to clear the min-max of all the sensors
belonging to the group.
Writing 1 to this file will clear the minimum and
maximum values of all the sensors in the group.
In POWER9, the min-max of a sensor is the historical minimum
and maximum value of the sensor cached by OCC.


@@ -0,0 +1,40 @@
What: /sys/kernel/fadump/*
Date: Dec 2019
Contact: linuxppc-dev@lists.ozlabs.org
Description:
/sys/kernel/fadump/* is a collection of FADump sysfs
files that provide information about the configuration
status of Firmware-Assisted Dump (FADump).
What: /sys/kernel/fadump/enabled
Date: Dec 2019
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Primarily used to identify whether FADump is enabled in
the kernel or not.
User: Kdump service
What: /sys/kernel/fadump/registered
Date: Dec 2019
Contact: linuxppc-dev@lists.ozlabs.org
Description: read/write
Helps to control the dump collection feature from userspace.
Writing 1 to this file enables the system to collect the
dump and writing 0 disables it.
User: Kdump service
What: /sys/kernel/fadump/release_mem
Date: Dec 2019
Contact: linuxppc-dev@lists.ozlabs.org
Description: write only
This is a special sysfs file and only available when
the system is booted to capture the vmcore using FADump.
It is used to release the memory reserved by FADump to
save the crash dump.
What: /sys/kernel/fadump/mem_reserved
Date: Dec 2019
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Provides the amount of memory (in bytes) reserved by
FADump to save the crash dump.


@@ -22,11 +22,13 @@
default: 0
acpi_backlight= [HW,ACPI]
acpi_backlight=vendor
acpi_backlight=video
If set to vendor, prefer vendor specific driver
{ vendor | video | native | none }
If set to vendor, prefer vendor-specific driver
(e.g. thinkpad_acpi, sony_acpi, etc.) instead
of the ACPI video.ko driver.
If set to video, use the ACPI video.ko driver.
If set to native, use the device's native backlight mode.
If set to none, disable the ACPI backlight interface.
acpi_force_32bit_fadt_addr
force FADT to use 32 bit addresses rather than the
@@ -3720,6 +3722,9 @@
Override pmtimer IOPort with a hex value.
e.g. pmtmr=0x508
pm_debug_messages [SUSPEND,KNL]
Enable suspend/resume debug messages during boot up.
pnp.debug=1 [PNP]
Enable PNP debug messages (depends on the
CONFIG_PNP_DEBUG_MESSAGES option). Change at run-time


@@ -0,0 +1,270 @@
.. SPDX-License-Identifier: GPL-2.0
.. include:: <isonum.txt>
=========================
System Suspend Code Flows
=========================
:Copyright: |copy| 2020 Intel Corporation
:Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
At least one global system-wide transition needs to be carried out for the
system to get from the working state into one of the supported
:doc:`sleep states <sleep-states>`. Hibernation requires more than one
transition to occur for this purpose, but the other sleep states, commonly
referred to as *system-wide suspend* (or simply *system suspend*) states, need
only one.
For those sleep states, the transition from the working state of the system into
the target sleep state is referred to as *system suspend* too (in the majority
of cases, whether this means a transition or a sleep state of the system should
be clear from the context) and the transition back from the sleep state into the
working state is referred to as *system resume*.
The kernel code flows associated with the suspend and resume transitions for
different sleep states of the system are quite similar, but there are some
significant differences between the :ref:`suspend-to-idle <s2idle>` code flows
and the code flows related to the :ref:`suspend-to-RAM <s2ram>` and
:ref:`standby <standby>` sleep states.
The :ref:`suspend-to-RAM <s2ram>` and :ref:`standby <standby>` sleep states
cannot be implemented without platform support and the difference between them
boils down to the platform-specific actions carried out by the suspend and
resume hooks that need to be provided by the platform driver to make them
available. Apart from that, the suspend and resume code flows for these sleep
states are mostly identical, so they both together will be referred to as
*platform-dependent suspend* states in what follows.
.. _s2idle_suspend:
Suspend-to-idle Suspend Code Flow
=================================
The following steps are taken in order to transition the system from the working
state to the :ref:`suspend-to-idle <s2idle>` sleep state:
1. Invoking system-wide suspend notifiers.
Kernel subsystems can register callbacks to be invoked when the suspend
transition is about to occur and when the resume transition has finished.
That allows them to prepare for the change of the system state and to clean
up after getting back to the working state.
2. Freezing tasks.
Tasks are frozen primarily in order to avoid unchecked hardware accesses
from user space through MMIO regions or I/O registers exposed directly to
it and to prevent user space from entering the kernel while the next step
of the transition is in progress (which might have been problematic for
various reasons).
All user space tasks are intercepted as though they were sent a signal and
put into uninterruptible sleep until the end of the subsequent system resume
transition.
The kernel threads that choose to be frozen during system suspend for
specific reasons are frozen subsequently, but they are not intercepted.
Instead, they are expected to periodically check whether or not they need
to be frozen and to put themselves into uninterruptible sleep if so. [Note,
however, that kernel threads can use locking and other concurrency controls
available in kernel space to synchronize themselves with system suspend and
resume, which can be much more precise than the freezing, so the latter is
not a recommended option for kernel threads.]
3. Suspending devices and reconfiguring IRQs.
Devices are suspended in four phases called *prepare*, *suspend*,
*late suspend* and *noirq suspend* (see :ref:`driverapi_pm_devices` for more
information on what exactly happens in each phase).
Every device is visited in each phase, but typically it is not physically
accessed in more than two of them.
The runtime PM API is disabled for every device during the *late* suspend
phase and high-level ("action") interrupt handlers are prevented from being
invoked before the *noirq* suspend phase.
Interrupts are still handled after that, but they are only acknowledged to
interrupt controllers without performing any device-specific actions that
would be triggered in the working state of the system (those actions are
deferred till the subsequent system resume transition as described
`below <s2idle_resume_>`_).
IRQs associated with system wakeup devices are "armed" so that the resume
transition of the system is started when one of them signals an event.
4. Freezing the scheduler tick and suspending timekeeping.
When all devices have been suspended, CPUs enter the idle loop and are put
into the deepest available idle state. While doing that, each of them
"freezes" its own scheduler tick so that the timer events associated with
the tick do not occur until the CPU is woken up by another interrupt source.
The last CPU to enter the idle state also stops the timekeeping which
(among other things) prevents high resolution timers from triggering going
forward until the first CPU that is woken up restarts the timekeeping.
That allows the CPUs to stay in the deep idle state relatively long in one
go.
From this point on, the CPUs can only be woken up by non-timer hardware
interrupts. If that happens, they go back to the idle state unless the
interrupt that woke up one of them comes from an IRQ that has been armed for
system wakeup, in which case the system resume transition is started.
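As a rough illustration of step 1 above, a kernel subsystem can register such a
notifier with register_pm_notifier(). The sketch below uses hypothetical my_*
names and only reacts to the suspend-related events:
.. code-block:: c

   #include <linux/init.h>
   #include <linux/notifier.h>
   #include <linux/suspend.h>

   /* Called before the suspend transition starts and after resume finishes. */
   static int my_pm_notifier(struct notifier_block *nb,
                             unsigned long action, void *data)
   {
           switch (action) {
           case PM_SUSPEND_PREPARE:
                   /* Prepare for the upcoming change of the system state. */
                   break;
           case PM_POST_SUSPEND:
                   /* Clean up after getting back to the working state. */
                   break;
           }
           return NOTIFY_OK;
   }

   static struct notifier_block my_pm_nb = {
           .notifier_call = my_pm_notifier,
   };

   static int __init my_pm_nb_init(void)
   {
           return register_pm_notifier(&my_pm_nb);
   }
   core_initcall(my_pm_nb_init);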
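Step 2 above also mentions kernel threads that choose to be frozen. A minimal
sketch of such a thread follows (hypothetical my_* names; as noted above,
explicit synchronization is usually preferable to the freezer for kernel
threads):
.. code-block:: c

   #include <linux/freezer.h>
   #include <linux/kthread.h>
   #include <linux/sched.h>

   static int my_freezable_thread(void *unused)
   {
           set_freezable();        /* opt in to the freezer */

           while (!kthread_should_stop()) {
                   /* Enter the refrigerator here while tasks are frozen. */
                   try_to_freeze();

                   /* ... do one unit of work ... */
                   schedule_timeout_interruptible(HZ);
           }
           return 0;
   }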
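The device suspend phases named in step 3 above, and the corresponding resume
phases described in the next section, map onto callbacks in struct dev_pm_ops
(see :ref:`driverapi_pm_devices`). A minimal sketch for a hypothetical "foo"
driver could wire them up like this:
.. code-block:: c

   #include <linux/device.h>
   #include <linux/pm.h>

   /* One empty callback per phase; a real driver saves/restores state here. */
   static int foo_prepare(struct device *dev)       { return 0; } /* prepare */
   static int foo_suspend(struct device *dev)       { return 0; } /* suspend */
   static int foo_suspend_late(struct device *dev)  { return 0; } /* late suspend */
   static int foo_suspend_noirq(struct device *dev) { return 0; } /* noirq suspend */
   static int foo_resume_noirq(struct device *dev)  { return 0; } /* noirq resume */
   static int foo_resume_early(struct device *dev)  { return 0; } /* early resume */
   static int foo_resume(struct device *dev)        { return 0; } /* resume */
   static void foo_complete(struct device *dev)     { }           /* complete */

   static const struct dev_pm_ops foo_pm_ops = {
           .prepare       = foo_prepare,
           .suspend       = foo_suspend,
           .suspend_late  = foo_suspend_late,
           .suspend_noirq = foo_suspend_noirq,
           .resume_noirq  = foo_resume_noirq,
           .resume_early  = foo_resume_early,
           .resume        = foo_resume,
           .complete      = foo_complete,
   };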
.. _s2idle_resume:
Suspend-to-idle Resume Code Flow
================================
The following steps are taken in order to transition the system from the
:ref:`suspend-to-idle <s2idle>` sleep state into the working state:
1. Resuming timekeeping and unfreezing the scheduler tick.
When one of the CPUs is woken up (by a non-timer hardware interrupt), it
leaves the idle state entered in the last step of the preceding suspend
transition, restarts the timekeeping (unless it has been restarted already
by another CPU that woke up earlier) and the scheduler tick on that CPU is
unfrozen.
If the interrupt that has woken up the CPU was armed for system wakeup,
the system resume transition begins.
2. Resuming devices and restoring the working-state configuration of IRQs.
Devices are resumed in four phases called *noirq resume*, *early resume*,
*resume* and *complete* (see :ref:`driverapi_pm_devices` for more
information on what exactly happens in each phase).
Every device is visited in each phase, but typically it is not physically
accessed in more than two of them.
The working-state configuration of IRQs is restored after the *noirq* resume
phase and the runtime PM API is re-enabled for every device whose driver
supports it during the *early* resume phase.
3. Thawing tasks.
Tasks frozen in step 2 of the preceding `suspend <s2idle_suspend_>`_
transition are "thawed", which means that they are woken up from the
uninterruptible sleep that they went into at that time and user space tasks
are allowed to exit the kernel.
4. Invoking system-wide resume notifiers.
This is analogous to step 1 of the `suspend <s2idle_suspend_>`_ transition
and the same set of callbacks is invoked at this point, but a different
"notification type" parameter value is passed to them.
Platform-dependent Suspend Code Flow
====================================
The following steps are taken in order to transition the system from the working
state to platform-dependent suspend state:
1. Invoking system-wide suspend notifiers.
This step is the same as step 1 of the suspend-to-idle suspend transition
described `above <s2idle_suspend_>`_.
2. Freezing tasks.
This step is the same as step 2 of the suspend-to-idle suspend transition
described `above <s2idle_suspend_>`_.
3. Suspending devices and reconfiguring IRQs.
This step is analogous to step 3 of the suspend-to-idle suspend transition
described `above <s2idle_suspend_>`_, but the arming of IRQs for system
wakeup generally does not have any effect on the platform.
There are platforms that can go into a very deep low-power state internally
when all CPUs in them are in sufficiently deep idle states and all I/O
devices have been put into low-power states. On those platforms,
suspend-to-idle can reduce system power very effectively.
On the other platforms, however, low-level components (like interrupt
controllers) need to be turned off in a platform-specific way (implemented
in the hooks provided by the platform driver) to achieve comparable power
reduction.
That usually prevents in-band hardware interrupts from waking up the system,
which must be done in a special platform-dependent way. Then, the
configuration of system wakeup sources usually starts when system wakeup
devices are suspended and is finalized by the platform suspend hooks later
on.
4. Disabling non-boot CPUs.
On some platforms the suspend hooks mentioned above must run in a one-CPU
configuration of the system (in particular, the hardware cannot be accessed
by any code running in parallel with the platform suspend hooks that may,
and often do, trap into the platform firmware in order to finalize the
suspend transition).
For this reason, the CPU offline/online (CPU hotplug) framework is used
to take all of the CPUs in the system, except for one (the boot CPU),
offline (typically, the CPUs that have been taken offline go into deep idle
states).
This means that all tasks are migrated away from those CPUs and all IRQs are
rerouted to the only CPU that remains online.
5. Suspending core system components.
This prepares the core system components for (possibly) losing power going
forward and suspends the timekeeping.
6. Platform-specific power removal.
This is expected to remove power from all of the system components except
for the memory controller and RAM (in order to preserve the contents of the
latter) and some devices designated for system wakeup.
In many cases control is passed to the platform firmware which is expected
to finalize the suspend transition as needed.
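For suspend-to-RAM and standby, platform drivers typically provide the hooks
referred to above through struct platform_suspend_ops, registered with
suspend_set_ops(). A minimal sketch with hypothetical myplat_* callbacks:
.. code-block:: c

   #include <linux/suspend.h>

   static int myplat_begin(suspend_state_t state)
   {
           return 0;       /* the transition is starting */
   }

   static int myplat_prepare(void)
   {
           return 0;       /* called after devices have been suspended */
   }

   static int myplat_enter(suspend_state_t state)
   {
           /*
            * Platform-specific power removal; control is often passed to
            * the platform firmware here.
            */
           return 0;
   }

   static void myplat_finish(void)
   {
           /* called during resume, counterpart of prepare() */
   }

   static const struct platform_suspend_ops myplat_suspend_ops = {
           .valid   = suspend_valid_only_mem,
           .begin   = myplat_begin,
           .prepare = myplat_prepare,
           .enter   = myplat_enter,
           .finish  = myplat_finish,
   };

   /* Typically called from the platform's init code: */
   /* suspend_set_ops(&myplat_suspend_ops); */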
Platform-dependent Resume Code Flow
===================================
The following steps are taken in order to transition the system from a
platform-dependent suspend state into the working state:
1. Platform-specific system wakeup.
The platform is woken up by a signal from one of the designated system
wakeup devices (which need not be an in-band hardware interrupt) and
control is passed back to the kernel (the working configuration of the
platform may need to be restored by the platform firmware before the
kernel gets control again).
2. Resuming core system components.
The suspend-time configuration of the core system components is restored and
the timekeeping is resumed.
3. Re-enabling non-boot CPUs.
The CPUs disabled in step 4 of the preceding suspend transition are taken
back online and their suspend-time configuration is restored.
4. Resuming devices and restoring the working-state configuration of IRQs.
This step is the same as step 2 of the suspend-to-idle resume transition
described `above <s2idle_resume_>`_.
5. Thawing tasks.
This step is the same as step 3 of the suspend-to-idle resume transition
described `above <s2idle_resume_>`_.
6. Invoking system-wide resume notifiers.
This step is the same as step 4 of the suspend-to-idle resume transition
described `above <s2idle_resume_>`_.
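As a small illustration of how the designated system wakeup devices mentioned
in step 1 above come into play, a driver commonly marks its device as
wakeup-capable and arms its IRQ while suspending. A sketch with hypothetical
bar_* names (these callbacks would be hooked into the driver's dev_pm_ops, as
in the earlier sketch):
.. code-block:: c

   #include <linux/device.h>
   #include <linux/interrupt.h>
   #include <linux/pm_wakeup.h>

   struct bar_data {
           struct device *dev;
           int irq;
   };

   /* In the probe path: declare the device wakeup-capable and enable it. */
   static int bar_setup_wakeup(struct bar_data *bd)
   {
           return device_init_wakeup(bd->dev, true);
   }

   static int bar_suspend(struct device *dev)
   {
           struct bar_data *bd = dev_get_drvdata(dev);

           if (device_may_wakeup(dev))
                   enable_irq_wake(bd->irq);  /* arm the IRQ for system wakeup */
           return 0;
   }

   static int bar_resume(struct device *dev)
   {
           struct bar_data *bd = dev_get_drvdata(dev);

           if (device_may_wakeup(dev))
                   disable_irq_wake(bd->irq);
           return 0;
   }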


@@ -8,3 +8,4 @@ System-Wide Power Management
:maxdepth: 2
sleep-states
suspend-flows


@@ -0,0 +1,103 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/arm,syscon-icst.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: ARM System Controller ICST Clocks
maintainers:
- Linus Walleij <linusw@kernel.org>
description: |
The ICS525 and ICS307 oscillators are produced by Integrated
Device Technology (IDT). ARM integrated these oscillators deeply into their
reference designs by adding special control registers that manage such
oscillators to their system controllers.
The various ARM system controllers contain logic to serialize and initialize
an ICST clock request after a write to the 32 bit register at an offset
into the system controller. Furthermore, to even be able to alter one of
these frequencies, the system controller must first be unlocked by
writing a special token to another offset in the system controller.
Some ARM hardware contains special versions of the serial interface that
connect only the low 8 bits of the VDW (missing one bit), hard-wire RDW to
different values and sometimes also hard-wire the output divider. They
therefore have special compatible strings, as per this table (the OD value is
the value on the pins, not the resulting output divider).
In the core modules and logic tiles, the ICST is a configurable clock fed
from a 24 MHz clock on the motherboard (usually the main crystal) used for
generating e.g. video clocks. It is located on the core module and there is
only one of these. This clock node must be a subnode of the core module.
Hardware variant RDW OD VDW
Integrator/AP 22 1 Bit 8 0, rest variable
integratorap-cm
Integrator/AP 46 3 Bit 8 0, rest variable
integratorap-sys
Integrator/AP 22 or 1 17 or (33 or 25 MHz)
integratorap-pci 14 1 14
Integrator/CP 22 variable Bit 8 0, rest variable
integratorcp-cm-core
Integrator/CP 22 variable Bit 8 0, rest variable
integratorcp-cm-mem
The ICST oscillator must be provided inside a system controller node.
properties:
"#clock-cells":
const: 0
compatible:
enum:
- arm,syscon-icst525
- arm,syscon-icst307
- arm,syscon-icst525-integratorap-cm
- arm,syscon-icst525-integratorap-sys
- arm,syscon-icst525-integratorap-pci
- arm,syscon-icst525-integratorcp-cm-core
- arm,syscon-icst525-integratorcp-cm-mem
- arm,integrator-cm-auxosc
- arm,versatile-cm-auxosc
- arm,impd-vco1
- arm,impd-vco2
clocks:
description: Parent clock for the ICST VCO
maxItems: 1
clock-output-names:
maxItems: 1
lock-offset:
$ref: '/schemas/types.yaml#/definitions/uint32'
description: Offset to the unlocking register for the oscillator
vco-offset:
$ref: '/schemas/types.yaml#/definitions/uint32'
description: Offset to the VCO register for the oscillator
required:
- "#clock-cells"
- compatible
- clocks
examples:
- |
vco1: clock@00 {
compatible = "arm,impd1-vco1";
#clock-cells = <0>;
lock-offset = <0x08>;
vco-offset = <0x00>;
clocks = <&sysclk>;
clock-output-names = "IM-PD1-VCO1";
};
...


@@ -1,34 +0,0 @@
Clock bindings for ARM Integrator and Versatile Core Module clocks
Auxiliary Oscillator Clock
This is a configurable clock fed from a 24 MHz crystal,
used for generating e.g. video clocks. It is located on the
core module and there is only one of these.
This clock node *must* be a subnode of the core module, since
it obtains the base address for its address range from its
parent node.
Required properties:
- compatible: must be "arm,integrator-cm-auxosc" or "arm,versatile-cm-auxosc"
- #clock-cells: must be <0>
Optional properties:
- clocks: parent clock(s)
Example:
core-module@10000000 {
xtal24mhz: xtal24mhz@24M {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <24000000>;
};
auxosc: cm_aux_osc@25M {
#clock-cells = <0>;
compatible = "arm,integrator-cm-auxosc";
clocks = <&xtal24mhz>;
};
};


@@ -1,70 +0,0 @@
ARM System Controller ICST clocks
The ICS525 and ICS307 oscillators are produced by Integrated Device
Technology (IDT). ARM integrated these oscillators deeply into their
reference designs by adding special control registers that manage such
oscillators to their system controllers.
The various ARM system controllers contain logic to serialize and initialize
an ICST clock request after a write to the 32 bit register at an offset
into the system controller. Furthermore, to even be able to alter one of
these frequencies, the system controller must first be unlocked by
writing a special token to another offset in the system controller.
Some ARM hardware contains special versions of the serial interface that
connect only the low 8 bits of the VDW (missing one bit), hardwire RDW to
different values and sometimes also hardwire the output divider. They
therefore have special compatible strings as per this table (the OD value is
the value on the pins, not the resulting output divider):
Hardware variant: RDW OD VDW
Integrator/AP 22 1 Bit 8 0, rest variable
integratorap-cm
Integrator/AP 46 3 Bit 8 0, rest variable
integratorap-sys
Integrator/AP 22 or 1 17 or (33 or 25 MHz)
integratorap-pci 14 1 14
Integrator/CP 22 variable Bit 8 0, rest variable
integratorcp-cm-core
Integrator/CP 22 variable Bit 8 0, rest variable
integratorcp-cm-mem
The ICST oscillator must be provided inside a system controller node.
Required properties:
- compatible: must be one of
"arm,syscon-icst525"
"arm,syscon-icst307"
"arm,syscon-icst525-integratorap-cm"
"arm,syscon-icst525-integratorap-sys"
"arm,syscon-icst525-integratorap-pci"
"arm,syscon-icst525-integratorcp-cm-core"
"arm,syscon-icst525-integratorcp-cm-mem"
- lock-offset: the offset address into the system controller where the
unlocking register is located
- vco-offset: the offset address into the system controller where the
ICST control register is located (even 32 bit address)
- #clock-cells: must be <0>
- clocks: parent clock, since the ICST needs a parent clock to derive its
frequency from, this attribute is compulsory.
Example:
syscon: syscon@10000000 {
compatible = "syscon";
reg = <0x10000000 0x1000>;
oscclk0: osc0@c {
compatible = "arm,syscon-icst307";
#clock-cells = <0>;
lock-offset = <0x20>;
vco-offset = <0x0c>;
clocks = <&xtal24mhz>;
};
(...)
};


@@ -1,29 +0,0 @@
* Clock bindings for NXP i.MX8M Mini
Required properties:
- compatible: Should be "fsl,imx8mm-ccm"
- reg: Address and length of the register set
- #clock-cells: Should be <1>
- clocks: list of clock specifiers, must contain an entry for each required
entry in clock-names
- clock-names: should include the following entries:
- "osc_32k"
- "osc_24m"
- "clk_ext1"
- "clk_ext2"
- "clk_ext3"
- "clk_ext4"
clk: clock-controller@30380000 {
compatible = "fsl,imx8mm-ccm";
reg = <0x0 0x30380000 0x0 0x10000>;
#clock-cells = <1>;
clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>, <&clk_ext2>,
<&clk_ext3>, <&clk_ext4>;
clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2",
"clk_ext3", "clk_ext4";
};
The clock consumer should specify the desired clock by having the clock
ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mm-clock.h
for the full list of i.MX8M Mini clock IDs.


@@ -0,0 +1,68 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/imx8mm-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: NXP i.MX8M Mini Clock Control Module Binding
maintainers:
- Anson Huang <Anson.Huang@nxp.com>
description: |
The NXP i.MX8M Mini clock control module is an integrated clock controller, which
generates and supplies clocks to all modules.
properties:
compatible:
const: fsl,imx8mm-ccm
reg:
maxItems: 1
clocks:
items:
- description: 32k osc
- description: 24m osc
- description: ext1 clock input
- description: ext2 clock input
- description: ext3 clock input
- description: ext4 clock input
clock-names:
items:
- const: osc_32k
- const: osc_24m
- const: clk_ext1
- const: clk_ext2
- const: clk_ext3
- const: clk_ext4
'#clock-cells':
const: 1
description:
The clock consumer should specify the desired clock by having the clock
ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mm-clock.h
for the full list of i.MX8M Mini clock IDs.
required:
- compatible
- reg
- clocks
- clock-names
- '#clock-cells'
examples:
# Clock Control Module node:
- |
clk: clock-controller@30380000 {
compatible = "fsl,imx8mm-ccm";
reg = <0x30380000 0x10000>;
#clock-cells = <1>;
clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>, <&clk_ext2>,
<&clk_ext3>, <&clk_ext4>;
clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2",
"clk_ext3", "clk_ext4";
};
...


@@ -40,7 +40,7 @@ properties:
'#clock-cells':
const: 1
description: |
description:
The clock consumer should specify the desired clock by having the clock
ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mn-clock.h
for the full list of i.MX8M Nano clock IDs.
@@ -59,7 +59,7 @@ examples:
- |
clk: clock-controller@30380000 {
compatible = "fsl,imx8mn-ccm";
reg = <0x0 0x30380000 0x0 0x10000>;
reg = <0x30380000 0x10000>;
#clock-cells = <1>;
clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>,
<&clk_ext2>, <&clk_ext3>, <&clk_ext4>;
@@ -67,48 +67,4 @@ examples:
"clk_ext2", "clk_ext3", "clk_ext4";
};
# Required external clocks for Clock Control Module node:
- |
osc_32k: clock-osc-32k {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <32768>;
clock-output-names = "osc_32k";
};
osc_24m: clock-osc-24m {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <24000000>;
clock-output-names = "osc_24m";
};
clk_ext1: clock-ext1 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <133000000>;
clock-output-names = "clk_ext1";
};
clk_ext2: clock-ext2 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <133000000>;
clock-output-names = "clk_ext2";
};
clk_ext3: clock-ext3 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <133000000>;
clock-output-names = "clk_ext3";
};
clk_ext4: clock-ext4 {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency= <133000000>;
clock-output-names = "clk_ext4";
};
...


@@ -1,20 +0,0 @@
* Clock bindings for NXP i.MX8M Quad
Required properties:
- compatible: Should be "fsl,imx8mq-ccm"
- reg: Address and length of the register set
- #clock-cells: Should be <1>
- clocks: list of clock specifiers, must contain an entry for each required
entry in clock-names
- clock-names: should include the following entries:
- "ckil"
- "osc_25m"
- "osc_27m"
- "clk_ext1"
- "clk_ext2"
- "clk_ext3"
- "clk_ext4"
The clock consumer should specify the desired clock by having the clock
ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mq-clock.h
for the full list of i.MX8M Quad clock IDs.


@@ -0,0 +1,72 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/imx8mq-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: NXP i.MX8M Quad Clock Control Module Binding
maintainers:
- Anson Huang <Anson.Huang@nxp.com>
description: |
The NXP i.MX8M Quad clock control module is an integrated clock controller, which
generates and supplies clocks to all modules.
properties:
compatible:
const: fsl,imx8mq-ccm
reg:
maxItems: 1
clocks:
items:
- description: 32k osc
- description: 25m osc
- description: 27m osc
- description: ext1 clock input
- description: ext2 clock input
- description: ext3 clock input
- description: ext4 clock input
clock-names:
items:
- const: ckil
- const: osc_25m
- const: osc_27m
- const: clk_ext1
- const: clk_ext2
- const: clk_ext3
- const: clk_ext4
'#clock-cells':
const: 1
description:
The clock consumer should specify the desired clock by having the clock
ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mq-clock.h
for the full list of i.MX8M Quad clock IDs.
required:
- compatible
- reg
- clocks
- clock-names
- '#clock-cells'
examples:
# Clock Control Module node:
- |
clk: clock-controller@30380000 {
compatible = "fsl,imx8mq-ccm";
reg = <0x30380000 0x10000>;
#clock-cells = <1>;
clocks = <&ckil>, <&osc_25m>, <&osc_27m>,
<&clk_ext1>, <&clk_ext2>,
<&clk_ext3>, <&clk_ext4>;
clock-names = "ckil", "osc_25m", "osc_27m",
"clk_ext1", "clk_ext2",
"clk_ext3", "clk_ext4";
};
...


@@ -0,0 +1,64 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/marvell,mmp2-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Marvell MMP2 and MMP3 Clock Controller
maintainers:
- Lubomir Rintel <lkundrak@v3.sk>
description: |
The clock subsystem on MMP2 or MMP3 generates and supplies clocks to various
controllers within the SoC.
Each clock is assigned an identifier and client nodes use this identifier
to specify the clock which they consume.
All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>.
properties:
compatible:
enum:
- marvell,mmp2-clock # controller compatible with MMP2 SoC
- marvell,mmp3-clock # controller compatible with MMP3 SoC
reg:
items:
- description: MPMU register region
- description: APMU register region
- description: APBC register region
reg-names:
items:
- const: mpmu
- const: apmu
- const: apbc
'#clock-cells':
const: 1
'#reset-cells':
const: 1
required:
- compatible
- reg
- reg-names
- '#clock-cells'
- '#reset-cells'
additionalProperties: false
examples:
- |
clock-controller@d4050000 {
compatible = "marvell,mmp2-clock";
reg = <0xd4050000 0x1000>,
<0xd4282800 0x400>,
<0xd4015000 0x1000>;
reg-names = "mpmu", "apmu", "apbc";
#clock-cells = <1>;
#reset-cells = <1>;
};


@@ -1,21 +0,0 @@
* Marvell MMP2 Clock Controller
The MMP2 clock subsystem generates and supplies clock to various
controllers within the MMP2 SoC.
Required Properties:
- compatible: should be one of the following.
- "marvell,mmp2-clock" - controller compatible with MMP2 SoC.
- reg: physical base address of the clock subsystem and length of memory mapped
region. There are three places in the SoC with clock control logic:
"mpmu", "apmu", "apbc". So three reg spaces need to be defined.
- #clock-cells: should be 1.
- #reset-cells: should be 1.
Each clock is assigned an identifier and client nodes use this identifier
to specify the clock which they consume.
All these identifiers could be found in <dt-bindings/clock/marvell,mmp2.h>.


@@ -0,0 +1,72 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/qcom,gcc-sm8250.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Global Clock & Reset Controller Binding for SM8250
maintainers:
- Stephen Boyd <sboyd@kernel.org>
- Taniya Das <tdas@codeaurora.org>
description: |
Qualcomm global clock control module which supports the clocks, resets and
power domains on SM8250.
See also:
- dt-bindings/clock/qcom,gcc-sm8250.h
properties:
compatible:
const: qcom,gcc-sm8250
clocks:
items:
- description: Board XO source
- description: Sleep clock source
clock-names:
items:
- const: bi_tcxo
- const: sleep_clk
'#clock-cells':
const: 1
'#reset-cells':
const: 1
'#power-domain-cells':
const: 1
reg:
maxItems: 1
protected-clocks:
description:
Protected clock specifier list as per common clock binding.
required:
- compatible
- clocks
- clock-names
- reg
- '#clock-cells'
- '#reset-cells'
- '#power-domain-cells'
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
clock-controller@100000 {
compatible = "qcom,gcc-sm8250";
reg = <0 0x00100000 0 0x1f0000>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&sleep_clk>;
clock-names = "bi_tcxo", "sleep_clk";
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
};
...


@@ -14,7 +14,9 @@ Required properties :
"qcom,rpmcc-apq8060", "qcom,rpmcc"
"qcom,rpmcc-msm8916", "qcom,rpmcc"
"qcom,rpmcc-msm8974", "qcom,rpmcc"
"qcom,rpmcc-msm8976", "qcom,rpmcc"
"qcom,rpmcc-apq8064", "qcom,rpmcc"
"qcom,rpmcc-ipq806x", "qcom,rpmcc"
"qcom,rpmcc-msm8996", "qcom,rpmcc"
"qcom,rpmcc-msm8998", "qcom,rpmcc"
"qcom,rpmcc-qcs404", "qcom,rpmcc"


@@ -20,6 +20,7 @@ properties:
- qcom,sc7180-rpmh-clk
- qcom,sdm845-rpmh-clk
- qcom,sm8150-rpmh-clk
- qcom,sm8250-rpmh-clk
clocks:
maxItems: 1


@@ -0,0 +1,62 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/qcom,sc7180-mss.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Modem Clock Controller Binding for SC7180
maintainers:
- Taniya Das <tdas@codeaurora.org>
description: |
Qualcomm modem clock control module which supports the clocks on SC7180.
See also:
- dt-bindings/clock/qcom,mss-sc7180.h
properties:
compatible:
const: qcom,sc7180-mss
clocks:
items:
- description: gcc_mss_mfab_axi clock from GCC
- description: gcc_mss_nav_axi clock from GCC
- description: gcc_mss_cfg_ahb clock from GCC
clock-names:
items:
- const: gcc_mss_mfab_axis
- const: gcc_mss_nav_axi
- const: cfg_ahb
'#clock-cells':
const: 1
reg:
maxItems: 1
required:
- compatible
- reg
- clocks
- '#clock-cells'
additionalProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,gcc-sc7180.h>
clock-controller@41a8000 {
compatible = "qcom,sc7180-mss";
reg = <0 0x041a8000 0 0x8000>;
clocks = <&gcc GCC_MSS_MFAB_AXIS_CLK>,
<&gcc GCC_MSS_NAV_AXI_CLK>,
<&gcc GCC_MSS_CFG_AHB_CLK>;
clock-names = "gcc_mss_mfab_axis",
"gcc_mss_nav_axi",
"cfg_ahb";
#clock-cells = <1>;
};
...


@@ -1,100 +0,0 @@
* Renesas Clock Pulse Generator / Module Standby and Software Reset
On Renesas ARM SoCs (SH/R-Mobile, R-Car, RZ), the CPG (Clock Pulse Generator)
and MSSR (Module Standby and Software Reset) blocks are intimately connected,
and share the same register block.
They provide the following functionalities:
- The CPG block generates various core clocks,
- The MSSR block provides two functions:
1. Module Standby, providing a Clock Domain to control the clock supply
to individual SoC devices,
2. Reset Control, to perform a software reset of individual SoC devices.
Required Properties:
- compatible: Must be one of:
- "renesas,r7s9210-cpg-mssr" for the r7s9210 SoC (RZ/A2)
- "renesas,r8a7743-cpg-mssr" for the r8a7743 SoC (RZ/G1M)
- "renesas,r8a7744-cpg-mssr" for the r8a7744 SoC (RZ/G1N)
- "renesas,r8a7745-cpg-mssr" for the r8a7745 SoC (RZ/G1E)
- "renesas,r8a77470-cpg-mssr" for the r8a77470 SoC (RZ/G1C)
- "renesas,r8a774a1-cpg-mssr" for the r8a774a1 SoC (RZ/G2M)
- "renesas,r8a774b1-cpg-mssr" for the r8a774b1 SoC (RZ/G2N)
- "renesas,r8a774c0-cpg-mssr" for the r8a774c0 SoC (RZ/G2E)
- "renesas,r8a7790-cpg-mssr" for the r8a7790 SoC (R-Car H2)
- "renesas,r8a7791-cpg-mssr" for the r8a7791 SoC (R-Car M2-W)
- "renesas,r8a7792-cpg-mssr" for the r8a7792 SoC (R-Car V2H)
- "renesas,r8a7793-cpg-mssr" for the r8a7793 SoC (R-Car M2-N)
- "renesas,r8a7794-cpg-mssr" for the r8a7794 SoC (R-Car E2)
- "renesas,r8a7795-cpg-mssr" for the r8a7795 SoC (R-Car H3)
- "renesas,r8a7796-cpg-mssr" for the r8a77960 SoC (R-Car M3-W)
- "renesas,r8a77961-cpg-mssr" for the r8a77961 SoC (R-Car M3-W+)
- "renesas,r8a77965-cpg-mssr" for the r8a77965 SoC (R-Car M3-N)
- "renesas,r8a77970-cpg-mssr" for the r8a77970 SoC (R-Car V3M)
- "renesas,r8a77980-cpg-mssr" for the r8a77980 SoC (R-Car V3H)
- "renesas,r8a77990-cpg-mssr" for the r8a77990 SoC (R-Car E3)
- "renesas,r8a77995-cpg-mssr" for the r8a77995 SoC (R-Car D3)
- reg: Base address and length of the memory resource used by the CPG/MSSR
block
- clocks: References to external parent clocks, one entry for each entry in
clock-names
- clock-names: List of external parent clock names. Valid names are:
- "extal" (r7s9210, r8a7743, r8a7744, r8a7745, r8a77470, r8a774a1,
r8a774b1, r8a774c0, r8a7790, r8a7791, r8a7792, r8a7793,
r8a7794, r8a7795, r8a77960, r8a77961, r8a77965, r8a77970,
r8a77980, r8a77990, r8a77995)
- "extalr" (r8a774a1, r8a774b1, r8a7795, r8a77960, r8a77961, r8a77965,
r8a77970, r8a77980)
- "usb_extal" (r8a7743, r8a7744, r8a7745, r8a77470, r8a7790, r8a7791,
r8a7793, r8a7794)
- #clock-cells: Must be 2
- For CPG core clocks, the two clock specifier cells must be "CPG_CORE"
and a core clock reference, as defined in
<dt-bindings/clock/*-cpg-mssr.h>.
- For module clocks, the two clock specifier cells must be "CPG_MOD" and
a module number, as defined in the datasheet.
- #power-domain-cells: Must be 0
- SoC devices that are part of the CPG/MSSR Clock Domain and can be
power-managed through Module Standby should refer to the CPG device
node in their "power-domains" property, as documented by the generic PM
Domain bindings in
Documentation/devicetree/bindings/power/power-domain.yaml.
- #reset-cells: Must be 1
- The single reset specifier cell must be the module number, as defined
in the datasheet.
Examples
--------
- CPG device node:
cpg: clock-controller@e6150000 {
compatible = "renesas,r8a7795-cpg-mssr";
reg = <0 0xe6150000 0 0x1000>;
clocks = <&extal_clk>, <&extalr_clk>;
clock-names = "extal", "extalr";
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
};
- CPG/MSSR Clock Domain member device node:
scif2: serial@e6e88000 {
compatible = "renesas,scif-r8a7795", "renesas,scif";
reg = <0 0xe6e88000 0 64>;
interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 310>;
clock-names = "fck";
dmas = <&dmac1 0x13>, <&dmac1 0x12>;
dma-names = "tx", "rx";
power-domains = <&cpg>;
resets = <&cpg 310>;
};


@@ -0,0 +1,119 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: "http://devicetree.org/schemas/clock/renesas,cpg-mssr.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Renesas Clock Pulse Generator / Module Standby and Software Reset
maintainers:
- Geert Uytterhoeven <geert+renesas@glider.be>
description: |
On Renesas ARM SoCs (SH/R-Mobile, R-Car, RZ), the CPG (Clock Pulse Generator)
and MSSR (Module Standby and Software Reset) blocks are intimately connected,
and share the same register block.
They provide the following functionalities:
- The CPG block generates various core clocks,
- The MSSR block provides two functions:
1. Module Standby, providing a Clock Domain to control the clock supply
to individual SoC devices,
2. Reset Control, to perform a software reset of individual SoC devices.
properties:
compatible:
enum:
- renesas,r7s9210-cpg-mssr # RZ/A2
- renesas,r8a7743-cpg-mssr # RZ/G1M
- renesas,r8a7744-cpg-mssr # RZ/G1N
- renesas,r8a7745-cpg-mssr # RZ/G1E
- renesas,r8a77470-cpg-mssr # RZ/G1C
- renesas,r8a774a1-cpg-mssr # RZ/G2M
- renesas,r8a774b1-cpg-mssr # RZ/G2N
- renesas,r8a774c0-cpg-mssr # RZ/G2E
- renesas,r8a7790-cpg-mssr # R-Car H2
- renesas,r8a7791-cpg-mssr # R-Car M2-W
- renesas,r8a7792-cpg-mssr # R-Car V2H
- renesas,r8a7793-cpg-mssr # R-Car M2-N
- renesas,r8a7794-cpg-mssr # R-Car E2
- renesas,r8a7795-cpg-mssr # R-Car H3
- renesas,r8a7796-cpg-mssr # R-Car M3-W
- renesas,r8a77961-cpg-mssr # R-Car M3-W+
- renesas,r8a77965-cpg-mssr # R-Car M3-N
- renesas,r8a77970-cpg-mssr # R-Car V3M
- renesas,r8a77980-cpg-mssr # R-Car V3H
- renesas,r8a77990-cpg-mssr # R-Car E3
- renesas,r8a77995-cpg-mssr # R-Car D3
reg:
maxItems: 1
clocks:
minItems: 1
maxItems: 2
clock-names:
minItems: 1
maxItems: 2
items:
enum:
- extal # All
- extalr # Most R-Car Gen3 and RZ/G2
- usb_extal # Most R-Car Gen2 and RZ/G1
'#clock-cells':
description: |
- For CPG core clocks, the two clock specifier cells must be "CPG_CORE"
and a core clock reference, as defined in
<dt-bindings/clock/*-cpg-mssr.h>
- For module clocks, the two clock specifier cells must be "CPG_MOD" and
a module number, as defined in the datasheet.
const: 2
'#power-domain-cells':
description:
SoC devices that are part of the CPG/MSSR Clock Domain and can be
power-managed through Module Standby should refer to the CPG device node
in their "power-domains" property, as documented by the generic PM Domain
bindings in Documentation/devicetree/bindings/power/power-domain.yaml.
const: 0
'#reset-cells':
description:
The single reset specifier cell must be the module number, as defined in
the datasheet.
const: 1
if:
not:
properties:
compatible:
items:
enum:
- renesas,r7s9210-cpg-mssr
then:
required:
- '#reset-cells'
required:
- compatible
- reg
- clocks
- clock-names
- '#clock-cells'
- '#power-domain-cells'
additionalProperties: false
examples:
- |
cpg: clock-controller@e6150000 {
compatible = "renesas,r8a7795-cpg-mssr";
reg = <0xe6150000 0x1000>;
clocks = <&extal_clk>, <&extalr_clk>;
clock-names = "extal", "extalr";
#clock-cells = <2>;
#power-domain-cells = <0>;
#reset-cells = <1>;
};


@@ -38,10 +38,17 @@ Required properties:
- reg: offset and length of the USB 2.0 clock selector register block.
- clocks: A list of phandles and specifier pairs.
- clock-names: Name of the clocks.
- The functional clock must be "ehci_ohci"
- The functional clock of USB 2.0 host side must be "ehci_ohci"
- The functional clock of HS-USB side must be "hs-usb-if"
- The USB_EXTAL clock pin must be "usb_extal"
- The USB_XTAL clock pin must be "usb_xtal"
- #clock-cells: Must be 0
- power-domains: A phandle and symbolic PM domain specifier.
See power/renesas,rcar-sysc.yaml.
- resets: A list of phandles and specifier pairs.
- reset-names: Name of the resets.
- The reset of USB 2.0 host side must be "ehci_ohci"
- The reset of HS-USB side must be "hs-usb-if"
Example (R-Car H3):
@@ -49,7 +56,11 @@ Example (R-Car H3):
compatible = "renesas,r8a7795-rcar-usb2-clock-sel",
"renesas,rcar-gen3-usb2-clock-sel";
reg = <0 0xe6590630 0 0x02>;
clocks = <&cpg CPG_MOD 703>, <&usb_extal>, <&usb_xtal>;
clock-names = "ehci_ohci", "usb_extal", "usb_xtal";
clocks = <&cpg CPG_MOD 703>, <&cpg CPG_MOD 704>,
<&usb_extal>, <&usb_xtal>;
clock-names = "ehci_ohci", "hs-usb-if", "usb_extal", "usb_xtal";
#clock-cells = <0>;
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
resets = <&cpg 703>, <&cpg 704>;
reset-names = "ehci_ohci", "hs-usb-if";
};


@@ -1,4 +1,4 @@
Spreadtrum Clock Binding
Spreadtrum SC9860 Clock Binding
------------------------
Required properties:


@@ -0,0 +1,105 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
# Copyright 2019 Unisoc Inc.
%YAML 1.2
---
$id: "http://devicetree.org/schemas/clock/sprd,sc9863a-clk.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: SC9863A Clock Control Unit Device Tree Bindings
maintainers:
- Orson Zhai <orsonzhai@gmail.com>
- Baolin Wang <baolin.wang7@gmail.com>
- Chunyan Zhang <zhang.lyra@gmail.com>
properties:
"#clock-cells":
const: 1
compatible :
enum:
- sprd,sc9863a-ap-clk
- sprd,sc9863a-aon-clk
- sprd,sc9863a-apahb-gate
- sprd,sc9863a-pmu-gate
- sprd,sc9863a-aonapb-gate
- sprd,sc9863a-pll
- sprd,sc9863a-mpll
- sprd,sc9863a-rpll
- sprd,sc9863a-dpll
- sprd,sc9863a-mm-gate
- sprd,sc9863a-apapb-gate
clocks:
minItems: 1
maxItems: 4
description: |
The input parent clock(s) phandle for this clock; only list fixed
clocks which are declared in the devicetree.
clock-names:
minItems: 1
maxItems: 4
items:
- const: ext-26m
- const: ext-32k
- const: ext-4m
- const: rco-100m
reg:
maxItems: 1
required:
- compatible
- '#clock-cells'
if:
properties:
compatible:
enum:
- sprd,sc9863a-ap-clk
- sprd,sc9863a-aon-clk
then:
required:
- reg
else:
description: |
Other SC9863a clock nodes should be children of a syscon node whose
compatible string should be:
"sprd,sc9863a-glbregs", "syscon", "simple-mfd"
The 'reg' property for the clock node is also required if there is a
sub-range of registers for the clocks.
examples:
- |
ap_clk: clock-controller@21500000 {
compatible = "sprd,sc9863a-ap-clk";
reg = <0 0x21500000 0 0x1000>;
clocks = <&ext_26m>, <&ext_32k>;
clock-names = "ext-26m", "ext-32k";
#clock-cells = <1>;
};
- |
soc {
#address-cells = <2>;
#size-cells = <2>;
ap_ahb_regs: syscon@20e00000 {
compatible = "sprd,sc9863a-glbregs", "syscon", "simple-mfd";
reg = <0 0x20e00000 0 0x4000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0 0x20e00000 0x4000>;
apahb_gate: apahb-gate@0 {
compatible = "sprd,sc9863a-apahb-gate";
reg = <0x0 0x1020>;
#clock-cells = <1>;
};
};
};
...


@@ -0,0 +1,35 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/ti,am654-ehrpwm-tbclk.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: TI EHRPWM Time Base Clock
maintainers:
- Vignesh Raghavendra <vigneshr@ti.com>
properties:
compatible:
items:
- const: ti,am654-ehrpwm-tbclk
- const: syscon
"#clock-cells":
const: 1
reg:
maxItems: 1
required:
- compatible
- "#clock-cells"
- reg
examples:
- |
ehrpwm_tbclk: syscon@4140 {
compatible = "ti,am654-ehrpwm-tbclk", "syscon";
reg = <0x4140 0x18>;
#clock-cells = <1>;
};


@@ -1,37 +0,0 @@
JZ4740 and similar SoCs real-time clock driver
Required properties:
- compatible: One of:
- "ingenic,jz4740-rtc" - for use with the JZ4740 SoC
- "ingenic,jz4780-rtc" - for use with the JZ4780 SoC
- reg: Address range of rtc register set
- interrupts: IRQ number for the alarm interrupt
- clocks: phandle to the "rtc" clock
- clock-names: must be "rtc"
Optional properties:
- system-power-controller: To use this component as the
system power controller
- reset-pin-assert-time-ms: Reset pin low-level assertion
time after wakeup (default 60ms; range 0-125ms if RTC clock
at 32 kHz)
- min-wakeup-pin-assert-time-ms: Minimum wakeup pin assertion
time (default 100ms; range 0-2s if RTC clock at 32 kHz)
Example:
rtc@10003000 {
compatible = "ingenic,jz4740-rtc";
reg = <0x10003000 0x40>;
interrupt-parent = <&intc>;
interrupts = <32>;
clocks = <&rtc_clock>;
clock-names = "rtc";
system-power-controller;
reset-pin-assert-time-ms = <60>;
min-wakeup-pin-assert-time-ms = <100>;
};


@@ -0,0 +1,83 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/rtc/ingenic,rtc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Ingenic SoCs Real-Time Clock DT bindings
maintainers:
- Paul Cercueil <paul@crapouillou.net>
allOf:
- $ref: rtc.yaml#
properties:
compatible:
oneOf:
- enum:
- ingenic,jz4740-rtc
- ingenic,jz4760-rtc
- items:
- const: ingenic,jz4725b-rtc
- const: ingenic,jz4740-rtc
- items:
- enum:
- ingenic,jz4770-rtc
- ingenic,jz4780-rtc
- const: ingenic,jz4760-rtc
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
maxItems: 1
clock-names:
const: rtc
system-power-controller:
description: |
Indicates that the RTC is responsible for powering OFF
the system.
type: boolean
ingenic,reset-pin-assert-time-ms:
minimum: 0
maximum: 125
default: 60
description: |
Reset pin low-level assertion time after wakeup
(assuming RTC clock at 32 kHz)
ingenic,min-wakeup-pin-assert-time-ms:
minimum: 0
maximum: 2000
default: 100
description: |
Minimum wakeup pin assertion time
(assuming RTC clock at 32 kHz)
required:
- compatible
- reg
- interrupts
- clocks
- clock-names
examples:
- |
#include <dt-bindings/clock/jz4740-cgu.h>
rtc_dev: rtc@10003000 {
compatible = "ingenic,jz4740-rtc";
reg = <0x10003000 0x40>;
interrupt-parent = <&intc>;
interrupts = <15>;
clocks = <&cgu JZ4740_CLK_RTC>;
clock-names = "rtc";
};


@@ -0,0 +1,14 @@
Device-Tree bindings for MediaTek SoC based RTC
Required properties:
- compatible : Should be "mediatek,mt2712-rtc" : for MT2712 SoC
- reg : Specifies base physical address and size of the registers;
- interrupts : Should contain the interrupt for RTC alarm;
Example:
rtc: rtc@10011000 {
compatible = "mediatek,mt2712-rtc";
reg = <0 0x10011000 0 0x1000>;
interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_LOW>;
};


@@ -416,7 +416,7 @@ The preferred way to set up the helpers is to fill in the
struct gpio_irq_chip inside struct gpio_chip before adding the gpio_chip.
If you do this, the additional irq_chip will be set up by gpiolib at the
same time as setting up the rest of the GPIO functionality. The following
is a typical example of a cascaded interrupt handler using gpio_irq_chip::
is a typical example of a cascaded interrupt handler using gpio_irq_chip:
.. code-block:: c
@@ -453,7 +453,7 @@ is a typical example of a cascaded interrupt handler using gpio_irq_chip::
return devm_gpiochip_add_data(dev, &g->gc, g);
The helpers support using hierarchical interrupt controllers as well.
In this case the typical set-up will look like this::
In this case the typical set-up will look like this:
.. code-block:: c


@@ -115,8 +115,10 @@ data. Note that the opposite is not true - it would be valid for
FIEMAP_EXTENT_NOT_ALIGNED to appear alone.
* FIEMAP_EXTENT_LAST
This is the last extent in the file. A mapping attempt past this
extent will return nothing.
This is generally the last extent in the file. A mapping attempt past
this extent may return nothing. Some implementations set this flag to
indicate this extent is the last one in the range queried by the user
(via fiemap->fm_length).
* FIEMAP_EXTENT_UNKNOWN
The location of this extent is currently unknown. This may indicate


@@ -112,13 +112,13 @@ to ensure that crash data is preserved to process later.
-- On OPAL based machines (PowerNV), if the kernel is build with
CONFIG_OPAL_CORE=y, OPAL memory at the time of crash is also
exported as /sys/firmware/opal/core file. This procfs file is
exported as /sys/firmware/opal/mpipl/core file. This procfs file is
helpful in debugging OPAL crashes with GDB. The kernel memory
used for exporting this procfs file can be released by echo'ing
'1' to /sys/kernel/fadump_release_opalcore node.
'1' to /sys/firmware/opal/mpipl/release_core node.
e.g.
# echo 1 > /sys/kernel/fadump_release_opalcore
# echo 1 > /sys/firmware/opal/mpipl/release_core
Implementation details:
-----------------------
@@ -268,6 +268,11 @@ Here is the list of files under kernel sysfs:
be handled and vmcore will not be captured. This interface can be
easily integrated with kdump service start/stop.
/sys/kernel/fadump/mem_reserved
This is used to display the memory reserved by FADump for saving the
crash dump.
/sys/kernel/fadump_release_mem
This file is available only when FADump is active during
second kernel. This is used to release the reserved memory
@@ -283,14 +288,29 @@ Here is the list of files under kernel sysfs:
enhanced to use this interface to release the memory reserved for
dump and continue without 2nd reboot.
/sys/kernel/fadump_release_opalcore
Note: /sys/kernel/fadump_release_opalcore sysfs has moved to
/sys/firmware/opal/mpipl/release_core
/sys/firmware/opal/mpipl/release_core
This file is available only on OPAL based machines when FADump is
active during capture kernel. This is used to release the memory
used by the kernel to export /sys/firmware/opal/core file. To
used by the kernel to export /sys/firmware/opal/mpipl/core file. To
release this memory, echo '1' to it:
echo 1 > /sys/kernel/fadump_release_opalcore
echo 1 > /sys/firmware/opal/mpipl/release_core
Note: The following FADump sysfs files are deprecated.
+----------------------------------+--------------------------------+
| Deprecated | Alternative |
+----------------------------------+--------------------------------+
| /sys/kernel/fadump_enabled | /sys/kernel/fadump/enabled |
+----------------------------------+--------------------------------+
| /sys/kernel/fadump_registered | /sys/kernel/fadump/registered |
+----------------------------------+--------------------------------+
| /sys/kernel/fadump_release_mem | /sys/kernel/fadump/release_mem |
+----------------------------------+--------------------------------+
Here is the list of files under powerpc debugfs:
(Assuming debugfs is mounted on /sys/kernel/debug directory.)


@@ -125,10 +125,13 @@ of ftrace. Here is a list of some of the key files:
trace:
This file holds the output of the trace in a human
readable format (described below). Note, tracing is temporarily
disabled when the file is open for reading. Once all readers
are closed, tracing is re-enabled. Opening this file for
readable format (described below). Opening this file for
writing with the O_TRUNC flag clears the ring buffer content.
Note, this file is not a consumer. If tracing is off
(no tracer running, or tracing_on is zero), it will produce
the same output each time it is read. When tracing is on,
it may produce inconsistent results as it tries to read
the entire buffer without consuming it.
trace_pipe:
@@ -142,9 +145,7 @@ of ftrace. Here is a list of some of the key files:
will not be read again with a sequential read. The
"trace" file is static, and if the tracer is not
adding more data, it will display the same
information every time it is read. Unlike the
"trace" file, opening this file for reading will not
temporarily disable tracing.
information every time it is read.
trace_options:
@@ -262,6 +263,20 @@ of ftrace. Here is a list of some of the key files:
traced by the function tracer as well. This option will also
cause PIDs of tasks that exit to be removed from the file.
set_ftrace_notrace_pid:
Have the function tracer ignore threads whose PID are listed in
this file.
If the "function-fork" option is set, then when a task whose
PID is listed in this file forks, the child's PID will
automatically be added to this file, and the child will not be
traced by the function tracer as well. This option will also
cause PIDs of tasks that exit to be removed from the file.
If a PID is in both this file and "set_ftrace_pid", then this
file takes precedence, and the thread will not be traced.
set_event_pid:
Have the events only trace a task with a PID listed in this file.
@@ -273,6 +288,19 @@ of ftrace. Here is a list of some of the key files:
cause the PIDs of tasks to be removed from this file when the task
exits.
set_event_notrace_pid:
Have the events not trace a task with a PID listed in this file.
Note, sched_switch and sched_wakeup will trace threads not listed
in this file, even if a thread's PID is in the file if the
sched_switch or sched_wakeup events also trace a thread that should
be traced.
To have the PIDs of children of tasks with their PID in this file
added on fork, enable the "event-fork" option. That option will also
cause the PIDs of tasks to be removed from this file when the task
exits.
set_graph_function:
Functions listed in this file will cause the function graph
@@ -1125,6 +1153,12 @@ Here are the available options:
the trace displays additional information about the
latency, as described in "Latency trace format".
pause-on-trace
When set, opening the trace file for read will pause
writing to the ring buffer (as if tracing_on was set to zero).
This simulates the original behavior of the trace file.
When the file is closed, tracing will be enabled again.
record-cmd
When any event or tracer is enabled, a hook is enabled
in the sched_switch trace point to fill comm cache
@@ -1176,6 +1210,8 @@ Here are the available options:
tasks fork. Also, when tasks with PIDs in set_event_pid exit,
their PIDs will be removed from the file.
This affects PIDs listed in set_event_notrace_pid as well.
function-trace
The latency tracers will enable function tracing
if this option is enabled (default it is). When
@@ -1190,6 +1226,8 @@ Here are the available options:
set_ftrace_pid exit, their PIDs will be removed from the
file.
This affects PIDs in set_ftrace_notrace_pid as well.
display-graph
When set, the latency tracers (irqsoff, wakeup, etc) will
use function graph tracing instead of function tracing.
@@ -2126,6 +2164,8 @@ periodically make a CPU constantly busy with interrupts disabled.
# cat trace
# tracer: hwlat
#
# entries-in-buffer/entries-written: 13/13 #P:8
#
# _-----=> irqs-off
# / _----=> need-resched
# | / _---=> hardirq/softirq
@@ -2133,12 +2173,18 @@ periodically make a CPU constantly busy with interrupts disabled.
# ||| / delay
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
<...>-3638 [001] d... 19452.055471: #1 inner/outer(us): 12/14 ts:1499801089.066141940
<...>-3638 [003] d... 19454.071354: #2 inner/outer(us): 11/9 ts:1499801091.082164365
<...>-3638 [002] dn.. 19461.126852: #3 inner/outer(us): 12/9 ts:1499801098.138150062
<...>-3638 [001] d... 19488.340960: #4 inner/outer(us): 8/12 ts:1499801125.354139633
<...>-3638 [003] d... 19494.388553: #5 inner/outer(us): 8/12 ts:1499801131.402150961
<...>-3638 [003] d... 19501.283419: #6 inner/outer(us): 0/12 ts:1499801138.297435289 nmi-total:4 nmi-count:1
<...>-1729 [001] d... 678.473449: #1 inner/outer(us): 11/12 ts:1581527483.343962693 count:6
<...>-1729 [004] d... 689.556542: #2 inner/outer(us): 16/9 ts:1581527494.889008092 count:1
<...>-1729 [005] d... 714.756290: #3 inner/outer(us): 16/16 ts:1581527519.678961629 count:5
<...>-1729 [001] d... 718.788247: #4 inner/outer(us): 9/17 ts:1581527523.889012713 count:1
<...>-1729 [002] d... 719.796341: #5 inner/outer(us): 13/9 ts:1581527524.912872606 count:1
<...>-1729 [006] d... 844.787091: #6 inner/outer(us): 9/12 ts:1581527649.889048502 count:2
<...>-1729 [003] d... 849.827033: #7 inner/outer(us): 18/9 ts:1581527654.889013793 count:1
<...>-1729 [007] d... 853.859002: #8 inner/outer(us): 9/12 ts:1581527658.889065736 count:1
<...>-1729 [001] d... 855.874978: #9 inner/outer(us): 9/11 ts:1581527660.861991877 count:1
<...>-1729 [001] d... 863.938932: #10 inner/outer(us): 9/11 ts:1581527668.970010500 count:1 nmi-total:7 nmi-count:1
<...>-1729 [007] d... 878.050780: #11 inner/outer(us): 9/12 ts:1581527683.385002600 count:1 nmi-total:5 nmi-count:1
<...>-1729 [007] d... 886.114702: #12 inner/outer(us): 9/12 ts:1581527691.385001600 count:1
The above output is somewhat the same in the header. All events will have
@@ -2148,7 +2194,7 @@ interrupts disabled 'd'. Under the FUNCTION title there is:
This is the count of events recorded that were greater than the
tracing_threshold (See below).
inner/outer(us): 12/14
inner/outer(us): 11/11
This shows two numbers as "inner latency" and "outer latency". The test
runs in a loop checking a timestamp twice. The latency detected within
@@ -2156,11 +2202,15 @@ interrupts disabled 'd'. Under the FUNCTION title there is:
after the previous timestamp and the next timestamp in the loop is
the "outer latency".
ts:1499801089.066141940
ts:1581527483.343962693
The absolute timestamp that the event happened.
The absolute timestamp that the first latency was recorded in the window.
nmi-total:4 nmi-count:1
count:6
The number of times a latency was detected during the window.
nmi-total:7 nmi-count:1
On architectures that support it, if an NMI comes in during the
test, the time spent in NMI is reported in "nmi-total" (in

View File

@@ -1297,7 +1297,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/arm/arm-boards
F: Documentation/devicetree/bindings/auxdisplay/arm-charlcd.txt
F: Documentation/devicetree/bindings/clock/arm-integrator.txt
F: Documentation/devicetree/bindings/clock/arm,syscon-icst.yaml
F: Documentation/devicetree/bindings/i2c/i2c-versatile.txt
F: Documentation/devicetree/bindings/interrupt-controller/arm,versatile-fpga-irq.txt
F: Documentation/devicetree/bindings/mtd/arm-versatile.txt
@@ -2015,7 +2015,9 @@ M: Sean Wang <sean.wang@mediatek.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/rtc/rtc-mt2712.txt
F: Documentation/devicetree/bindings/rtc/rtc-mt7622.txt
F: drivers/rtc/rtc-mt2712.c
F: drivers/rtc/rtc-mt6397.c
F: drivers/rtc/rtc-mt7622.c
@@ -6383,6 +6385,13 @@ F: include/trace/events/mdio.h
F: include/uapi/linux/mdio.h
F: include/uapi/linux/mii.h
EXFAT FILE SYSTEM
M: Namjae Jeon <namjae.jeon@samsung.com>
M: Sungjong Seo <sj1557.seo@samsung.com>
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/exfat/
EXT2 FILE SYSTEM
M: Jan Kara <jack@suse.com>
L: linux-ext4@vger.kernel.org
@@ -9683,17 +9692,16 @@ F: include/uapi/linux/lightnvm.h
LINUX FOR POWER MACINTOSH
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
W: http://www.penguinppc.org/
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
S: Odd Fixes
F: arch/powerpc/platforms/powermac/
F: drivers/macintosh/
LINUX FOR POWERPC (32-BIT AND 64-BIT)
M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
M: Paul Mackerras <paulus@samba.org>
M: Michael Ellerman <mpe@ellerman.id.au>
W: https://github.com/linuxppc/linux/wiki
R: Benjamin Herrenschmidt <benh@kernel.crashing.org>
R: Paul Mackerras <paulus@samba.org>
W: https://github.com/linuxppc/wiki/wiki
L: linuxppc-dev@lists.ozlabs.org
Q: http://patchwork.ozlabs.org/project/linuxppc-dev/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
@@ -9710,6 +9718,8 @@ F: drivers/crypto/vmx/
F: drivers/i2c/busses/i2c-opal.c
F: drivers/net/ethernet/ibm/ibmveth.*
F: drivers/net/ethernet/ibm/ibmvnic.*
F: drivers/*/*/*pasemi*
F: drivers/*/*pasemi*
F: drivers/pci/hotplug/pnv_php.c
F: drivers/pci/hotplug/rpa*
F: drivers/rtc/rtc-opal.c
@@ -9726,51 +9736,31 @@ N: pseries
LINUX FOR POWERPC EMBEDDED MPC5XXX
M: Anatolij Gustschin <agust@denx.de>
L: linuxppc-dev@lists.ozlabs.org
T: git git://git.denx.de/linux-denx-agust.git
S: Maintained
S: Odd Fixes
F: arch/powerpc/platforms/512x/
F: arch/powerpc/platforms/52xx/
LINUX FOR POWERPC EMBEDDED PPC4XX
M: Alistair Popple <alistair@popple.id.au>
M: Matt Porter <mporter@kernel.crashing.org>
W: http://www.penguinppc.org/
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
S: Orphan
F: arch/powerpc/platforms/40x/
F: arch/powerpc/platforms/44x/
LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX
M: Scott Wood <oss@buserror.net>
M: Kumar Gala <galak@kernel.crashing.org>
W: http://www.penguinppc.org/
L: linuxppc-dev@lists.ozlabs.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux.git
S: Maintained
S: Odd fixes
F: arch/powerpc/platforms/83xx/
F: arch/powerpc/platforms/85xx/
F: Documentation/devicetree/bindings/powerpc/fsl/
LINUX FOR POWERPC EMBEDDED PPC8XX
M: Vitaly Bordug <vitb@kernel.crashing.org>
W: http://www.penguinppc.org/
M: Christophe Leroy <christophe.leroy@c-s.fr>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: arch/powerpc/platforms/8xx/
LINUX FOR POWERPC EMBEDDED XILINX VIRTEX
L: linuxppc-dev@lists.ozlabs.org
S: Orphan
F: arch/powerpc/*/*virtex*
F: arch/powerpc/*/*/*virtex*
LINUX FOR POWERPC PA SEMI PWRFICIENT
L: linuxppc-dev@lists.ozlabs.org
S: Orphan
F: arch/powerpc/platforms/pasemi/
F: drivers/*/*pasemi*
F: drivers/*/*/*pasemi*
LINUX KERNEL DUMP TEST MODULE (LKDTM)
M: Kees Cook <keescook@chromium.org>
S: Maintained
@@ -12664,16 +12654,6 @@ W: http://wireless.kernel.org/en/users/Drivers/p54
S: Maintained
F: drivers/net/wireless/intersil/p54/
PA SEMI ETHERNET DRIVER
L: netdev@vger.kernel.org
S: Orphan
F: drivers/net/ethernet/pasemi/*
PA SEMI SMBUS DRIVER
L: linux-i2c@vger.kernel.org
S: Orphan
F: drivers/i2c/busses/i2c-pasemi.c
PACKING
M: Vladimir Oltean <olteanv@gmail.com>
L: netdev@vger.kernel.org
@@ -13291,6 +13271,7 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
F: samples/pidfd/
F: tools/testing/selftests/pidfd/
F: tools/testing/selftests/pid_namespace/
F: tools/testing/selftests/clone3/
K: (?i)pidfd
K: (?i)clone3
@@ -14619,7 +14600,7 @@ F: Documentation/s390/
F: Documentation/driver-api/s390-drivers.rst
S390 COMMON I/O LAYER
M: Sebastian Ott <sebott@linux.ibm.com>
M: Vineeth Vijayan <vneethv@linux.ibm.com>
M: Peter Oberparleiter <oberpar@linux.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -14661,7 +14642,7 @@ S: Supported
F: drivers/s390/net/
S390 PCI SUBSYSTEM
M: Sebastian Ott <sebott@linux.ibm.com>
M: Niklas Schnelle <schnelle@linux.ibm.com>
M: Gerald Schaefer <gerald.schaefer@de.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/

View File

@@ -248,11 +248,18 @@ config ARCH_HAS_SET_DIRECT_MAP
bool
#
# Select if arch has an uncached kernel segment and provides the
# uncached_kernel_address / cached_kernel_address symbols to use it
# Select if the architecture provides the arch_dma_set_uncached symbol to
# either provide an uncached segment alias for a DMA allocation, or
# to remap the page tables in place.
#
config ARCH_HAS_UNCACHED_SEGMENT
select ARCH_HAS_DMA_PREP_COHERENT
config ARCH_HAS_DMA_SET_UNCACHED
bool
#
# Select if the architecture provides the arch_dma_clear_uncached symbol
# to undo an in-place page table remap for uncached access.
#
config ARCH_HAS_DMA_CLEAR_UNCACHED
bool
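As a minimal sketch, an architecture with a fixed uncached alias window
could implement the pair of hooks selected by these options roughly as
follows; the alias offset is an assumed, made-up value and not taken
from any real port:

	#include <linux/types.h>

	/* Assumed architecture-specific uncached alias offset. */
	#define EXAMPLE_UNCACHED_OFFSET	0x20000000UL

	void *arch_dma_set_uncached(void *addr, size_t size)
	{
		/* Return the uncached alias of the cached kernel address. */
		return (void *)((unsigned long)addr + EXAMPLE_UNCACHED_OFFSET);
	}

	void arch_dma_clear_uncached(void *addr, size_t size)
	{
		/* Nothing to undo for a pure alias window. */
	}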
# Select if arch init_task must go in the __init_task_data section

View File

@@ -33,7 +33,5 @@ int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping);
void arm_iommu_detach_device(struct device *dev);
int arm_dma_supported(struct device *dev, u64 mask);
#endif /* __KERNEL__ */
#endif

View File

@@ -410,13 +410,10 @@ static int __ref impd1_probe(struct lm_device *dev)
* 5 = Key lower right
*/
/* We need the two MMCI GPIO entries */
lookup->table[0].chip_label = chipname;
lookup->table[0].chip_hwnum = 3;
lookup->table[0].con_id = "wp";
lookup->table[1].chip_label = chipname;
lookup->table[1].chip_hwnum = 4;
lookup->table[1].con_id = "cd";
lookup->table[1].flags = GPIO_ACTIVE_LOW;
lookup->table[0] = (struct gpiod_lookup)
GPIO_LOOKUP(chipname, 3, "wp", 0);
lookup->table[1] = (struct gpiod_lookup)
GPIO_LOOKUP(chipname, 4, "cd", GPIO_ACTIVE_LOW);
gpiod_add_lookup_table(lookup);
}

View File

@@ -109,6 +109,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
int index)
{
struct omap3_idle_statedata *cx = &omap3_idle_data[index];
int error;
if (omap_irq_pending() || need_resched())
goto return_sleep_time;
@@ -125,8 +126,11 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
* Call idle CPU PM enter notifier chain so that
* VFP context is saved.
*/
if (cx->mpu_state == PWRDM_POWER_OFF)
cpu_pm_enter();
if (cx->mpu_state == PWRDM_POWER_OFF) {
error = cpu_pm_enter();
if (error)
goto out_clkdm_set;
}
/* Execute ARM wfi */
omap_sram_idle();
@@ -139,6 +143,7 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
cpu_pm_exit();
out_clkdm_set:
/* Re-allow idle for C1 */
if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);

View File

@@ -122,6 +122,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
{
struct idle_statedata *cx = state_ptr + index;
u32 mpuss_can_lose_context = 0;
int error;
/*
* CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -159,7 +160,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
* Call idle CPU PM enter notifier chain so that
* VFP and per CPU interrupt context is saved.
*/
cpu_pm_enter();
error = cpu_pm_enter();
if (error)
goto cpu_pm_out;
if (dev->cpu == 0) {
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
@@ -169,13 +172,17 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
* Call idle CPU cluster PM enter notifier chain
* to save GIC and wakeupgen context.
*/
if (mpuss_can_lose_context)
cpu_cluster_pm_enter();
if (mpuss_can_lose_context) {
error = cpu_cluster_pm_enter();
if (error)
goto cpu_cluster_pm_out;
}
}
omap4_enter_lowpower(dev->cpu, cx->cpu_state);
cpu_done[dev->cpu] = true;
cpu_cluster_pm_out:
/* Wakeup CPU1 only if it is not offlined */
if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
@@ -197,12 +204,6 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
}
}
/*
* Call idle CPU PM exit notifier chain to restore
* VFP and per CPU IRQ context.
*/
cpu_pm_exit();
/*
* Call idle CPU cluster PM exit notifier chain
* to restore GIC and wakeupgen context.
@@ -210,6 +211,13 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
if (dev->cpu == 0 && mpuss_can_lose_context)
cpu_cluster_pm_exit();
/*
* Call idle CPU PM exit notifier chain to restore
* VFP and per CPU IRQ context.
*/
cpu_pm_exit();
cpu_pm_out:
tick_broadcast_exit();
fail:

View File

@@ -191,6 +191,7 @@ void omap_sram_idle(void)
int per_next_state = PWRDM_POWER_ON;
int core_next_state = PWRDM_POWER_ON;
u32 sdrc_pwr = 0;
int error;
mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
switch (mpu_next_state) {
@@ -219,8 +220,11 @@ void omap_sram_idle(void)
pwrdm_pre_transition(NULL);
/* PER */
if (per_next_state == PWRDM_POWER_OFF)
cpu_cluster_pm_enter();
if (per_next_state == PWRDM_POWER_OFF) {
error = cpu_cluster_pm_enter();
if (error)
return;
}
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {

View File

@@ -179,6 +179,23 @@ static void arm_dma_sync_single_for_device(struct device *dev,
__dma_page_cpu_to_dev(page, offset, size, dir);
}
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
* during bus mastering, then you would pass 0x00ffffff as the mask
* to this function.
*/
static int arm_dma_supported(struct device *dev, u64 mask)
{
unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
/*
* Translate the device's DMA mask to a PFN limit. This
* PFN number includes the page which we can DMA to.
*/
return dma_to_pfn(dev, mask) >= max_dma_pfn;
}
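As a minimal sketch, a hypothetical driver that can only drive the low
24 address bits while bus mastering would request such a mask at probe
time (the driver and its probe function below are assumptions, not code
from the tree); dma_supported() ends up consulting the check above:

	#include <linux/dma-mapping.h>
	#include <linux/platform_device.h>

	/* Hypothetical driver limited to 24-bit bus mastering. */
	static int example_probe(struct platform_device *pdev)
	{
		/* Fails if the platform cannot satisfy a 24-bit DMA mask. */
		return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(24));
	}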
const struct dma_map_ops arm_dma_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
@@ -219,49 +236,6 @@ const struct dma_map_ops arm_coherent_dma_ops = {
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
/*
* Translate the device's DMA mask to a PFN limit. This
* PFN number includes the page which we can DMA to.
*/
if (dma_to_pfn(dev, mask) < max_dma_pfn) {
if (warn)
dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
mask,
dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
max_dma_pfn + 1);
return 0;
}
return 1;
}
static u64 get_coherent_dma_mask(struct device *dev)
{
u64 mask = (u64)DMA_BIT_MASK(32);
if (dev) {
mask = dev->coherent_dma_mask;
/*
* Sanity check the DMA mask - it must be non-zero, and
* must be able to be satisfied by a DMA allocation.
*/
if (mask == 0) {
dev_warn(dev, "coherent DMA mask is unset\n");
return 0;
}
if (!__dma_supported(dev, mask, true))
return 0;
}
return mask;
}
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
/*
@@ -688,7 +662,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, pgprot_t prot, bool is_coherent,
unsigned long attrs, const void *caller)
{
u64 mask = get_coherent_dma_mask(dev);
u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
struct page *page = NULL;
void *addr;
bool allowblock, cma;
@@ -712,9 +686,6 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
}
#endif
if (!mask)
return NULL;
buf = kzalloc(sizeof(*buf),
gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
if (!buf)
@@ -1087,17 +1058,6 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
dir);
}
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
* during bus mastering, then you would pass 0x00ffffff as the mask
* to this function.
*/
int arm_dma_supported(struct device *dev, u64 mask)
{
return __dma_supported(dev, mask, false);
}
static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
/*

View File

@@ -4,6 +4,8 @@
#ifdef CONFIG_ARCH_RANDOM
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <asm/cpufeature.h>
@@ -66,6 +68,18 @@ static inline bool __init __early_cpu_has_rndr(void)
return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
}
static inline bool __init __must_check
arch_get_random_seed_long_early(unsigned long *v)
{
WARN_ON(system_state != SYSTEM_BOOTING);
if (!__early_cpu_has_rndr())
return false;
return __arm64_rndr(v);
}
#define arch_get_random_seed_long_early arch_get_random_seed_long_early
#else
static inline bool __arm64_rndr(unsigned long *v) { return false; }

View File

@@ -3,6 +3,7 @@ config CSKY
def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_USE_BUILTIN_BSWAP
@@ -38,16 +39,22 @@ config CSKY
select HAVE_ARCH_AUDITSYSCALL
select HAVE_COPY_THREAD_TLS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
select HAVE_KPROBES if !CPU_CK610
select HAVE_KPROBES_ON_FTRACE if !CPU_CK610
select HAVE_KRETPROBES if !CPU_CK610
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_DMA_CONTIGUOUS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select MAY_HAVE_SPARSE_IRQ
@@ -65,6 +72,12 @@ config CSKY
select PCI_SYSCALL if PCI
select PCI_MSI if PCI
config LOCKDEP_SUPPORT
def_bool y
config ARCH_SUPPORTS_UPROBES
def_bool y if !CPU_CK610
config CPU_HAS_CACHEV2
bool

View File

@@ -172,10 +172,7 @@
addi r6, 0xe
cpwcr r6, cpcr30
lsri r6, 28
addi r6, 2
lsli r6, 28
addi r6, 0xe
movi r6, 0
cpwcr r6, cpcr31
.endm

View File

@@ -10,11 +10,6 @@
#define MTCR_DIST 0xC0006420
#define MFCR_DIST 0xC0006020
void __init init_fpu(void)
{
mtcr("cr<1, 2>", 0);
}
/*
* fpu_libc_helper() is to help libc to execute:
* - mfcr %a, cr<1, 2>

View File

@@ -100,6 +100,66 @@
rte
.endm
.macro SAVE_REGS_FTRACE
subi sp, 152
stw tls, (sp, 0)
stw lr, (sp, 4)
mfcr lr, psr
stw lr, (sp, 12)
addi lr, sp, 152
stw lr, (sp, 16)
stw a0, (sp, 20)
stw a0, (sp, 24)
stw a1, (sp, 28)
stw a2, (sp, 32)
stw a3, (sp, 36)
addi sp, 40
stm r4-r13, (sp)
addi sp, 40
stm r16-r30, (sp)
#ifdef CONFIG_CPU_HAS_HILO
mfhi lr
stw lr, (sp, 60)
mflo lr
stw lr, (sp, 64)
mfcr lr, cr14
stw lr, (sp, 68)
#endif
subi sp, 80
.endm
.macro RESTORE_REGS_FTRACE
ldw tls, (sp, 0)
ldw a0, (sp, 16)
mtcr a0, ss0
#ifdef CONFIG_CPU_HAS_HILO
ldw a0, (sp, 140)
mthi a0
ldw a0, (sp, 144)
mtlo a0
ldw a0, (sp, 148)
mtcr a0, cr14
#endif
ldw a0, (sp, 24)
ldw a1, (sp, 28)
ldw a2, (sp, 32)
ldw a3, (sp, 36)
addi sp, 40
ldm r4-r13, (sp)
addi sp, 40
ldm r16-r30, (sp)
addi sp, 72
mfcr sp, ss0
.endm
.macro SAVE_SWITCH_STACK
subi sp, 64
stm r4-r11, (sp)
@@ -230,11 +290,8 @@
addi r6, 0x1ce
mtcr r6, cr<30, 15> /* Set MSA0 */
lsri r6, 28
addi r6, 2
lsli r6, 28
addi r6, 0x1ce
mtcr r6, cr<31, 15> /* Set MSA1 */
movi r6, 0
mtcr r6, cr<31, 15> /* Clr MSA1 */
/* enable MMU */
mfcr r6, cr18

View File

@@ -9,7 +9,8 @@
int fpu_libc_helper(struct pt_regs *regs);
void fpu_fpe(struct pt_regs *regs);
void __init init_fpu(void);
static inline void init_fpu(void) { mtcr("cr<1, 2>", 0); }
void save_to_user_fp(struct user_fp *user_fp);
void restore_from_user_fp(struct user_fp *user_fp);

View File

@@ -3,6 +3,8 @@
#include <linux/linkage.h>
#include <asm/ftrace.h>
#include <abi/entry.h>
#include <asm/asm-offsets.h>
/*
* csky-gcc with -pg will put the following asm after prologue:
@@ -44,6 +46,22 @@
jmp t1
.endm
.macro mcount_enter_regs
subi sp, 8
stw lr, (sp, 0)
stw r8, (sp, 4)
SAVE_REGS_FTRACE
.endm
.macro mcount_exit_regs
RESTORE_REGS_FTRACE
ldw t1, (sp, 0)
ldw r8, (sp, 4)
ldw lr, (sp, 8)
addi sp, 12
jmp t1
.endm
.macro save_return_regs
subi sp, 16
stw a0, (sp, 0)
@@ -122,6 +140,8 @@ ENTRY(ftrace_caller)
ldw a0, (sp, 16)
subi a0, 4
ldw a1, (sp, 24)
lrw a2, function_trace_op
ldw a2, (a2, 0)
nop
GLOBAL(ftrace_call)
@@ -157,3 +177,31 @@ ENTRY(return_to_handler)
jmp lr
END(return_to_handler)
#endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ENTRY(ftrace_regs_caller)
mcount_enter_regs
lrw t1, PT_FRAME_SIZE
add t1, sp
ldw a0, (t1, 0)
subi a0, 4
ldw a1, (t1, 8)
lrw a2, function_trace_op
ldw a2, (a2, 0)
mov a3, sp
nop
GLOBAL(ftrace_regs_call)
nop32_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
nop
GLOBAL(ftrace_graph_regs_call)
nop32_stub
#endif
mcount_exit_regs
ENDPROC(ftrace_regs_caller)
#endif /* CONFIG_DYNAMIC_FTRACE */

View File

@@ -10,6 +10,8 @@
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#define ARCH_SUPPORTS_FTRACE_OPS 1
#define MCOUNT_ADDR ((unsigned long)_mcount)
#ifndef __ASSEMBLY__

View File

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_CSKY_KPROBES_H
#define __ASM_CSKY_KPROBES_H
#include <asm-generic/kprobes.h>
#ifdef CONFIG_KPROBES
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 1
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
#include <asm/probes.h>
struct prev_kprobe {
struct kprobe *kp;
unsigned int status;
};
/* Single step context for kprobe */
struct kprobe_step_ctx {
unsigned long ss_pending;
unsigned long match_addr;
};
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned int kprobe_status;
unsigned long saved_sr;
struct prev_kprobe prev_kprobe;
struct kprobe_step_ctx ss_ctx;
};
void arch_remove_kprobe(struct kprobe *p);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
int kprobe_breakpoint_handler(struct pt_regs *regs);
int kprobe_single_step_handler(struct pt_regs *regs);
void kretprobe_trampoline(void);
void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
#endif /* CONFIG_KPROBES */
#endif /* __ASM_CSKY_KPROBES_H */

View File

@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CSKY_PROBES_H
#define __ASM_CSKY_PROBES_H
typedef u32 probe_opcode_t;
typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
/* architecture specific copy of original instruction */
struct arch_probe_insn {
probe_opcode_t *insn;
probes_handler_t *handler;
/* restore address after simulation */
unsigned long restore;
};
#ifdef CONFIG_KPROBES
typedef u32 kprobe_opcode_t;
struct arch_specific_insn {
struct arch_probe_insn api;
};
#endif
#endif /* __ASM_CSKY_PROBES_H */

View File

@@ -43,6 +43,7 @@ extern struct cpuinfo_csky cpu_data[];
struct thread_struct {
unsigned long ksp; /* kernel stack pointer */
unsigned long sr; /* saved status register */
unsigned long trap_no; /* saved trap number */
/* FPU regs */
struct user_fp __aligned(16) user_fp;

View File

@@ -7,11 +7,14 @@
#include <uapi/asm/ptrace.h>
#include <asm/traps.h>
#include <linux/types.h>
#include <linux/compiler.h>
#ifndef __ASSEMBLY__
#define PS_S 0x80000000 /* Supervisor Mode */
#define USR_BKPT 0x1464
#define arch_has_single_step() (1)
#define current_pt_regs() \
({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; })
@@ -22,6 +25,18 @@
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->pc = val;
}
#if defined(__CSKYABIV2__)
#define MAX_REG_OFFSET offsetof(struct pt_regs, dcsr)
#else
#define MAX_REG_OFFSET offsetof(struct pt_regs, regs[9])
#endif
static inline bool in_syscall(struct pt_regs const *regs)
{
return ((regs->sr >> 16) & 0xff) == VEC_TRAP0;
@@ -37,5 +52,33 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->a0;
}
/* Valid only for Kernel mode traps. */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->usp;
}
extern int regs_query_register_offset(const char *name);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n);
/*
* regs_get_register() - get register value from its offset
* @regs: pt_regs from which the register value is retrieved
* @offset: offset of the register.
*
* regs_get_register returns the value of the register at @offset from @regs.
* The @offset is the offset of the register in struct pt_regs.
* If @offset is bigger than MAX_REG_OFFSET, this returns 0.
*/
static inline unsigned long regs_get_register(struct pt_regs *regs,
unsigned int offset)
{
if (unlikely(offset > MAX_REG_OFFSET))
return 0;
return *(unsigned long *)((unsigned long)regs + offset);
}
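As a minimal usage sketch (the helper below is hypothetical, not part of
this header), the offset-based accessor pairs naturally with
regs_query_register_offset():

	/* Sketch: read a saved register from a pt_regs snapshot by name. */
	static inline unsigned long example_read_reg_by_name(struct pt_regs *regs,
							     const char *name)
	{
		int off = regs_query_register_offset(name);

		if (off < 0)
			return 0;	/* unknown register name */
		return regs_get_register(regs, off);
	}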
#endif /* __ASSEMBLY__ */
#endif /* __ASM_CSKY_PTRACE_H */

View File

@@ -57,6 +57,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
#define TIF_SYSCALL_TRACEPOINT 4 /* syscall tracepoint instrumentation */
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing */
#define TIF_UPROBE 6 /* uprobe breakpoint or singlestep */
#define TIF_POLLING_NRFLAG 16 /* poll_idle() is TIF_NEED_RESCHED */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */
@@ -68,6 +69,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)

View File

@@ -0,0 +1,33 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_CSKY_UPROBES_H
#define __ASM_CSKY_UPROBES_H
#include <asm/probes.h>
#define MAX_UINSN_BYTES 4
#define UPROBE_SWBP_INSN USR_BKPT
#define UPROBE_SWBP_INSN_SIZE 2
#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
typedef u32 uprobe_opcode_t;
struct arch_uprobe_task {
unsigned long saved_trap_no;
};
struct arch_uprobe {
union {
u8 insn[MAX_UINSN_BYTES];
u8 ixol[MAX_UINSN_BYTES];
};
struct arch_probe_insn api;
unsigned long insn_size;
bool simulate;
};
int uprobe_breakpoint_handler(struct pt_regs *regs);
int uprobe_single_step_handler(struct pt_regs *regs);
#endif /* __ASM_CSKY_UPROBES_H */

View File

@@ -4,6 +4,7 @@ extra-y := head.o vmlinux.lds
obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o
obj-y += power.o syscall.o syscall_table.o setup.o
obj-y += process.o cpu-probe.o ptrace.o dumpstack.o
obj-y += probes/
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o

View File

@@ -72,6 +72,7 @@ int main(void)
DEFINE(PT_RLO, offsetof(struct pt_regs, rlo));
#endif
DEFINE(PT_USP, offsetof(struct pt_regs, usp));
DEFINE(PT_FRAME_SIZE, sizeof(struct pt_regs));
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t,

View File

@@ -128,7 +128,10 @@ tlbop_end 1
ENTRY(csky_systemcall)
SAVE_ALL TRAP0_SIZE
zero_fp
#ifdef CONFIG_RSEQ_DEBUG
mov a0, sp
jbsr rseq_syscall
#endif
psrset ee, ie
lrw r11, __NR_syscalls
@@ -218,10 +221,17 @@ ret_from_exception:
andn r9, r10
ldw r12, (r9, TINFO_FLAGS)
andi r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
andi r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | _TIF_UPROBE)
cmpnei r12, 0
bt exit_work
1:
#ifdef CONFIG_TRACE_IRQFLAGS
ld r10, (sp, LSAVE_PSR)
btsti r10, 6
bf 2f
jbsr trace_hardirqs_on
2:
#endif
RESTORE_ALL
exit_work:
@@ -277,6 +287,10 @@ ENTRY(csky_irq)
zero_fp
psrset ee
#ifdef CONFIG_TRACE_IRQFLAGS
jbsr trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPTION
mov r9, sp /* Get current stack pointer */
bmaski r10, THREAD_SHIFT

View File

@@ -3,6 +3,7 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -126,6 +127,9 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = ftrace_modify_code((unsigned long)&ftrace_call,
(unsigned long)func, true, true);
if (!ret)
ret = ftrace_modify_code((unsigned long)&ftrace_regs_call,
(unsigned long)func, true, true);
return ret;
}
@@ -135,6 +139,14 @@ int __init ftrace_dyn_arch_init(void)
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
return ftrace_modify_code(rec->ip, addr, true, true);
}
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer)
@@ -190,5 +202,35 @@ int ftrace_disable_ftrace_graph_caller(void)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifndef CONFIG_CPU_HAS_ICACHE_INS
struct ftrace_modify_param {
int command;
atomic_t cpu_count;
};
static int __ftrace_modify_code(void *data)
{
struct ftrace_modify_param *param = data;
if (atomic_inc_return(&param->cpu_count) == 1) {
ftrace_modify_all_code(param->command);
atomic_inc(&param->cpu_count);
} else {
while (atomic_read(&param->cpu_count) <= num_online_cpus())
cpu_relax();
local_icache_inv_all(NULL);
}
return 0;
}
void arch_ftrace_update_code(int command)
{
struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
}
#endif
/* _mcount is defined in abi's mcount.S */
EXPORT_SYMBOL(_mcount);

View File

@@ -21,6 +21,11 @@ END(_start)
ENTRY(_start_smp_secondary)
SETUP_MMU
/* copy msa1 from CPU0 */
lrw r6, secondary_msa1
ld.w r6, (r6, 0)
mtcr r6, cr<31, 15>
/* set stack point */
lrw r6, secondary_stack
ld.w r6, (r6, 0)

View File

@@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o
obj-$(CONFIG_KPROBES) += kprobes_trampoline.o
obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o
CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)

View File

@@ -0,0 +1,49 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <asm/sections.h>
#include "decode-insn.h"
#include "simulate-insn.h"
/* Return:
* INSN_REJECTED if the instruction is not allowed to be kprobed,
* INSN_GOOD_NO_SLOT if the instruction is supported but doesn't use its slot.
*/
enum probe_insn __kprobes
csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
{
probe_opcode_t insn = le32_to_cpu(*addr);
CSKY_INSN_SET_SIMULATE(br16, insn);
CSKY_INSN_SET_SIMULATE(bt16, insn);
CSKY_INSN_SET_SIMULATE(bf16, insn);
CSKY_INSN_SET_SIMULATE(jmp16, insn);
CSKY_INSN_SET_SIMULATE(jsr16, insn);
CSKY_INSN_SET_SIMULATE(lrw16, insn);
CSKY_INSN_SET_SIMULATE(pop16, insn);
CSKY_INSN_SET_SIMULATE(br32, insn);
CSKY_INSN_SET_SIMULATE(bt32, insn);
CSKY_INSN_SET_SIMULATE(bf32, insn);
CSKY_INSN_SET_SIMULATE(jmp32, insn);
CSKY_INSN_SET_SIMULATE(jsr32, insn);
CSKY_INSN_SET_SIMULATE(lrw32, insn);
CSKY_INSN_SET_SIMULATE(pop32, insn);
CSKY_INSN_SET_SIMULATE(bez32, insn);
CSKY_INSN_SET_SIMULATE(bnez32, insn);
CSKY_INSN_SET_SIMULATE(bnezad32, insn);
CSKY_INSN_SET_SIMULATE(bhsz32, insn);
CSKY_INSN_SET_SIMULATE(bhz32, insn);
CSKY_INSN_SET_SIMULATE(blsz32, insn);
CSKY_INSN_SET_SIMULATE(blz32, insn);
CSKY_INSN_SET_SIMULATE(bsr32, insn);
CSKY_INSN_SET_SIMULATE(jmpi32, insn);
CSKY_INSN_SET_SIMULATE(jsri32, insn);
return INSN_GOOD;
}

View File

@@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __CSKY_KERNEL_KPROBES_DECODE_INSN_H
#define __CSKY_KERNEL_KPROBES_DECODE_INSN_H
#include <asm/sections.h>
#include <asm/kprobes.h>
enum probe_insn {
INSN_REJECTED,
INSN_GOOD_NO_SLOT,
INSN_GOOD,
};
#define is_insn32(insn) ((insn & 0xc000) == 0xc000)
enum probe_insn __kprobes
csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *asi);
#endif /* __CSKY_KERNEL_KPROBES_DECODE_INSN_H */

View File

@@ -0,0 +1,66 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kprobes.h>
int arch_check_ftrace_location(struct kprobe *p)
{
if (ftrace_location((unsigned long)p->addr))
p->flags |= KPROBE_FLAG_FTRACE;
return 0;
}
/* Ftrace callback handler for kprobes -- called under preempt disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
{
bool lr_saver = false;
struct kprobe *p;
struct kprobe_ctlblk *kcb;
/* Preempt is disabled by ftrace */
p = get_kprobe((kprobe_opcode_t *)ip);
if (!p) {
p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE));
if (unlikely(!p) || kprobe_disabled(p))
return;
lr_saver = true;
}
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
} else {
unsigned long orig_ip = instruction_pointer(regs);
if (lr_saver)
ip -= MCOUNT_INSN_SIZE;
instruction_pointer_set(regs, ip);
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (!p->pre_handler || !p->pre_handler(p, regs)) {
/*
* Emulate singlestep (and also recover regs->pc)
* as if there is a nop
*/
instruction_pointer_set(regs,
(unsigned long)p->addr + MCOUNT_INSN_SIZE);
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
instruction_pointer_set(regs, orig_ip);
}
/*
* If pre_handler returns !0, it changes regs->pc. We have to
* skip emulating post_handler.
*/
__this_cpu_write(current_kprobe, NULL);
}
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
p->ainsn.api.insn = NULL;
return 0;
}

View File

@@ -0,0 +1,499 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include "decode-insn.h"
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
struct csky_insn_patch {
kprobe_opcode_t *addr;
u32 opcode;
atomic_t cpu_count;
};
static int __kprobes patch_text_cb(void *priv)
{
struct csky_insn_patch *param = priv;
unsigned int addr = (unsigned int)param->addr;
if (atomic_inc_return(&param->cpu_count) == 1) {
*(u16 *) addr = cpu_to_le16(param->opcode);
dcache_wb_range(addr, addr + 2);
atomic_inc(&param->cpu_count);
} else {
while (atomic_read(&param->cpu_count) <= num_online_cpus())
cpu_relax();
}
icache_inv_range(addr, addr + 2);
return 0;
}
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) };
return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask);
}
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
p->ainsn.api.restore = (unsigned long)p->addr + offset;
patch_text(p->ainsn.api.insn, p->opcode);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
p->ainsn.api.restore = 0;
}
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (p->ainsn.api.handler)
p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
post_kprobe_handler(kcb, regs);
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
unsigned long probe_addr = (unsigned long)p->addr;
if (probe_addr & 0x1) {
pr_warn("Address not aligned.\n");
return -EINVAL;
}
/* copy instruction */
p->opcode = le32_to_cpu(*p->addr);
/* decode instruction */
switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) {
case INSN_REJECTED: /* insn not supported */
return -EINVAL;
case INSN_GOOD_NO_SLOT: /* insn need simulation */
p->ainsn.api.insn = NULL;
break;
case INSN_GOOD: /* instruction uses slot */
p->ainsn.api.insn = get_insn_slot();
if (!p->ainsn.api.insn)
return -ENOMEM;
break;
}
/* prepare the instruction */
if (p->ainsn.api.insn)
arch_prepare_ss_slot(p);
else
arch_prepare_simulate(p);
return 0;
}
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
patch_text(p->addr, USR_BKPT);
}
/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
patch_text(p->addr, p->opcode);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static void __kprobes set_current_kprobe(struct kprobe *p)
{
__this_cpu_write(current_kprobe, p);
}
/*
* Interrupts need to be disabled before single-step mode is set, and not
* reenabled until after single-step mode ends.
* Without disabling interrupts on the local CPU, there is a chance that
* an interrupt occurs between exception return and the start of the
* out-of-line single-step, which would result in wrongly single-stepping
* into the interrupt handler.
*/
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
kcb->saved_sr = regs->sr;
regs->sr &= ~BIT(6);
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
regs->sr = kcb->saved_sr;
}
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
{
unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
kcb->ss_ctx.ss_pending = true;
kcb->ss_ctx.match_addr = addr + offset;
}
static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
kcb->ss_ctx.ss_pending = false;
kcb->ss_ctx.match_addr = 0;
}
#define TRACE_MODE_SI BIT(14)
#define TRACE_MODE_MASK ~(0x3 << 14)
#define TRACE_MODE_RUN 0
static void __kprobes setup_singlestep(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb, int reenter)
{
unsigned long slot;
if (reenter) {
save_previous_kprobe(kcb);
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_REENTER;
} else {
kcb->kprobe_status = KPROBE_HIT_SS;
}
if (p->ainsn.api.insn) {
/* prepare for single stepping */
slot = (unsigned long)p->ainsn.api.insn;
set_ss_context(kcb, slot, p); /* mark pending ss */
/* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
instruction_pointer_set(regs, slot);
} else {
/* insn simulation */
arch_simulate_insn(p, regs);
}
}
static int __kprobes reenter_kprobe(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
switch (kcb->kprobe_status) {
case KPROBE_HIT_SSDONE:
case KPROBE_HIT_ACTIVE:
kprobes_inc_nmissed_count(p);
setup_singlestep(p, regs, kcb, 1);
break;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
pr_warn("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
BUG();
break;
default:
WARN_ON(1);
return 0;
}
return 1;
}
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
if (!cur)
return;
/* return addr restore if non-branching insn */
if (cur->ainsn.api.restore != 0)
regs->pc = cur->ainsn.api.restore;
/* restore back original saved kprobe variables and continue */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
return;
}
/* call post handler */
kcb->kprobe_status = KPROBE_HIT_SSDONE;
if (cur->post_handler) {
/* post_handler can hit breakpoint and single step
* again, so we enable D-flag for recursive exception.
*/
cur->post_handler(cur, regs, 0);
}
reset_current_kprobe();
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
switch (kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single
* stepped caused a page fault. We reset the current
* kprobe and the ip points back to the probe address
* and allow the page fault handler to continue as a
* normal page fault.
*/
regs->pc = (unsigned long) cur->addr;
if (!instruction_pointer(regs))
BUG();
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting; we could
* also use the npre/npostfault counts to account for these
* specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page fault. This could happen
* if the handler tries to access user space via
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if (fixup_exception(regs))
return 1;
}
return 0;
}
int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
struct kprobe *p, *cur_kprobe;
struct kprobe_ctlblk *kcb;
unsigned long addr = instruction_pointer(regs);
kcb = get_kprobe_ctlblk();
cur_kprobe = kprobe_running();
p = get_kprobe((kprobe_opcode_t *) addr);
if (p) {
if (cur_kprobe) {
if (reenter_kprobe(p, regs, kcb))
return 1;
} else {
/* Probe hit */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
/*
* If we have no pre-handler or it returned 0, we
* continue with normal processing. If we have a
* pre-handler and it returned non-zero, it will
* modify the execution path and no need to single
* stepping. Let's just reset current kprobe and exit.
*
* pre_handler can hit a breakpoint and can step thru
* before return.
*/
if (!p->pre_handler || !p->pre_handler(p, regs))
setup_singlestep(p, regs, kcb, 0);
else
reset_current_kprobe();
}
return 1;
}
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
* Return back to original instruction, and continue.
*/
return 0;
}
int __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if ((kcb->ss_ctx.ss_pending)
&& (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
clear_ss_context(kcb); /* clear pending ss */
kprobes_restore_local_irqflag(kcb, regs);
regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
post_kprobe_handler(kcb, regs);
return 1;
}
return 0;
}
/*
* Provide a blacklist of symbols identifying ranges which cannot be kprobed.
* This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
*/
int __init arch_populate_kprobe_blacklist(void)
{
int ret;
ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
(unsigned long)__irqentry_text_end);
return ret;
}
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =
(unsigned long)&kretprobe_trampoline;
kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
/*
* It is possible to have multiple instances associated with a given
* task either because multiple functions in the call path have
* return probes installed on them, and/or more than one
* return probe was registered for a target function.
*
* We can handle this because:
* - instances are always pushed into the head of the list
* - when multiple return probes are registered for the same
* function, the (chronologically) first instance's ret_addr
* will be the real return address, and all the rest will
* point to kretprobe_trampoline.
*/
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
orig_ret_address = (unsigned long)ri->ret_addr;
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
correct_ret_addr = ri->ret_addr;
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_hash_unlock(current, &flags);
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
return (void *)orig_ret_address;
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->lr;
regs->lr = (unsigned long) &kretprobe_trampoline;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
return 0;
}
int __init arch_init_kprobes(void)
{
return 0;
}

View File

@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0+ */
#include <linux/linkage.h>
#include <abi/entry.h>
ENTRY(kretprobe_trampoline)
SAVE_REGS_FTRACE
mov a0, sp /* pt_regs */
jbsr trampoline_probe_handler
/* use the result as the return-address */
mov lr, a0
RESTORE_REGS_FTRACE
rts
ENDPROC(kretprobe_trampoline)

View File

@@ -0,0 +1,398 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include "decode-insn.h"
#include "simulate-insn.h"
static inline bool csky_insn_reg_get_val(struct pt_regs *regs,
unsigned long index,
unsigned long *ptr)
{
if (index < 14)
*ptr = *(&regs->a0 + index);
if (index > 15 && index < 31)
*ptr = *(&regs->exregs[0] + index - 16);
switch (index) {
case 14:
*ptr = regs->usp;
break;
case 15:
*ptr = regs->lr;
break;
case 31:
*ptr = regs->tls;
break;
default:
goto fail;
}
return true;
fail:
return false;
}
static inline bool csky_insn_reg_set_val(struct pt_regs *regs,
unsigned long index,
unsigned long val)
{
if (index < 14)
*(&regs->a0 + index) = val;
if (index > 15 && index < 31)
*(&regs->exregs[0] + index - 16) = val;
switch (index) {
case 14:
regs->usp = val;
break;
case 15:
regs->lr = val;
break;
case 31:
regs->tls = val;
break;
default:
goto fail;
}
return true;
fail:
return false;
}
void __kprobes
simulate_br16(u32 opcode, long addr, struct pt_regs *regs)
{
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0x3ff) << 1, 9));
}
void __kprobes
simulate_br32(u32 opcode, long addr, struct pt_regs *regs)
{
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
}
void __kprobes
simulate_bt16(u32 opcode, long addr, struct pt_regs *regs)
{
if (regs->sr & 1)
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0x3ff) << 1, 9));
else
instruction_pointer_set(regs, addr + 2);
}
void __kprobes
simulate_bt32(u32 opcode, long addr, struct pt_regs *regs)
{
if (regs->sr & 1)
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
else
instruction_pointer_set(regs, addr + 4);
}
void __kprobes
simulate_bf16(u32 opcode, long addr, struct pt_regs *regs)
{
if (!(regs->sr & 1))
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0x3ff) << 1, 9));
else
instruction_pointer_set(regs, addr + 2);
}
void __kprobes
simulate_bf32(u32 opcode, long addr, struct pt_regs *regs)
{
if (!(regs->sr & 1))
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
else
instruction_pointer_set(regs, addr + 4);
}
void __kprobes
simulate_jmp16(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = (opcode >> 2) & 0xf;
csky_insn_reg_get_val(regs, tmp, &tmp);
instruction_pointer_set(regs, tmp & 0xfffffffe);
}
void __kprobes
simulate_jmp32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
csky_insn_reg_get_val(regs, tmp, &tmp);
instruction_pointer_set(regs, tmp & 0xfffffffe);
}
void __kprobes
simulate_jsr16(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = (opcode >> 2) & 0xf;
csky_insn_reg_get_val(regs, tmp, &tmp);
regs->lr = addr + 2;
instruction_pointer_set(regs, tmp & 0xfffffffe);
}
void __kprobes
simulate_jsr32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
csky_insn_reg_get_val(regs, tmp, &tmp);
regs->lr = addr + 4;
instruction_pointer_set(regs, tmp & 0xfffffffe);
}
void __kprobes
simulate_lrw16(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long val;
unsigned long tmp = (opcode & 0x300) >> 3;
unsigned long offset = ((opcode & 0x1f) | tmp) << 2;
tmp = (opcode & 0xe0) >> 5;
val = *(unsigned int *)(instruction_pointer(regs) + offset);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_lrw32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long val;
unsigned long offset = (opcode & 0xffff0000) >> 14;
unsigned long tmp = opcode & 0x0000001f;
val = *(unsigned int *)
((instruction_pointer(regs) + offset) & 0xfffffffc);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_pop16(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long *tmp = (unsigned long *)regs->usp;
int i;
for (i = 0; i < (opcode & 0xf); i++) {
csky_insn_reg_set_val(regs, i + 4, *tmp);
tmp += 1;
}
if (opcode & 0x10) {
csky_insn_reg_set_val(regs, 15, *tmp);
tmp += 1;
}
regs->usp = (unsigned long)tmp;
instruction_pointer_set(regs, regs->lr);
}
void __kprobes
simulate_pop32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long *tmp = (unsigned long *)regs->usp;
int i;
for (i = 0; i < ((opcode & 0xf0000) >> 16); i++) {
csky_insn_reg_set_val(regs, i + 4, *tmp);
tmp += 1;
}
if (opcode & 0x100000) {
csky_insn_reg_set_val(regs, 15, *tmp);
tmp += 1;
}
for (i = 0; i < ((opcode & 0xe00000) >> 21); i++) {
csky_insn_reg_set_val(regs, i + 16, *tmp);
tmp += 1;
}
if (opcode & 0x1000000) {
csky_insn_reg_set_val(regs, 29, *tmp);
tmp += 1;
}
regs->usp = (unsigned long)tmp;
instruction_pointer_set(regs, regs->lr);
}
void __kprobes
simulate_bez32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
csky_insn_reg_get_val(regs, tmp, &tmp);
if (tmp == 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
}
void __kprobes
simulate_bnez32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
csky_insn_reg_get_val(regs, tmp, &tmp);
if (tmp != 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
}
void __kprobes
simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
unsigned long val;
csky_insn_reg_get_val(regs, tmp, &val);
val -= 1;
if (val > 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_bhsz32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
unsigned long val;
csky_insn_reg_get_val(regs, tmp, &val);
if (val >= 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_bhz32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
unsigned long val;
csky_insn_reg_get_val(regs, tmp, &val);
if (val > 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_blsz32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
unsigned long val;
csky_insn_reg_get_val(regs, tmp, &val);
if (val <= 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_blz32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
unsigned long val;
csky_insn_reg_get_val(regs, tmp, &val);
if (val < 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_bsr32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp;
tmp = (opcode & 0xffff) << 16;
tmp |= (opcode & 0xffff0000) >> 16;
instruction_pointer_set(regs,
addr + sign_extend32((tmp & 0x3ffffff) << 1, 15));
regs->lr = addr + 4;
}
void __kprobes
simulate_jmpi32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long val;
unsigned long offset = ((opcode & 0xffff0000) >> 14);
val = *(unsigned int *)
((instruction_pointer(regs) + offset) & 0xfffffffc);
instruction_pointer_set(regs, val);
}
void __kprobes
simulate_jsri32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long val;
unsigned long offset = ((opcode & 0xffff0000) >> 14);
val = *(unsigned int *)
((instruction_pointer(regs) + offset) & 0xfffffffc);
regs->lr = addr + 4;
instruction_pointer_set(regs, val);
}

View File

@@ -0,0 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __CSKY_KERNEL_PROBES_SIMULATE_INSN_H
#define __CSKY_KERNEL_PROBES_SIMULATE_INSN_H
#define __CSKY_INSN_FUNCS(name, mask, val) \
static __always_inline bool csky_insn_is_##name(probe_opcode_t code) \
{ \
BUILD_BUG_ON(~(mask) & (val)); \
return (code & (mask)) == (val); \
} \
void simulate_##name(u32 opcode, long addr, struct pt_regs *regs);
#define CSKY_INSN_SET_SIMULATE(name, code) \
do { \
if (csky_insn_is_##name(code)) { \
api->handler = simulate_##name; \
return INSN_GOOD_NO_SLOT; \
} \
} while (0)
__CSKY_INSN_FUNCS(br16, 0xfc00, 0x0400)
__CSKY_INSN_FUNCS(bt16, 0xfc00, 0x0800)
__CSKY_INSN_FUNCS(bf16, 0xfc00, 0x0c00)
__CSKY_INSN_FUNCS(jmp16, 0xffc3, 0x7800)
__CSKY_INSN_FUNCS(jsr16, 0xffc3, 0x7801)
__CSKY_INSN_FUNCS(lrw16, 0xfc00, 0x1000)
__CSKY_INSN_FUNCS(pop16, 0xffe0, 0x1480)
__CSKY_INSN_FUNCS(br32, 0x0000ffff, 0x0000e800)
__CSKY_INSN_FUNCS(bt32, 0x0000ffff, 0x0000e860)
__CSKY_INSN_FUNCS(bf32, 0x0000ffff, 0x0000e840)
__CSKY_INSN_FUNCS(jmp32, 0xffffffe0, 0x0000e8c0)
__CSKY_INSN_FUNCS(jsr32, 0xffffffe0, 0x0000e8e0)
__CSKY_INSN_FUNCS(lrw32, 0x0000ffe0, 0x0000ea80)
__CSKY_INSN_FUNCS(pop32, 0xfe00ffff, 0x0000ebc0)
__CSKY_INSN_FUNCS(bez32, 0x0000ffe0, 0x0000e900)
__CSKY_INSN_FUNCS(bnez32, 0x0000ffe0, 0x0000e920)
__CSKY_INSN_FUNCS(bnezad32, 0x0000ffe0, 0x0000e820)
__CSKY_INSN_FUNCS(bhsz32, 0x0000ffe0, 0x0000e9a0)
__CSKY_INSN_FUNCS(bhz32, 0x0000ffe0, 0x0000e940)
__CSKY_INSN_FUNCS(blsz32, 0x0000ffe0, 0x0000e960)
__CSKY_INSN_FUNCS(blz32, 0x0000ffe0, 0x0000e980)
__CSKY_INSN_FUNCS(bsr32, 0x0000fc00, 0x0000e000)
__CSKY_INSN_FUNCS(jmpi32, 0x0000ffff, 0x0000eac0)
__CSKY_INSN_FUNCS(jsri32, 0x0000ffff, 0x0000eae0)
#endif /* __CSKY_KERNEL_PROBES_SIMULATE_INSN_H */

View File

@@ -0,0 +1,150 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
*/
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <asm/cacheflush.h>
#include "decode-insn.h"
#define UPROBE_TRAP_NR UINT_MAX
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
return instruction_pointer(regs);
}
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
unsigned long addr)
{
probe_opcode_t insn;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
auprobe->insn_size = is_insn32(insn) ? 4 : 2;
switch (csky_probe_decode_insn(&insn, &auprobe->api)) {
case INSN_REJECTED:
return -EINVAL;
case INSN_GOOD_NO_SLOT:
auprobe->simulate = true;
break;
default:
break;
}
return 0;
}
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
utask->autask.saved_trap_no = current->thread.trap_no;
current->thread.trap_no = UPROBE_TRAP_NR;
instruction_pointer_set(regs, utask->xol_vaddr);
user_enable_single_step(current);
return 0;
}
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR);
instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
user_disable_single_step(current);
return 0;
}
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
if (t->thread.trap_no != UPROBE_TRAP_NR)
return true;
return false;
}
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
probe_opcode_t insn;
unsigned long addr;
if (!auprobe->simulate)
return false;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
addr = instruction_pointer(regs);
if (auprobe->api.handler)
auprobe->api.handler(insn, addr, regs);
return true;
}
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
/*
* Task has received a fatal signal, so reset back to probed
* address.
*/
instruction_pointer_set(regs, utask->vaddr);
user_disable_single_step(current);
}
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
struct pt_regs *regs)
{
if (ctx == RP_CHECK_CHAIN_CALL)
return regs->usp <= ret->stack;
else
return regs->usp < ret->stack;
}
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
struct pt_regs *regs)
{
unsigned long ra;
ra = regs->lr;
regs->lr = trampoline_vaddr;
return ra;
}
int arch_uprobe_exception_notify(struct notifier_block *self,
unsigned long val, void *data)
{
return NOTIFY_DONE;
}
int uprobe_breakpoint_handler(struct pt_regs *regs)
{
if (uprobe_pre_sstep_notifier(regs))
return 1;
return 0;
}
int uprobe_single_step_handler(struct pt_regs *regs)
{
if (uprobe_post_sstep_notifier(regs))
return 1;
return 0;
}

View File

@@ -193,6 +193,109 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_csky_view;
}
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(tls),
REG_OFFSET_NAME(lr),
REG_OFFSET_NAME(pc),
REG_OFFSET_NAME(sr),
REG_OFFSET_NAME(usp),
REG_OFFSET_NAME(orig_a0),
REG_OFFSET_NAME(a0),
REG_OFFSET_NAME(a1),
REG_OFFSET_NAME(a2),
REG_OFFSET_NAME(a3),
REG_OFFSET_NAME(regs[0]),
REG_OFFSET_NAME(regs[1]),
REG_OFFSET_NAME(regs[2]),
REG_OFFSET_NAME(regs[3]),
REG_OFFSET_NAME(regs[4]),
REG_OFFSET_NAME(regs[5]),
REG_OFFSET_NAME(regs[6]),
REG_OFFSET_NAME(regs[7]),
REG_OFFSET_NAME(regs[8]),
REG_OFFSET_NAME(regs[9]),
#if defined(__CSKYABIV2__)
REG_OFFSET_NAME(exregs[0]),
REG_OFFSET_NAME(exregs[1]),
REG_OFFSET_NAME(exregs[2]),
REG_OFFSET_NAME(exregs[3]),
REG_OFFSET_NAME(exregs[4]),
REG_OFFSET_NAME(exregs[5]),
REG_OFFSET_NAME(exregs[6]),
REG_OFFSET_NAME(exregs[7]),
REG_OFFSET_NAME(exregs[8]),
REG_OFFSET_NAME(exregs[9]),
REG_OFFSET_NAME(exregs[10]),
REG_OFFSET_NAME(exregs[11]),
REG_OFFSET_NAME(exregs[12]),
REG_OFFSET_NAME(exregs[13]),
REG_OFFSET_NAME(exregs[14]),
REG_OFFSET_NAME(rhi),
REG_OFFSET_NAME(rlo),
REG_OFFSET_NAME(dcsr),
#endif
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL.
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
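/*
 * Usage sketch (not part of this change): the byte offset returned by
 * regs_query_register_offset() can be used to read a saved register
 * generically.  sketch_get_register() is an illustrative helper, not an
 * accessor introduced by this diff.
 */
static unsigned long sketch_get_register(struct pt_regs *regs, int offset)
{
	return *(unsigned long *)((unsigned long)regs + offset);
}

static void sketch_dump_lr(struct pt_regs *regs)
{
	int off = regs_query_register_offset("lr");

	if (off >= 0)
		pr_debug("lr = 0x%lx\n", sketch_get_register(regs, off));
}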
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
* regs_within_kernel_stack() checks whether @addr lies within the kernel
* stack page(s); it returns true if so and false otherwise.
*/
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return (addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1));
}
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack
* pointed to by @regs. If the @n th entry is not within the kernel stack,
* this returns 0.
*/
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return *addr;
else
return 0;
}
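/*
 * Sketch of an assumed consumer (not part of this diff): kprobe events use
 * accessors like the ones above to fetch $stackN-style arguments.  A
 * hypothetical pre-handler dumping the first few kernel stack slots at the
 * probed instruction:
 */
static int sketch_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	unsigned int n;

	for (n = 0; n < 4; n++)
		pr_debug("stack[%u] = 0x%lx\n", n,
			 regs_get_kernel_stack_nth(regs, n));
	return 0;
}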
void ptrace_disable(struct task_struct *child)
{
singlestep_disable(child);

View File

@@ -24,26 +24,9 @@ struct screen_info screen_info = {
};
#endif
phys_addr_t __init_memblock memblock_end_of_REG0(void)
{
return (memblock.memory.regions[0].base +
memblock.memory.regions[0].size);
}
phys_addr_t __init_memblock memblock_start_of_REG1(void)
{
return memblock.memory.regions[1].base;
}
size_t __init_memblock memblock_size_of_REG1(void)
{
return memblock.memory.regions[1].size;
}
static void __init csky_memblock_init(void)
{
unsigned long zone_size[MAX_NR_ZONES];
unsigned long zhole_size[MAX_NR_ZONES];
signed long size;
memblock_reserve(__pa(_stext), _end - _stext);
@@ -54,54 +37,36 @@ static void __init csky_memblock_init(void)
memblock_dump_all();
memset(zone_size, 0, sizeof(zone_size));
memset(zhole_size, 0, sizeof(zhole_size));
min_low_pfn = PFN_UP(memblock_start_of_DRAM());
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
max_low_pfn = PFN_UP(memblock_end_of_REG0());
if (max_low_pfn == 0)
max_low_pfn = max_pfn;
max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM());
size = max_pfn - min_low_pfn;
if (memblock.memory.cnt > 1) {
zone_size[ZONE_NORMAL] =
PFN_DOWN(memblock_start_of_REG1()) - min_low_pfn;
zhole_size[ZONE_NORMAL] =
PFN_DOWN(memblock_start_of_REG1()) - max_low_pfn;
if (size <= PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET))
zone_size[ZONE_NORMAL] = size;
else if (size < PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET)) {
zone_size[ZONE_NORMAL] =
PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET);
max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
} else {
if (size <= PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET))
zone_size[ZONE_NORMAL] = max_pfn - min_low_pfn;
else {
zone_size[ZONE_NORMAL] =
PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
}
max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE);
}
#ifdef CONFIG_HIGHMEM
size = 0;
if (memblock.memory.cnt > 1) {
size = PFN_DOWN(memblock_size_of_REG1());
highstart_pfn = PFN_DOWN(memblock_start_of_REG1());
} else {
size = max_pfn - min_low_pfn -
PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
highstart_pfn = min_low_pfn +
PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
}
zone_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
if (size > 0)
zone_size[ZONE_HIGHMEM] = size;
highend_pfn = max_pfn;
highstart_pfn = max_low_pfn;
highend_pfn = max_pfn;
#endif
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
dma_contiguous_reserve(0);
free_area_init_node(0, zone_size, min_low_pfn, zhole_size);
free_area_init_node(0, zone_size, min_low_pfn, NULL);
}
void __init setup_arch(char **cmdline_p)

View File

@@ -175,6 +175,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
sigset_t *oldset = sigmask_to_save();
int ret;
rseq_signal_deliver(ksig, regs);
/* Are we from a system call? */
if (in_syscall(regs)) {
/* Avoid additional syscall restarting via ret_from_exception */
@@ -251,6 +253,9 @@ static void do_signal(struct pt_regs *regs)
asmlinkage void do_notify_resume(struct pt_regs *regs,
unsigned long thread_info_flags)
{
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
/* Handle pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
@@ -258,5 +263,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
}
}

View File

@@ -22,6 +22,9 @@
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif
struct ipi_data_struct {
unsigned long bits ____cacheline_aligned;
@@ -156,6 +159,8 @@ volatile unsigned int secondary_hint;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;
unsigned long secondary_msa1;
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
unsigned long mask = 1 << cpu;
@@ -164,6 +169,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
(unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
secondary_hint = mfcr("cr31");
secondary_ccr = mfcr("cr18");
secondary_msa1 = read_mmu_msa1();
/*
* Because other CPUs are in reset status, we must flush data

View File

@@ -14,6 +14,7 @@
#include <linux/kallsyms.h>
#include <linux/rtc.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <asm/setup.h>
#include <asm/traps.h>
@@ -109,14 +110,14 @@ void buserr(struct pt_regs *regs)
force_sig_fault(SIGSEGV, 0, (void __user *)regs->pc);
}
#define USR_BKPT 0x1464
asmlinkage void trap_c(struct pt_regs *regs)
{
int sig;
unsigned long vector;
siginfo_t info;
struct task_struct *tsk = current;
vector = (mfcr("psr") >> 16) & 0xff;
vector = (regs->sr >> 16) & 0xff;
switch (vector) {
case VEC_ZERODIV:
@@ -125,10 +126,27 @@ asmlinkage void trap_c(struct pt_regs *regs)
break;
/* ptrace */
case VEC_TRACE:
#ifdef CONFIG_KPROBES
if (kprobe_single_step_handler(regs))
return;
#endif
#ifdef CONFIG_UPROBES
if (uprobe_single_step_handler(regs))
return;
#endif
info.si_code = TRAP_TRACE;
sig = SIGTRAP;
break;
case VEC_ILLEGAL:
tsk->thread.trap_no = vector;
#ifdef CONFIG_KPROBES
if (kprobe_breakpoint_handler(regs))
return;
#endif
#ifdef CONFIG_UPROBES
if (uprobe_breakpoint_handler(regs))
return;
#endif
die_if_kernel("Kernel mode ILLEGAL", regs, vector);
#ifndef CONFIG_CPU_NO_USER_BKPT
if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT)
@@ -146,16 +164,20 @@ asmlinkage void trap_c(struct pt_regs *regs)
sig = SIGTRAP;
break;
case VEC_ACCESS:
tsk->thread.trap_no = vector;
return buserr(regs);
#ifdef CONFIG_CPU_NEED_SOFTALIGN
case VEC_ALIGN:
tsk->thread.trap_no = vector;
return csky_alignment(regs);
#endif
#ifdef CONFIG_CPU_HAS_FPU
case VEC_FPE:
tsk->thread.trap_no = vector;
die_if_kernel("Kernel mode FPE", regs, vector);
return fpu_fpe(regs);
case VEC_PRIV:
tsk->thread.trap_no = vector;
die_if_kernel("Kernel mode PRIV", regs, vector);
if (fpu_libc_helper(regs))
return;
@@ -164,5 +186,8 @@ asmlinkage void trap_c(struct pt_regs *regs)
sig = SIGSEGV;
break;
}
tsk->thread.trap_no = vector;
send_sig(sig, current, 0);
}
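/*
 * Illustration (not part of this diff): the VEC_TRACE/VEC_ILLEGAL dispatch
 * above is what a registered kprobe ultimately relies on.  A generic
 * registration sketch; the symbol name is purely illustrative.
 */
static int sketch_kprobe_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_debug("kprobe fired at %pS\n", (void *)instruction_pointer(regs));
	return 0;
}

static struct kprobe sketch_kp = {
	.symbol_name	= "do_page_fault",	/* illustrative target */
	.pre_handler	= sketch_kprobe_handler,
};

/* register_kprobe(&sketch_kp) in module init, unregister_kprobe(&sketch_kp) on exit */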

View File

@@ -7,8 +7,12 @@
#include <asm/cache.h>
#include <asm/barrier.h>
/* for L1-cache */
#define INS_CACHE (1 << 0)
#define DATA_CACHE (1 << 1)
#define CACHE_INV (1 << 4)
#define CACHE_CLR (1 << 5)
#define CACHE_OMS (1 << 6)
void local_icache_inv_all(void *priv)
{
@@ -16,11 +20,6 @@ void local_icache_inv_all(void *priv)
sync_is();
}
void icache_inv_all(void)
{
on_each_cpu(local_icache_inv_all, NULL, 1);
}
#ifdef CONFIG_CPU_HAS_ICACHE_INS
void icache_inv_range(unsigned long start, unsigned long end)
{
@@ -31,9 +30,43 @@ void icache_inv_range(unsigned long start, unsigned long end)
sync_is();
}
#else
struct cache_range {
unsigned long start;
unsigned long end;
};
static DEFINE_SPINLOCK(cache_lock);
static inline void cache_op_line(unsigned long i, unsigned int val)
{
mtcr("cr22", i);
mtcr("cr17", val);
}
void local_icache_inv_range(void *priv)
{
struct cache_range *param = priv;
unsigned long i = param->start & ~(L1_CACHE_BYTES - 1);
unsigned long flags;
spin_lock_irqsave(&cache_lock, flags);
for (; i < param->end; i += L1_CACHE_BYTES)
cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS);
spin_unlock_irqrestore(&cache_lock, flags);
sync_is();
}
void icache_inv_range(unsigned long start, unsigned long end)
{
icache_inv_all();
struct cache_range param = { start, end };
if (irqs_disabled())
local_icache_inv_range(&param);
else
on_each_cpu(local_icache_inv_range, &param, 1);
}
#endif
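/*
 * Context for the fallback path above (illustrative, not from this diff):
 * without an I-cache invalidate-by-address instruction the range invalidate
 * has to be broadcast to every CPU.  A typical caller pattern is instruction
 * patching, assuming the data side has already been written back:
 */
static void sketch_patch_insn(u32 *addr, u32 new_insn)
{
	WRITE_ONCE(*addr, new_insn);
	/* assumed: D-cache write-back is handled elsewhere by the caller */
	icache_inv_range((unsigned long)addr,
			 (unsigned long)addr + sizeof(new_insn));
}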

View File

@@ -18,6 +18,7 @@
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
@@ -53,6 +54,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
int fault;
unsigned long address = mmu_meh & PAGE_MASK;
if (kprobe_page_fault(regs, tsk->thread.trap_no))
return;
si_code = SEGV_MAPERR;
#ifndef CONFIG_CPU_HAS_TLBI
@@ -179,11 +183,14 @@ bad_area:
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
return;
}
no_context:
tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
@@ -198,6 +205,8 @@ no_context:
die_if_kernel("Oops", regs, write);
out_of_memory:
tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
/*
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
@@ -206,6 +215,8 @@ out_of_memory:
return;
do_sigbus:
tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */

View File

@@ -8,7 +8,7 @@ config MICROBLAZE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_UNCACHED_SEGMENT if !MMU
select ARCH_HAS_DMA_SET_UNCACHED if !MMU
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_TABLE_SORT
@@ -47,8 +47,6 @@ config MICROBLAZE
select CPU_NO_EFFICIENT_FFS
select MMU_GATHER_NO_RANGE if MMU
select SPARSE_IRQ
select GENERIC_IRQ_MULTI_HANDLER
select HANDLE_DOMAIN_IRQ
# Endianness selection
choice

View File

@@ -11,4 +11,7 @@
struct pt_regs;
extern void do_IRQ(struct pt_regs *regs);
/* should be defined in each interrupt controller driver */
extern unsigned int xintc_get_irq(void);
#endif /* _ASM_MICROBLAZE_IRQ_H */

View File

@@ -20,10 +20,29 @@
#include <linux/irqchip.h>
#include <linux/of_irq.h>
static u32 concurrent_irq;
void __irq_entry do_IRQ(struct pt_regs *regs)
{
unsigned int irq;
struct pt_regs *old_regs = set_irq_regs(regs);
trace_hardirqs_off();
handle_arch_irq(regs);
irq_enter();
irq = xintc_get_irq();
next_irq:
BUG_ON(!irq);
generic_handle_irq(irq);
irq = xintc_get_irq();
if (irq != -1U) {
pr_debug("next irq: %d\n", irq);
++concurrent_irq;
goto next_irq;
}
irq_exit();
set_irq_regs(old_regs);
trace_hardirqs_on();
}

View File

@@ -40,7 +40,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
#define UNCACHED_SHADOW_MASK 0
#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
void *uncached_kernel_address(void *ptr)
void *arch_dma_set_uncached(void *ptr, size_t size)
{
unsigned long addr = (unsigned long)ptr;
@@ -49,11 +49,4 @@ void *uncached_kernel_address(void *ptr)
pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
return (void *)addr;
}
void *cached_kernel_address(void *ptr)
{
unsigned long addr = (unsigned long)ptr;
return (void *)(addr & ~UNCACHED_SHADOW_MASK);
}
#endif /* CONFIG_MMU */

View File

@@ -1192,8 +1192,9 @@ config DMA_NONCOHERENT
# significant advantages.
#
select ARCH_HAS_DMA_WRITE_COMBINE
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_UNCACHED_SEGMENT
select ARCH_HAS_DMA_SET_UNCACHED
select DMA_NONCOHERENT_MMAP
select DMA_NONCOHERENT_CACHE_SYNC
select NEED_DMA_MAP_STATE

View File

@@ -49,16 +49,11 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
dma_cache_wback_inv((unsigned long)page_address(page), size);
}
void *uncached_kernel_address(void *addr)
void *arch_dma_set_uncached(void *addr, size_t size)
{
return (void *)(__pa(addr) + UNCAC_BASE);
}
void *cached_kernel_address(void *addr)
{
return __va(addr) - UNCAC_BASE;
}
static inline void dma_sync_virt(void *addr, size_t size,
enum dma_data_direction dir)
{

View File

@@ -2,9 +2,10 @@
config NIOS2
def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_HAS_UNCACHED_SEGMENT
select ARCH_HAS_DMA_SET_UNCACHED
select ARCH_NO_SWAP
select TIMER_OF
select GENERIC_ATOMIC64

View File

@@ -67,7 +67,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
flush_dcache_range(start, start + size);
}
void *uncached_kernel_address(void *ptr)
void *arch_dma_set_uncached(void *ptr, size_t size)
{
unsigned long addr = (unsigned long)ptr;
@@ -75,13 +75,3 @@ void *uncached_kernel_address(void *ptr)
return (void *)ptr;
}
void *cached_kernel_address(void *ptr)
{
unsigned long addr = (unsigned long)ptr;
addr &= ~CONFIG_NIOS2_IO_REGION_BASE;
addr |= CONFIG_NIOS2_KERNEL_REGION_BASE;
return (void *)ptr;
}

View File

@@ -7,6 +7,8 @@
config OPENRISC
def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_DMA_SET_UNCACHED
select ARCH_HAS_DMA_CLEAR_UNCACHED
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select OF
select OF_EARLY_FLATTREE

View File

@@ -11,8 +11,6 @@
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* DMA mapping callbacks...
* As alloc_coherent is the only DMA callback being used currently, that's
* the only thing implemented properly. The rest need looking into...
*/
#include <linux/dma-noncoherent.h>
@@ -67,62 +65,29 @@ static const struct mm_walk_ops clear_nocache_walk_ops = {
.pte_entry = page_clear_nocache,
};
/*
* Alloc "coherent" memory, which for OpenRISC means simply uncached.
*
* This function effectively just calls __get_free_pages, sets the
* cache-inhibit bit on those pages, and makes sure that the pages are
* flushed out of the cache before they are used.
*
* If the NON_CONSISTENT attribute is set, then this function just
* returns "normal", cacheable memory.
*
* There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
* into consideration here, too. All current known implementations of
* the OR1K support only strongly ordered memory accesses, so that flag
* is being ignored for now; uncached but write-combined memory is a
* missing feature of the OR1K.
*/
void *
arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
unsigned long va;
void *page;
page = alloc_pages_exact(size, gfp | __GFP_ZERO);
if (!page)
return NULL;
/* This gives us the real physical address of the first page. */
*dma_handle = __pa(page);
va = (unsigned long)page;
unsigned long va = (unsigned long)cpu_addr;
int error;
/*
* We need to iterate through the pages, clearing the dcache for
* them and setting the cache-inhibit bit.
*/
if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
NULL)) {
free_pages_exact(page, size);
return NULL;
}
return (void *)va;
error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
NULL);
if (error)
return ERR_PTR(error);
return cpu_addr;
}
void
arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
unsigned long va = (unsigned long)vaddr;
unsigned long va = (unsigned long)cpu_addr;
/* walk_page_range shouldn't be able to fail here */
WARN_ON(walk_page_range(&init_mm, va, va + size,
&clear_nocache_walk_ops, NULL));
free_pages_exact(vaddr, size);
}
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
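/*
 * Simplified caller flow (paraphrased sketch, not the actual
 * kernel/dma/direct.c code): the generic DMA layer allocates and flushes the
 * pages, then asks the architecture for an uncached view of them via the
 * hook converted above.
 */
static void *sketch_dma_alloc_uncached(size_t size, dma_addr_t *dma_handle, gfp_t gfp)
{
	void *cached = alloc_pages_exact(size, gfp | __GFP_ZERO);
	void *uncached;

	if (!cached)
		return NULL;

	*dma_handle = __pa(cached);
	/* write back/invalidate caches where the arch provides this hook */
	arch_dma_prep_coherent(virt_to_page(cached), size);
	uncached = arch_dma_set_uncached(cached, size);
	if (IS_ERR(uncached)) {
		free_pages_exact(cached, size);
		return NULL;
	}
	return uncached;
}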

View File

@@ -239,10 +239,8 @@ KBUILD_CFLAGS += $(call cc-option,-mno-vsx)
KBUILD_CFLAGS += $(call cc-option,-mno-spe)
KBUILD_CFLAGS += $(call cc-option,-mspe=no)
# FIXME: the module load should be taught about the additional relocs
# generated by this.
# revert to pre-gcc-4.4 behaviour of .eh_frame
KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
# Don't emit .eh_frame since we have no use for it
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# Never use string load/store instructions as they are
# often slow when they are implemented at all
@@ -298,6 +296,7 @@ $(BOOT_TARGETS2): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
PHONY += bootwrapper_install
bootwrapper_install:
$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
@@ -403,9 +402,11 @@ define archhelp
@echo ' (minus the .dts extension).'
endef
PHONY += install
install:
$(Q)$(MAKE) $(build)=$(boot) install
PHONY += vdso_install
vdso_install:
ifdef CONFIG_PPC64
$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
@@ -425,6 +426,7 @@ archheaders:
ifdef CONFIG_STACKPROTECTOR
prepare: stack_protector_prepare
PHONY += stack_protector_prepare
stack_protector_prepare: prepare0
ifdef CONFIG_PPC64
$(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' include/generated/asm-offsets.h))
@@ -436,10 +438,12 @@ endif
ifdef CONFIG_SMP
prepare: task_cpu_prepare
PHONY += task_cpu_prepare
task_cpu_prepare: prepare0
$(eval KBUILD_CFLAGS += -D_TASK_CPU=$(shell awk '{if ($$2 == "TASK_CPU") print $$3;}' include/generated/asm-offsets.h))
endif
PHONY += checkbin
# Check toolchain versions:
# - gcc-4.6 is the minimum kernel-wide version so nothing required.
checkbin:

View File

@@ -445,6 +445,8 @@ install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
zInstall: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $^
PHONY += install zInstall
# anything not in $(targets)
clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \

View File

@@ -44,9 +44,6 @@ p_end: .long _end
p_pstack: .long _platform_stack_top
#endif
.globl _zimage_start
/* Clang appears to require the .weak directive to be after the symbol
* is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921 */
.weak _zimage_start
_zimage_start:
.globl _zimage_start_lib

View File

@@ -97,6 +97,10 @@ ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
unsigned long __init early_init(unsigned long dt_ptr);
void __init machine_init(u64 dt_ptr);
#endif
long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, unsigned long r0, struct pt_regs *regs);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr);
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low);
@@ -104,14 +108,6 @@ long sys_switch_endian(void);
notrace unsigned int __check_irq_replay(void);
void notrace restore_interrupts(void);
/* ptrace */
long do_syscall_trace_enter(struct pt_regs *regs);
void do_syscall_trace_leave(struct pt_regs *regs);
/* process */
void restore_math(struct pt_regs *regs);
void restore_tm_state(struct pt_regs *regs);
/* prom_init (OpenFirmware) */
unsigned long __init prom_init(unsigned long r3, unsigned long r4,
unsigned long pp,
@@ -122,9 +118,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
void __init early_setup(unsigned long dt_ptr);
void early_setup_secondary(void);
/* time */
void accumulate_stolen_time(void);
/* misc runtime */
extern u64 __bswapdi2(u64);
extern s64 __lshrdi3(s64, int);

View File

@@ -17,9 +17,9 @@
* updating the accessed and modified bits in the page table tree.
*/
#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
#define _PAGE_USER 0x004 /* usermode access allowed */
#define _PAGE_USER 0x001 /* usermode access allowed */
#define _PAGE_RW 0x002 /* software: user write access allowed */
#define _PAGE_PRESENT 0x004 /* software: pte contains a translation */
#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
@@ -27,7 +27,7 @@
#define _PAGE_DIRTY 0x080 /* C: page changed */
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
#define _PAGE_EXEC 0x200 /* software: exec allowed */
#define _PAGE_RW 0x400 /* software: user write access allowed */
#define _PAGE_HASHPTE 0x400 /* hash_page has made an HPTE for this pte */
#define _PAGE_SPECIAL 0x800 /* software: Special page */
#ifdef CONFIG_PTE_64BIT

View File

@@ -366,10 +366,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr) \
((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
(pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte) kunmap_atomic(pte)
#define pte_offset_map(dir, addr) pte_offset_kernel((dir), (addr))
static inline void pte_unmap(pte_t *pte) { }
/*
* Encode and decode a swap entry.

View File

@@ -156,6 +156,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
extern int hash__has_transparent_hugepage(void);
#endif
static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
{
BUG();
return pmd;
}
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */

View File

@@ -246,7 +246,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
*/
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
(_PAGE_PTE | H_PAGE_THP_HUGE));
}
@@ -272,6 +272,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
{
return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */

View File

@@ -3,6 +3,7 @@
#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
#include <linux/const.h>
#include <asm/reg.h>
#define AMR_KUAP_BLOCK_READ UL(0x4000000000000000)
#define AMR_KUAP_BLOCK_WRITE UL(0x8000000000000000)
@@ -56,7 +57,20 @@
#ifdef CONFIG_PPC_KUAP
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
static inline void kuap_restore_amr(struct pt_regs *regs)
{
if (mmu_has_feature(MMU_FTR_RADIX_KUAP))
mtspr(SPRN_AMR, regs->kuap);
}
static inline void kuap_check_amr(void)
{
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP))
WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
}
/*
* We support individually allowing read or write, but we don't support nesting
@@ -127,6 +141,14 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
(regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
"Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
}
#else /* CONFIG_PPC_KUAP */
static inline void kuap_restore_amr(struct pt_regs *regs)
{
}
static inline void kuap_check_amr(void)
{
}
#endif /* CONFIG_PPC_KUAP */
#endif /* __ASSEMBLY__ */

View File

@@ -1303,7 +1303,9 @@ extern void serialize_against_pte_lookup(struct mm_struct *mm);
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
if (radix_enabled())
return radix__pmd_mkdevmap(pmd);
return hash__pmd_mkdevmap(pmd);
}
static inline int pmd_devmap(pmd_t pmd)

View File

@@ -263,6 +263,11 @@ static inline int radix__has_transparent_hugepage(void)
}
#endif
static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
{
return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
}
extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys);

Some files were not shown because too many files have changed in this diff.