Merge android12-5.10.19+ (d92620d) into msm-5.10
* refs/heads/tmp-d92620d:
  BACKPORT: media: v4l2-ctrl: Add base layer priority id control.
  ANDROID: GKI: defconfig: disable CONFIG_ION
  ANDROID: scsi: ufs: replace variants with android vendor hooks
  BACKPORT: media: v4l2-ctrl: Add layer wise bitrate controls for h264
  BACKPORT: media: v4l2-ctrl: Add frame-specific min/max qp controls for hevc
  FROMLIST: dts: bindings: Document device tree bindings for Arm TRBE
  FROMLIST: coresight: sink: Add TRBE driver
  FROMLIST: coresight: core: Add support for dedicated percpu sinks
  FROMLIST: coresight: etm-perf: Handle stale output handles
  FROMLIST: dts: bindings: Document device tree bindings for ETE
  FROMLIST: coresight: ete: Add support for ETE tracing
  FROMLIST: coresight: ete: Add support for ETE sysreg access
  FROMLIST: coresight: etm4x: Add support for PE OS lock
  FROMLIST: coresight: Do not scan for graph if none is present
  FROMLIST: coresight: etm-perf: Allow an event to use different sinks
  FROMLIST: coresight: etm4x: Move ETM to prohibited region for disable
  FROMLIST: arm64: kvm: Enable access to TRBE support for host
  FROMLIST: arm64: Add TRBE definitions
  FROMLIST: arm64: Add support for trace synchronization barrier
  FROMLIST: kvm: arm64: Disable guest access to trace filter controls
  FROMLIST: kvm: arm64: nvhe: Save the SPE context early
  FROMLIST: kvm: arm64: Hide system instruction access to Trace registers
  FROMLIST: perf: aux: Add CoreSight PMU buffer formats
  FROMLIST: perf: aux: Add flags for the buffer format
  UPSTREAM: Documentation: coresight: Add PID tracing description
  UPSTREAM: coresight: etm-perf: Support PID tracing for kernel at EL2
  UPSTREAM: coresight: etm-perf: Clarify comment on perf options
  UPSTREAM: coresight: etm4x: Fix merge resolution for amba rework
  UPSTREAM: coresight: etm4x: Handle accesses to TRCSTALLCTLR
  UPSTREAM: coresight: Add support for v8.4 SelfHosted tracing
  UPSTREAM: arm64: Add TRFCR_ELx definitions
  UPSTREAM: dts: bindings: coresight: ETM system register access only units
  UPSTREAM: coresight: etm4x: Add support for sysreg only devices
  UPSTREAM: coresight: etm4x: Run arch feature detection on the CPU
  UPSTREAM: coresight: etm4x: Refactor probing routine
  UPSTREAM: coresight: etm4x: Detect system instructions support
  UPSTREAM: coresight: etm4x: Add necessary synchronization for sysreg access
  UPSTREAM: coresight: etm4x: Expose trcdevarch via sysfs
  UPSTREAM: coresight: etm4x: Use TRCDEVARCH for component discovery
  UPSTREAM: coresight: etm4x: Detect access early on the target CPU
  UPSTREAM: coresight: etm4x: Handle ETM architecture version
  UPSTREAM: coresight: etm4x: Clean up exception level masks
  UPSTREAM: coresight: etm4x: Cleanup secure exception level masks
  UPSTREAM: coresight: etm4x: Check for Software Lock
  UPSTREAM: coresight: etm4x: Define DEVARCH register fields
  UPSTREAM: coresight: etm4x: Hide sysfs attributes for unavailable registers
  UPSTREAM: coresight: etm4x: Add sysreg access helpers
  UPSTREAM: coresight: etm4x: Add commentary on the registers
  UPSTREAM: coresight: etm4x: Make offset available for sysfs attributes
  UPSTREAM: coresight: etm4x: Convert all register accesses
  UPSTREAM: coresight: etm4x: Always read the registers on the host CPU
  UPSTREAM: coresight: Convert claim/disclaim operations to use access wrappers
  UPSTREAM: coresight: Convert coresight_timeout to use access abstraction
  UPSTREAM: coresight: tpiu: Prepare for using coresight device access abstraction
  UPSTREAM: coresight: Introduce device access abstraction
  UPSTREAM: coresight: etm4x: Skip accessing TRCPDCR in save/restore
  UPSTREAM: coresight: etm4x: Handle access to TRCSSPCICRn
  UPSTREAM: coresight: etm4x: add AMBA id for Cortex-A55 and Cortex-A75
  UPSTREAM: coresight: cti: Reduce scope for the variable 'cs_fwnode' in cti_plat_create_connection()
  BACKPORT: UPSTREAM: amba: Make the remove callback return void
  UPSTREAM: coresight: etm4x: Modify core-commit to avoid HiSilicon ETM overflow
  ANDROID: GKI: Enable CONFIG_NETFILTER_XT_TARGET_TEE=y
  ANDROID: GKI: Update abi_gki_aarch64_qcom for zram and zsmalloc
  ANDROID: sched/rt: Only enable RT sync for SMP targets
  UPSTREAM: kfence: report sensitive information based on no_hash_pointers
  ANDROID: Incremental fs: set the correct access to mapped files
  ANDROID: Incremental fs: Build merkle tree when enabling verity
  ANDROID: Incremental fs: Add FS_IOC_MEASURE_VERITY
  ANDROID: Incremental fs: Store fs-verity state in backing file
  ANDROID: Incremental fs: Add FS_IOC_GETFLAGS
  ANDROID: Incremental fs: Add FS_IOC_ENABLE_VERITY
  ANDROID: fs-verity: Export function to check signatures
  ANDROID: Incremental fs: Fix memory leak on closing file
  ANDROID: Incremental fs: inotify on create mapped file
  ANDROID: Incremental fs: inotify support
  fs-verity: support reading signature with ioctl
  fs-verity: support reading descriptor with ioctl
  fs-verity: support reading Merkle tree with ioctl
  fs-verity: add FS_IOC_READ_VERITY_METADATA ioctl
  fs-verity: don't pass whole descriptor to fsverity_verify_signature()
  fs-verity: factor out fsverity_get_descriptor()
  fs: simplify freeze_bdev/thaw_bdev
  f2fs: remove FAULT_ALLOC_BIO
  f2fs: use blkdev_issue_flush in __submit_flush_wait
  f2fs: remove a few bd_part checks
  Documentation: f2fs: fix typo s/automaic/automatic
  f2fs: give a warning only for readonly partition
  f2fs: don't grab superblock freeze for flush/ckpt thread
  f2fs: add ckpt_thread_ioprio sysfs node
  f2fs: introduce checkpoint_merge mount option
  f2fs: relocate inline conversion from mmap() to mkwrite()
  f2fs: fix a wrong condition in __submit_bio
  f2fs: remove unnecessary initialization in xattr.c
  f2fs: fix to avoid inconsistent quota data
  f2fs: flush data when enabling checkpoint back
  f2fs: deprecate f2fs_trace_io
  f2fs: Remove readahead collision detection
  f2fs: remove unused stat_{inc, dec}_atomic_write
  f2fs: introduce sb_status sysfs node
  f2fs: fix to use per-inode maxbytes
  f2fs: compress: fix potential deadlock
  libfs: unexport generic_ci_d_compare() and generic_ci_d_hash()
  f2fs: fix to set/clear I_LINKABLE under i_lock
  f2fs: fix null page reference in redirty_blocks
  f2fs: clean up post-read processing
  f2fs: trival cleanup in move_data_block()
  f2fs: fix out-of-repair __setattr_copy()
  f2fs: fix to tag FIEMAP_EXTENT_MERGED in f2fs_fiemap()
  f2fs: introduce a new per-sb directory in sysfs
  f2fs: compress: support compress level
  f2fs: compress: deny setting unsupported compress algorithm
  f2fs: relocate f2fs_precache_extents()
  f2fs: enforce the immutable flag on open files
  f2fs: enhance to update i_mode and acl atomically in f2fs_setattr()
  f2fs: fix to set inode->i_mode correctly for posix_acl_update_mode
  f2fs: Replace expression with offsetof()
  f2fs: handle unallocated section and zone on pinned/atgc

Conflicts:
	Documentation/devicetree/bindings
	Documentation/devicetree/bindings/arm/coresight.txt
	drivers/hwtracing/coresight/Kconfig
	drivers/hwtracing/coresight/coresight-core.c
	include/linux/coresight.h

Change-Id: I88ddc19d690ecf6657527b172bd7b2602fcc2c8c
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x

@@ -371,6 +371,14 @@ Contact: Mathieu Poirier <mathieu.poirier@linaro.org>
Description:	(Read) Print the content of the Device ID Register
		(0xFC8). The value is taken directly from the HW.

What:		/sys/bus/coresight/devices/etm<N>/mgmt/trcdevarch
Date:		January 2021
KernelVersion:	5.12
Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
Description:	(Read) Print the content of the Device Architecture Register
		(offset 0xFBC). The value is taken directly from the HW.

What:		/sys/bus/coresight/devices/etm<N>/mgmt/trcdevtype
Date:		April 2015
KernelVersion:	4.01
Documentation/ABI/testing/sysfs-bus-coresight-devices-trbe (new file, 14 lines)
@@ -0,0 +1,14 @@
What:		/sys/bus/coresight/devices/trbe<cpu>/align
Date:		March 2021
KernelVersion:	5.13
Contact:	Anshuman Khandual <anshuman.khandual@arm.com>
Description:	(Read) Shows the TRBE write pointer alignment. This value
		is fetched from the TRBIDR register.

What:		/sys/bus/coresight/devices/trbe<cpu>/flag
Date:		March 2021
KernelVersion:	5.13
Contact:	Anshuman Khandual <anshuman.khandual@arm.com>
Description:	(Read) Shows if TRBE updates in the memory are with access
		and dirty flag updates as well. This value is fetched from
		the TRBIDR register.
@@ -377,3 +377,35 @@ Description: This gives a control to limit the bio size in f2fs.
		Default is zero, which will follow underlying block layer limit,
		whereas, if it has a certain bytes value, f2fs won't submit a
		bio larger than that size.

What:		/sys/fs/f2fs/<disk>/stat/sb_status
Date:		December 2020
Contact:	"Chao Yu" <yuchao0@huawei.com>
Description:	Show status of f2fs superblock in real time.

		====== ===================== =================================
		value  sb status macro       description
		====== ===================== =================================
		0x1    SBI_IS_DIRTY          dirty flag for checkpoint
		0x2    SBI_IS_CLOSE          specify unmounting
		0x4    SBI_NEED_FSCK         need fsck.f2fs to fix
		0x8    SBI_POR_DOING         recovery is doing or not
		0x10   SBI_NEED_SB_WRITE     need to recover superblock
		0x20   SBI_NEED_CP           need to checkpoint
		0x40   SBI_IS_SHUTDOWN       shutdown by ioctl
		0x80   SBI_IS_RECOVERED      recovered orphan/data
		0x100  SBI_CP_DISABLED       CP was disabled last mount
		0x200  SBI_CP_DISABLED_QUICK CP was disabled quickly
		0x400  SBI_QUOTA_NEED_FLUSH  need to flush quota info in CP
		0x800  SBI_QUOTA_SKIP_FLUSH  skip flushing quota in current CP
		0x1000 SBI_QUOTA_NEED_REPAIR quota file may be corrupted
		0x2000 SBI_IS_RESIZEFS       resizefs is in process
		====== ===================== =================================
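For quick inspection, a minimal userspace sketch that decodes this node. The disk name "sda1" and the assumption that the node prints a hex value are illustrative, not part of this diff::

	#include <stdio.h>

	static const struct { unsigned int bit; const char *name; } sb_flags[] = {
		{ 0x1,    "SBI_IS_DIRTY" },          { 0x2,    "SBI_IS_CLOSE" },
		{ 0x4,    "SBI_NEED_FSCK" },         { 0x8,    "SBI_POR_DOING" },
		{ 0x10,   "SBI_NEED_SB_WRITE" },     { 0x20,   "SBI_NEED_CP" },
		{ 0x40,   "SBI_IS_SHUTDOWN" },       { 0x80,   "SBI_IS_RECOVERED" },
		{ 0x100,  "SBI_CP_DISABLED" },       { 0x200,  "SBI_CP_DISABLED_QUICK" },
		{ 0x400,  "SBI_QUOTA_NEED_FLUSH" },  { 0x800,  "SBI_QUOTA_SKIP_FLUSH" },
		{ 0x1000, "SBI_QUOTA_NEED_REPAIR" }, { 0x2000, "SBI_IS_RESIZEFS" },
	};

	int main(void)
	{
		/* example path; assumes the value is printed in hex */
		FILE *f = fopen("/sys/fs/f2fs/sda1/stat/sb_status", "r");
		unsigned int status, i;

		if (!f || fscanf(f, "%x", &status) != 1)
			return 1;
		fclose(f);
		for (i = 0; i < sizeof(sb_flags) / sizeof(sb_flags[0]); i++)
			if (status & sb_flags[i].bit)
				printf("%s\n", sb_flags[i].name);	/* per the table above */
		return 0;
	}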
What:		/sys/fs/f2fs/<disk>/ckpt_thread_ioprio
Date:		January 2021
Contact:	"Daeho Jeong" <daehojeong@google.com>
Description:	Give a way to change checkpoint merge daemon's io priority.
		Its default value is "be,3", which means "BE" I/O class and
		I/O priority "3". We can select the class between "rt" and "be",
		and set the I/O priority within valid range of it. "," delimiter
		is necessary in between I/O class and priority number.
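A minimal sketch of the "<class>,<priority>" format described above; the disk name is an example, not part of this diff::

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* example: raise the checkpoint daemon to RT class, priority 0 */
		int fd = open("/sys/fs/f2fs/sda1/ckpt_thread_ioprio", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, "rt,0", 4);	/* default is "be,3" */
		close(fd);
		return 0;
	}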
@@ -88,8 +88,8 @@ A typical out-of-bounds access looks like this::

The header of the report provides a short summary of the function involved in
the access. It is followed by more detailed information about the access and
-its origin. Note that, real kernel addresses are only shown for
-``CONFIG_DEBUG_KERNEL=y`` builds.
+its origin. Note that, real kernel addresses are only shown when using the
+kernel command line option ``no_hash_pointers``.

Use-after-free accesses are reported as::

@@ -184,8 +184,8 @@ invalidly written bytes (offset from the address) are shown; in this
representation, '.' denote untouched bytes. In the example above ``0xac`` is
the value written to the invalid address at offset 0, and the remaining '.'
denote that no following bytes have been touched. Note that, real values are
-only shown for ``CONFIG_DEBUG_KERNEL=y`` builds; to avoid information
-disclosure for non-debug builds, '!' is used instead to denote invalidly
+only shown if the kernel was booted with ``no_hash_pointers``; to avoid
+information disclosure otherwise, '!' is used instead to denote invalidly
written bytes.

And finally, KFENCE may also report on invalid accesses to any protected page
@@ -179,7 +179,6 @@ fault_type=%d Support configuring fault injection type, should be
	FAULT_KVMALLOC		0x000000002
	FAULT_PAGE_ALLOC	0x000000004
	FAULT_PAGE_GET		0x000000008
-	FAULT_ALLOC_BIO		0x000000010
	FAULT_ALLOC_NID		0x000000020
	FAULT_ORPHAN		0x000000040
	FAULT_BLOCK		0x000000080
@@ -247,8 +246,24 @@ checkpoint=%s[:%u[%]] Set to "disable" to turn off checkpointing. Set to "enabl
			 hide up to all remaining free space. The actual space that
			 would be unusable can be viewed at /sys/fs/f2fs/<disk>/unusable
			 This space is reclaimed once checkpoint=enable.
checkpoint_merge	 When checkpoint is enabled, this can be used to create a kernel
			 daemon and make it to merge concurrent checkpoint requests as
			 much as possible to eliminate redundant checkpoint issues. Plus,
			 we can eliminate the sluggish issue caused by slow checkpoint
			 operation when the checkpoint is done in a process context in
			 a cgroup having low i/o budget and cpu shares. To make this
			 do better, we set the default i/o priority of the kernel daemon
			 to "3", to give one higher priority than other kernel threads.
			 This is the same way to give a I/O priority to the jbd2
			 journaling thread of ext4 filesystem.
nocheckpoint_merge	 Disable checkpoint merge feature.
compress_algorithm=%s	 Control compress algorithm, currently f2fs supports "lzo",
			 "lz4", "zstd" and "lzo-rle" algorithm.
compress_algorithm=%s:%d Control compress algorithm and its compress level, now, only
			 "lz4" and "zstd" support compress level config.
			 algorithm	level range
			 lz4		3 - 16
			 zstd		1 - 22
compress_log_size=%u	 Support configuring compress cluster size, the size will
			 be 4KB * (1 << %u), 16KB is minimum size, also it's
			 default size.
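A minimal sketch combining two of the options above; the device and mount point are examples, not part of this diff::

	#include <sys/mount.h>
	#include <stdio.h>

	int main(void)
	{
		/* checkpoint_merge plus zstd at compress level 6 (valid: 1 - 22) */
		if (mount("/dev/sda1", "/mnt/f2fs", "f2fs", 0,
			  "checkpoint_merge,compress_algorithm=zstd:6") != 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}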
@@ -831,7 +846,7 @@ This is the default option. f2fs does automatic compression in the writeback of
compression enabled files.

2) compress_mode=user
-This disables the automaic compression and gives the user discretion of choosing the
+This disables the automatic compression and gives the user discretion of choosing the
target file and the timing. The user can do manual compression/decompression on the
compression enabled files using F2FS_IOC_DECOMPRESS_FILE and F2FS_IOC_COMPRESS_FILE
ioctls like the below.
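A hedged sketch of the manual compression path; the ioctl codes are copied locally because a uapi header may not be available, and they should be checked against fs/f2fs/f2fs.h of this series::

	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* assumed to match fs/f2fs/f2fs.h of this kernel series */
	#define F2FS_IOCTL_MAGIC		0xf5
	#define F2FS_IOC_DECOMPRESS_FILE	_IO(F2FS_IOCTL_MAGIC, 23)
	#define F2FS_IOC_COMPRESS_FILE		_IO(F2FS_IOCTL_MAGIC, 24)

	int main(int argc, char **argv)
	{
		int fd = open(argv[1], O_RDWR);	/* a compression enabled file */

		if (fd < 0 || ioctl(fd, F2FS_IOC_COMPRESS_FILE) != 0) {
			perror("F2FS_IOC_COMPRESS_FILE");
			return 1;
		}
		close(fd);
		return 0;
	}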
@@ -217,6 +217,82 @@ FS_IOC_MEASURE_VERITY can fail with the following errors:
- ``EOVERFLOW``: the digest is longer than the specified
  ``digest_size`` bytes. Try providing a larger buffer.

FS_IOC_READ_VERITY_METADATA
---------------------------

The FS_IOC_READ_VERITY_METADATA ioctl reads verity metadata from a
verity file. This ioctl is available since Linux v5.12.

This ioctl allows writing a server program that takes a verity file
and serves it to a client program, such that the client can do its own
fs-verity compatible verification of the file. This only makes sense
if the client doesn't trust the server and if the server needs to
provide the storage for the client.

This is a fairly specialized use case, and most fs-verity users won't
need this ioctl.

This ioctl takes in a pointer to the following structure::

	#define FS_VERITY_METADATA_TYPE_MERKLE_TREE	1
	#define FS_VERITY_METADATA_TYPE_DESCRIPTOR	2
	#define FS_VERITY_METADATA_TYPE_SIGNATURE	3

	struct fsverity_read_metadata_arg {
		__u64 metadata_type;
		__u64 offset;
		__u64 length;
		__u64 buf_ptr;
		__u64 __reserved;
	};
``metadata_type`` specifies the type of metadata to read:

- ``FS_VERITY_METADATA_TYPE_MERKLE_TREE`` reads the blocks of the
  Merkle tree. The blocks are returned in order from the root level
  to the leaf level. Within each level, the blocks are returned in
  the same order that their hashes are themselves hashed.
  See `Merkle tree`_ for more information.

- ``FS_VERITY_METADATA_TYPE_DESCRIPTOR`` reads the fs-verity
  descriptor. See `fs-verity descriptor`_.

- ``FS_VERITY_METADATA_TYPE_SIGNATURE`` reads the signature which was
  passed to FS_IOC_ENABLE_VERITY, if any. See `Built-in signature
  verification`_.

The semantics are similar to those of ``pread()``. ``offset``
specifies the offset in bytes into the metadata item to read from, and
``length`` specifies the maximum number of bytes to read from the
metadata item. ``buf_ptr`` is the pointer to the buffer to read into,
cast to a 64-bit integer. ``__reserved`` must be 0. On success, the
number of bytes read is returned. 0 is returned at the end of the
metadata item. The returned length may be less than ``length``, for
example if the ioctl is interrupted.
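A minimal sketch of the pread()-like loop described above, streaming the Merkle tree of a verity file; the ioctl number is taken from include/uapi/linux/fsverity.h of this series and the struct is repeated locally for self-containment::

	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	#define FS_VERITY_METADATA_TYPE_MERKLE_TREE	1

	struct fsverity_read_metadata_arg {
		uint64_t metadata_type;
		uint64_t offset;
		uint64_t length;
		uint64_t buf_ptr;
		uint64_t __reserved;
	};

	#define FS_IOC_READ_VERITY_METADATA \
		_IOWR('f', 135, struct fsverity_read_metadata_arg)

	int main(int argc, char **argv)
	{
		char buf[4096];
		struct fsverity_read_metadata_arg arg = {
			.metadata_type = FS_VERITY_METADATA_TYPE_MERKLE_TREE,
			.buf_ptr = (uintptr_t)buf,
			.length = sizeof(buf),
		};
		int fd = open(argv[1], O_RDONLY);
		int n;

		/* like pread(): advance offset until 0 marks end of the item */
		while ((n = ioctl(fd, FS_IOC_READ_VERITY_METADATA, &arg)) > 0) {
			fwrite(buf, 1, n, stdout);
			arg.offset += n;
		}
		return n < 0 ? 1 : 0;
	}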
The metadata returned by FS_IOC_READ_VERITY_METADATA isn't guaranteed
to be authenticated against the file digest that would be returned by
`FS_IOC_MEASURE_VERITY`_, as the metadata is expected to be used to
implement fs-verity compatible verification anyway (though absent a
malicious disk, the metadata will indeed match). E.g. to implement
this ioctl, the filesystem is allowed to just read the Merkle tree
blocks from disk without actually verifying the path to the root node.

FS_IOC_READ_VERITY_METADATA can fail with the following errors:

- ``EFAULT``: the caller provided inaccessible memory
- ``EINTR``: the ioctl was interrupted before any data was read
- ``EINVAL``: reserved fields were set, or ``offset + length``
  overflowed
- ``ENODATA``: the file is not a verity file, or
  FS_VERITY_METADATA_TYPE_SIGNATURE was requested but the file doesn't
  have a built-in signature
- ``ENOTTY``: this type of filesystem does not implement fs-verity, or
  this ioctl is not yet implemented on it
- ``EOPNOTSUPP``: the kernel was not configured with fs-verity
  support, or the filesystem superblock has not had the 'verity'
  feature enabled on it. (See `Filesystem support`_.)

FS_IOC_GETFLAGS
---------------
Documentation/trace/coresight/coresight-trbe.rst (new file, 38 lines)
@@ -0,0 +1,38 @@
.. SPDX-License-Identifier: GPL-2.0

==============================
Trace Buffer Extension (TRBE).
==============================

    :Author:   Anshuman Khandual <anshuman.khandual@arm.com>
    :Date:     November 2020

Hardware Description
--------------------

Trace Buffer Extension (TRBE) is a percpu hardware which captures in system
memory, CPU traces generated from a corresponding percpu tracing unit. This
gets plugged in as a coresight sink device because the corresponding trace
generators (ETE), are plugged in as source device.

The TRBE is not compliant to CoreSight architecture specifications, but is
driven via the CoreSight driver framework to support the ETE (which is
CoreSight compliant) integration.

Sysfs files and directories
---------------------------

The TRBE devices appear on the existing coresight bus alongside the other
coresight devices::

	>$ ls /sys/bus/coresight/devices
	trbe0  trbe1  trbe2  trbe3

The ``trbe<N>`` named TRBEs are associated with a CPU.::

	>$ ls /sys/bus/coresight/devices/trbe0/
	align  flag

*Key file items are:-*
   * ``align``: TRBE write pointer alignment
   * ``flag``: TRBE updates memory with access and dirty flags
@@ -512,6 +512,38 @@ The --itrace option controls the type and frequency of synthesized events
Note that only 64-bit programs are currently supported - further work is
required to support instruction decode of 32-bit Arm programs.

2.2) Tracing PID

The kernel can be built to write the PID value into the PE ContextID registers.
For a kernel running at EL1, the PID is stored in CONTEXTIDR_EL1. A PE may
implement Arm Virtualization Host Extensions (VHE), which the kernel can
run at EL2 as a virtualisation host; in this case, the PID value is stored in
CONTEXTIDR_EL2.

perf provides PMU formats that program the ETM to insert these values into the
trace data; the PMU formats are defined as below:

  "contextid1": Available on both EL1 kernel and EL2 kernel. When the
                kernel is running at EL1, "contextid1" enables the PID
                tracing; when the kernel is running at EL2, this enables
                tracing the PID of guest applications.

  "contextid2": Only usable when the kernel is running at EL2. When
                selected, enables PID tracing on EL2 kernel.

  "contextid":  Will be an alias for the option that enables PID
                tracing. I.e,
                contextid == contextid1, on EL1 kernel.
                contextid == contextid2, on EL2 kernel.

perf will always enable PID tracing at the relevant EL; this is accomplished by
automatically enabling the "contextid" config - but for EL2 it is possible to make
specific adjustments using configs "contextid1" and "contextid2", e.g. if a user
wants to trace PIDs for both host and guest, the two configs "contextid1" and
"contextid2" can be set at the same time:

  perf record -e cs_etm/contextid1,contextid2/u -- vm


Generating coverage files for Feedback Directed Optimization: AutoFDO
---------------------------------------------------------------------
@@ -1182,6 +1182,18 @@ enum v4l2_mpeg_video_h264_entropy_mode -
    V4L2_CID_MPEG_VIDEO_H264_MAX_QP is also set, the quantization parameter
    should be chosen to meet both requirements.

``V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP (integer)``
    Minimum quantization parameter for the H264 B frame to limit B frame
    quality to a range. Valid range: from 0 to 51. If
    V4L2_CID_MPEG_VIDEO_H264_MIN_QP is also set, the quantization parameter
    should be chosen to meet both requirements.

``V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP (integer)``
    Maximum quantization parameter for the H264 B frame to limit B frame
    quality to a range. Valid range: from 0 to 51. If
    V4L2_CID_MPEG_VIDEO_H264_MAX_QP is also set, the quantization parameter
    should be chosen to meet both requirements.

``V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (integer)``
    Quantization parameter for an I frame for MPEG4. Valid range: from 1
    to 31.
@@ -1501,6 +1513,26 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
    * - Bit 16:32
      - Layer number

``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR (integer)``
    Indicates bit rate (bps) for hierarchical coding layer 0 for H264 encoder.

``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR (integer)``
    Indicates bit rate (bps) for hierarchical coding layer 1 for H264 encoder.

``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR (integer)``
    Indicates bit rate (bps) for hierarchical coding layer 2 for H264 encoder.

``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR (integer)``
    Indicates bit rate (bps) for hierarchical coding layer 3 for H264 encoder.

``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR (integer)``
    Indicates bit rate (bps) for hierarchical coding layer 4 for H264 encoder.

``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR (integer)``
    Indicates bit rate (bps) for hierarchical coding layer 5 for H264 encoder.

``V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR (integer)``
    Indicates bit rate (bps) for hierarchical coding layer 6 for H264 encoder.

.. _v4l2-mpeg-h264:
@@ -3441,11 +3473,11 @@ HEVC/H.265 Control IDs

``V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP (integer)``
    Minimum quantization parameter for HEVC.
-    Valid range: from 0 to 51.
+    Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.

``V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP (integer)``
    Maximum quantization parameter for HEVC.
-    Valid range: from 0 to 51.
+    Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.

``V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP (integer)``
    Quantization parameter for an I frame for HEVC.
@@ -3462,6 +3494,42 @@ HEVC/H.265 Control IDs
    Valid range: [V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP,
    V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP].

``V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP (integer)``
    Minimum quantization parameter for the HEVC I frame to limit I frame
    quality to a range. Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
    If V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP is also set, the quantization parameter
    should be chosen to meet both requirements.

``V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP (integer)``
    Maximum quantization parameter for the HEVC I frame to limit I frame
    quality to a range. Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
    If V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP is also set, the quantization parameter
    should be chosen to meet both requirements.

``V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP (integer)``
    Minimum quantization parameter for the HEVC P frame to limit P frame
    quality to a range. Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
    If V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP is also set, the quantization parameter
    should be chosen to meet both requirements.

``V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP (integer)``
    Maximum quantization parameter for the HEVC P frame to limit P frame
    quality to a range. Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
    If V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP is also set, the quantization parameter
    should be chosen to meet both requirements.

``V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP (integer)``
    Minimum quantization parameter for the HEVC B frame to limit B frame
    quality to a range. Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
    If V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP is also set, the quantization parameter
    should be chosen to meet both requirements.

``V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP (integer)``
    Maximum quantization parameter for the HEVC B frame to limit B frame
    quality to a range. Valid range: from 0 to 51 for 8 bit and from 0 to 63 for 10 bit.
    If V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP is also set, the quantization parameter
    should be chosen to meet both requirements.

``V4L2_CID_MPEG_VIDEO_HEVC_HIER_QP (boolean)``
    HIERARCHICAL_QP allows the host to specify the quantization parameter
    values for each temporal layer through HIERARCHICAL_QP_LAYER. This is
@@ -4382,3 +4450,12 @@ enum v4l2_mpeg_video_hevc_size_of_length_field -
    - Selecting this value specifies that HEVC slices are expected
      to be prefixed by Annex B start codes. According to :ref:`hevc`
      valid start codes can be 3-bytes 0x000001 or 4-bytes 0x00000001.

``V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID (integer)``
    Specifies a priority identifier for the NAL unit, which will be applied to
    the base layer. By default this value is set to 0 for the base layer,
    and the next layer will have the priority ID assigned as 1, 2, 3 and so on.
    The video encoder can't decide the priority id to be applied to a layer,
    so this has to come from the client.
    This is applicable to H264 and the valid range is from 0 to 63.
    Source Rec. ITU-T H.264 (06/2019); G.7.4.1.1, G.8.8.1.
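A minimal sketch of setting one of these backported controls via the extended-controls API; /dev/video0 and the chosen value are examples, and the control ID assumes headers carrying this merge::

	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <string.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		struct v4l2_ext_control ctrl;
		struct v4l2_ext_controls ctrls;
		int fd = open("/dev/video0", O_RDWR);	/* example encoder node */

		memset(&ctrl, 0, sizeof(ctrl));
		memset(&ctrls, 0, sizeof(ctrls));
		ctrl.id = V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID;
		ctrl.value = 4;			/* base layer priority id, 0..63 */
		ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
		ctrls.count = 1;
		ctrls.controls = &ctrl;
		return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) == 0 ? 0 : 1;
	}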
[diff for one file suppressed because it is too large]
@@ -1112,13 +1112,6 @@
	iommu_put_dma_cookie
	iommu_set_fault_handler
	iommu_unmap
-	ion_alloc
-	ion_buffer_zero
-	__ion_device_add_heap
-	ion_free
-	ion_heap_map_kernel
-	ion_heap_map_user
-	ion_heap_unmap_kernel
	__ioread32_copy
	__ioremap
	iounmap
@@ -154,6 +154,7 @@ CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@@ -471,8 +472,6 @@ CONFIG_VHOST_VSOCK=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_DEBUG_KINFO=y
-CONFIG_ION=y
-CONFIG_ION_SYSTEM_HEAP=y
CONFIG_COMMON_CLK_SCPI=y
# CONFIG_CLK_SUNXI is not set
# CONFIG_SUNXI_CCU is not set
@@ -23,6 +23,7 @@
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

#define psb_csync()	asm volatile("hint #17" : : : "memory")
+#define tsb_csync()	asm volatile("hint #18" : : : "memory")
#define csdb()		asm volatile("hint #20" : : : "memory")

#define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \
@@ -65,6 +65,19 @@
						// use EL1&0 translation.

.Lskip_spe_\@:
	/* Trace buffer */
	ubfx	x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4
	cbz	x0, .Lskip_trace_\@		// Skip if TraceBuffer is not present

	mrs_s	x0, SYS_TRBIDR_EL1
	and	x0, x0, TRBIDR_PROG
	cbnz	x0, .Lskip_trace_\@		// If TRBE is available at EL2

	mov	x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
	orr	x2, x2, x0			// allow the EL1&0 translation
						// to own it.

.Lskip_trace_\@:
	msr	mdcr_el2, x2			// Configure debug traps
.endm
@@ -278,6 +278,9 @@
#define CPTR_EL2_DEFAULT	CPTR_EL2_RES1

/* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_E2TB_MASK	(UL(0x3))
+#define MDCR_EL2_E2TB_SHIFT	(UL(24))
#define MDCR_EL2_TTRF		(1 << 19)
#define MDCR_EL2_TPMS		(1 << 14)
#define MDCR_EL2_E2PB_MASK	(UL(0x3))
#define MDCR_EL2_E2PB_SHIFT	(UL(12))
@@ -316,6 +316,8 @@ struct kvm_vcpu_arch {
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
+		/* Self-hosted trace */
+		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
@@ -83,6 +83,11 @@ void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

+#ifdef __KVM_NVHE_HYPERVISOR__
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+#endif
+
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
@@ -191,6 +191,7 @@
#define SYS_GCR_EL1			sys_reg(3, 0, 1, 0, 6)

#define SYS_ZCR_EL1			sys_reg(3, 0, 1, 2, 0)
+#define SYS_TRFCR_EL1			sys_reg(3, 0, 1, 2, 1)

#define SYS_TTBR0_EL1			sys_reg(3, 0, 2, 0, 0)
#define SYS_TTBR1_EL1			sys_reg(3, 0, 2, 0, 1)
@@ -328,6 +329,55 @@

/*** End of Statistical Profiling Extension ***/

+/*
+ * TRBE Registers
+ */
+#define SYS_TRBLIMITR_EL1		sys_reg(3, 0, 9, 11, 0)
+#define SYS_TRBPTR_EL1			sys_reg(3, 0, 9, 11, 1)
+#define SYS_TRBBASER_EL1		sys_reg(3, 0, 9, 11, 2)
+#define SYS_TRBSR_EL1			sys_reg(3, 0, 9, 11, 3)
+#define SYS_TRBMAR_EL1			sys_reg(3, 0, 9, 11, 4)
+#define SYS_TRBTRG_EL1			sys_reg(3, 0, 9, 11, 6)
+#define SYS_TRBIDR_EL1			sys_reg(3, 0, 9, 11, 7)
+
+#define TRBLIMITR_LIMIT_MASK		GENMASK_ULL(51, 0)
+#define TRBLIMITR_LIMIT_SHIFT		12
+#define TRBLIMITR_NVM			BIT(5)
+#define TRBLIMITR_TRIG_MODE_MASK	GENMASK(1, 0)
+#define TRBLIMITR_TRIG_MODE_SHIFT	3
+#define TRBLIMITR_FILL_MODE_MASK	GENMASK(1, 0)
+#define TRBLIMITR_FILL_MODE_SHIFT	1
+#define TRBLIMITR_ENABLE		BIT(0)
+#define TRBPTR_PTR_MASK			GENMASK_ULL(63, 0)
+#define TRBPTR_PTR_SHIFT		0
+#define TRBBASER_BASE_MASK		GENMASK_ULL(51, 0)
+#define TRBBASER_BASE_SHIFT		12
+#define TRBSR_EC_MASK			GENMASK(5, 0)
+#define TRBSR_EC_SHIFT			26
+#define TRBSR_IRQ			BIT(22)
+#define TRBSR_TRG			BIT(21)
+#define TRBSR_WRAP			BIT(20)
+#define TRBSR_ABORT			BIT(18)
+#define TRBSR_STOP			BIT(17)
+#define TRBSR_MSS_MASK			GENMASK(15, 0)
+#define TRBSR_MSS_SHIFT			0
+#define TRBSR_BSC_MASK			GENMASK(5, 0)
+#define TRBSR_BSC_SHIFT			0
+#define TRBSR_FSC_MASK			GENMASK(5, 0)
+#define TRBSR_FSC_SHIFT			0
+#define TRBMAR_SHARE_MASK		GENMASK(1, 0)
+#define TRBMAR_SHARE_SHIFT		8
+#define TRBMAR_OUTER_MASK		GENMASK(3, 0)
+#define TRBMAR_OUTER_SHIFT		4
+#define TRBMAR_INNER_MASK		GENMASK(3, 0)
+#define TRBMAR_INNER_SHIFT		0
+#define TRBTRG_TRG_MASK			GENMASK(31, 0)
+#define TRBTRG_TRG_SHIFT		0
+#define TRBIDR_FLAG			BIT(5)
+#define TRBIDR_PROG			BIT(4)
+#define TRBIDR_ALIGN_MASK		GENMASK(3, 0)
+#define TRBIDR_ALIGN_SHIFT		0

#define SYS_PMINTENSET_EL1		sys_reg(3, 0, 9, 14, 1)
#define SYS_PMINTENCLR_EL1		sys_reg(3, 0, 9, 14, 2)
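An illustrative kernel-side sketch (not part of this diff) of decoding two TRBIDR_EL1 fields with the masks above; the log2 interpretation of the Align field matches what the TRBE driver exposes via the "align" sysfs node, but treat it as an assumption:

	#include <linux/bits.h>
	#include <linux/types.h>

	/* repeated from the definitions above for self-containment */
	#define TRBIDR_FLAG		BIT(5)
	#define TRBIDR_ALIGN_MASK	GENMASK(3, 0)
	#define TRBIDR_ALIGN_SHIFT	0

	static inline unsigned long trbe_write_align(u64 trbidr)
	{
		/* Align encodes log2 of the required write pointer alignment */
		return 1UL << ((trbidr >> TRBIDR_ALIGN_SHIFT) & TRBIDR_ALIGN_MASK);
	}

	static inline bool trbe_updates_flags(u64 trbidr)
	{
		/* true when buffer writes also perform access/dirty flag updates */
		return !!(trbidr & TRBIDR_FLAG);
	}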
@@ -471,6 +521,7 @@

#define SYS_SCTLR_EL2			sys_reg(3, 4, 1, 0, 0)
#define SYS_ZCR_EL2			sys_reg(3, 4, 1, 2, 0)
+#define SYS_TRFCR_EL2			sys_reg(3, 4, 1, 2, 1)
#define SYS_DACR32_EL2			sys_reg(3, 4, 3, 0, 0)
#define SYS_SPSR_EL2			sys_reg(3, 4, 4, 0, 0)
#define SYS_ELR_EL2			sys_reg(3, 4, 4, 0, 1)
@@ -830,6 +881,8 @@
#define ID_AA64MMFR2_CNP_SHIFT		0

/* id_aa64dfr0 */
+#define ID_AA64DFR0_TRBE_SHIFT		44
#define ID_AA64DFR0_TRACE_FILT_SHIFT	40
#define ID_AA64DFR0_DOUBLELOCK_SHIFT	36
#define ID_AA64DFR0_PMSVER_SHIFT	32
#define ID_AA64DFR0_CTX_CMPS_SHIFT	28
@@ -1007,6 +1060,14 @@
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
#define SYS_MPIDR_SAFE_VAL	(BIT(31))

+#define TRFCR_ELx_TS_SHIFT		5
+#define TRFCR_ELx_TS_VIRTUAL		((0x1UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_ELx_TS_GUEST_PHYSICAL	((0x2UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_ELx_TS_PHYSICAL		((0x3UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_EL2_CX			BIT(3)
+#define TRFCR_ELx_ExTRE			BIT(1)
+#define TRFCR_ELx_E0TRE			BIT(0)
+
#ifdef __ASSEMBLY__

	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
@@ -406,7 +406,6 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	 * of support.
	 */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};
@@ -115,9 +115,10 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
	mrs_s	x0, SYS_VBAR_EL12
	msr	vbar_el1, x0

-	// Use EL2 translations for SPE and disable access from EL1
+	// Use EL2 translations for SPE & TRBE and disable access from EL1
	mrs	x0, mdcr_el2
	bic	x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
+	bic	x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
	msr	mdcr_el2, x0

	// Transfer the MM state from EL1 to EL2
@@ -89,6 +89,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+ *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@@ -106,12 +107,13 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
	trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);

	/*
-	 * This also clears MDCR_EL2_E2PB_MASK to disable guest access
-	 * to the profiling buffer.
+	 * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
+	 * to disable guest access to the profiling and trace buffers
	 */
	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
				MDCR_EL2_TPMS |
+				MDCR_EL2_TTRF |
				MDCR_EL2_TPMCR |
				MDCR_EL2_TDRA |
				MDCR_EL2_TDOSA);
@@ -58,16 +58,66 @@ static void __debug_restore_spe(u64 pmscr_el1)
	write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
}

-void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
+static void __debug_save_trace(u64 *trfcr_el1)
{
+	*trfcr_el1 = 0;
+
+	/* Check if we have TRBE */
+	if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
+						  ID_AA64DFR0_TRBE_SHIFT))
+		return;
+
+	/* Check we can access the TRBE */
+	if ((read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
+		return;
+
+	/* Check if the TRBE is enabled */
+	if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_ENABLE))
+		return;
+	/*
+	 * Prohibit trace generation while we are in guest.
+	 * Since access to TRFCR_EL1 is trapped, the guest can't
+	 * modify the filtering set by the host.
+	 */
+	*trfcr_el1 = read_sysreg_s(SYS_TRFCR_EL1);
+	write_sysreg_s(0, SYS_TRFCR_EL1);
+	isb();
+	/* Drain the trace buffer to memory */
+	tsb_csync();
+	dsb(nsh);
+}
+
+static void __debug_restore_trace(u64 trfcr_el1)
+{
+	if (!trfcr_el1)
+		return;
+
+	/* Restore trace filter controls */
+	write_sysreg_s(trfcr_el1, SYS_TRFCR_EL1);
+}
+
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
+{
+	/* Disable and flush SPE data generation */
+	__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
+	/* Disable and flush Self-Hosted Trace generation */
+	__debug_save_trace(&vcpu->arch.host_debug_state.trfcr_el1);
+}
+
+void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
+{
	__debug_switch_to_guest_common(vcpu);
}

+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
+{
+	__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
+	__debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1);
+}
+
void __debug_switch_to_host(struct kvm_vcpu *vcpu)
{
-	__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
	__debug_switch_to_host_common(vcpu);
}
@@ -95,6 +95,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
+	mdcr_el2 |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	if (is_protected_kvm_enabled())
@@ -192,6 +193,15 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);
+	/*
+	 * For nVHE, we must save and disable any SPE
+	 * buffers, as the translation regime is going
+	 * to be loaded with that of the guest. And we must
+	 * save host context for SPE, before we change the
+	 * ownership to EL2 (via MDCR_EL2_E2PB == 0) and before
+	 * we load guest Stage1.
+	 */
+	__debug_save_host_buffers_nvhe(vcpu);

	__adjust_pc(vcpu);
@@ -234,11 +244,12 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

+	__debug_switch_to_host(vcpu);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
-	__debug_switch_to_host(vcpu);
+	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);
@@ -136,6 +136,7 @@ CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y

@@ -427,8 +428,6 @@ CONFIG_VHOST_VSOCK=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_DEBUG_KINFO=y
-CONFIG_ION=y
-CONFIG_ION_SYSTEM_HEAP=y
CONFIG_REMOTEPROC=y
CONFIG_REMOTEPROC_CDEV=y
CONFIG_RPMSG_CHAR=y
@@ -299,10 +299,9 @@ static int amba_remove(struct device *dev)
{
	struct amba_device *pcdev = to_amba_device(dev);
	struct amba_driver *drv = to_amba_driver(dev->driver);
-	int ret;

	pm_runtime_get_sync(dev);
-	ret = drv->remove(pcdev);
+	drv->remove(pcdev);
	pm_runtime_put_noidle(dev);

	/* Undo the runtime PM settings in amba_probe() */
@@ -313,7 +312,7 @@ static int amba_remove(struct device *dev)
	amba_put_disable_pclk(pcdev);
	dev_pm_domain_detach(dev, true);

-	return ret;
+	return 0;
}

static void amba_shutdown(struct device *dev)
@@ -37,6 +37,7 @@
#include <trace/hooks/fault.h>
#include <trace/hooks/iommu.h>
#include <trace/hooks/thermal.h>
+#include <trace/hooks/ufshcd.h>

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
@@ -159,3 +160,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_wakeup);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pagecache_get_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_enable_thermal_genl_check);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_prepare_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_update_sysfs);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_compl_command);
@@ -69,11 +69,10 @@ out_clk:
	return ret;
}

-static int nmk_rng_remove(struct amba_device *dev)
+static void nmk_rng_remove(struct amba_device *dev)
{
	amba_release_regions(dev);
	clk_disable(rng_clk);
-	return 0;
}

static const struct amba_id nmk_rng_ids[] = {
@@ -3197,7 +3197,7 @@ probe_err2:
	return ret;
}

-static int pl330_remove(struct amba_device *adev)
+static void pl330_remove(struct amba_device *adev)
{
	struct pl330_dmac *pl330 = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
@@ -3237,7 +3237,6 @@ static int pl330_remove(struct amba_device *adev)

	if (pl330->rstc)
		reset_control_assert(pl330->rstc);
-	return 0;
}

static const struct amba_id pl330_ids[] = {
@@ -324,7 +324,7 @@ dev_put:
	return ret;
}

-static int pl111_amba_remove(struct amba_device *amba_dev)
+static void pl111_amba_remove(struct amba_device *amba_dev)
{
	struct device *dev = &amba_dev->dev;
	struct drm_device *drm = amba_get_drvdata(amba_dev);
@@ -335,8 +335,6 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
	drm_panel_bridge_remove(priv->bridge);
	drm_dev_put(drm);
	of_reserved_mem_device_release(dev);
-
-	return 0;
}

/*
@@ -98,19 +98,27 @@ config CORESIGHT_SOURCE_ETM3X
	  module will be called coresight-etm3x.

config CORESIGHT_SOURCE_ETM4X
-	tristate "CoreSight Embedded Trace Macrocell 4.x driver"
+	tristate "CoreSight ETMv4.x / ETE driver"
	depends on ARM64
	select CORESIGHT_LINKS_AND_SINKS
	select PID_IN_CONTEXTIDR
	help
-	  This driver provides support for the ETM4.x tracer module, tracing the
-	  instructions that a processor is executing. This is primarily useful
-	  for instruction level tracing. Depending on the implemented version
-	  data tracing may also be available.
+	  This driver provides support for the CoreSight Embedded Trace Macrocell
+	  version 4.x and the Embedded Trace Extensions (ETE). Both are CPU tracer
+	  modules, tracing the instructions that a processor is executing. This is
+	  primarily useful for instruction level tracing.

	  To compile this driver as a module, choose M here: the
	  module will be called coresight-etm4x.

+config ETM4X_IMPDEF_FEATURE
+	bool "Control implementation defined overflow support in ETM 4.x driver"
+	depends on CORESIGHT_SOURCE_ETM4X
+	help
+	  This control provides implementation define control for CoreSight
+	  ETM 4.x tracer module that can't reduce commit rate automatically.
+	  This avoids overflow between the ETM tracer module and the cpu core.
+
config CORESIGHT_STM
	tristate "CoreSight System Trace Macrocell driver"
	depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64

@@ -248,4 +256,18 @@ config CORESIGHT_HWEVENT
	  Hardware Event across STM interface. It configures Coresight
	  Hardware Event mux control registers to select hardware events
	  based on user input.

+config CORESIGHT_TRBE
+	tristate "Trace Buffer Extension (TRBE) driver"
+	depends on ARM64 && CORESIGHT_SOURCE_ETM4X
+	help
+	  This driver provides support for percpu Trace Buffer Extension (TRBE).
+	  TRBE always needs to be used along with its corresponding percpu ETE
+	  component. ETE generates trace data which is then captured with TRBE.
+	  Unlike traditional sink devices, TRBE is a CPU feature accessible via
+	  system registers. But its explicit dependency with the trace unit (ETE)
+	  requires it to be plugged in as a coresight sink device.
+
+	  To compile this driver as a module, choose M here: the module will be
+	  called coresight-trbe.
endif
@@ -22,6 +22,7 @@ obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
+obj-$(CONFIG_CORESIGHT_TRBE) += coresight-trbe.o
coresight-cti-y := coresight-cti-core.o coresight-cti-platform.o \
		   coresight-cti-sysfs.o
obj-$(CONFIG_CORESIGHT_TPDA) += coresight-tpda.o
@@ -401,8 +401,9 @@ static const struct attribute_group *catu_groups[] = {

static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
{
-	return coresight_timeout(drvdata->base,
-				 CATU_STATUS, CATU_STATUS_READY, 1);
+	struct csdev_access *csa = &drvdata->csdev->access;
+
+	return coresight_timeout(csa, CATU_STATUS, CATU_STATUS_READY, 1);
}

static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
@@ -411,6 +412,7 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
	u32 control, mode;
	struct etr_buf *etr_buf = data;
	struct device *dev = &drvdata->csdev->dev;
+	struct coresight_device *csdev = drvdata->csdev;

	if (catu_wait_for_ready(drvdata))
		dev_warn(dev, "Timeout while waiting for READY\n");
@@ -421,7 +423,7 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
		return -EBUSY;
	}

-	rc = coresight_claim_device_unlocked(drvdata->base);
+	rc = coresight_claim_device_unlocked(csdev);
	if (rc)
		return rc;

@@ -465,9 +467,10 @@ static int catu_disable_hw(struct catu_drvdata *drvdata)
{
	int rc = 0;
	struct device *dev = &drvdata->csdev->dev;
+	struct coresight_device *csdev = drvdata->csdev;

	catu_write_control(drvdata, 0);
-	coresight_disclaim_device_unlocked(drvdata->base);
+	coresight_disclaim_device_unlocked(csdev);
	if (catu_wait_for_ready(drvdata)) {
		dev_info(dev, "Timeout while waiting for READY\n");
		rc = -EAGAIN;
@@ -551,6 +554,7 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id)
	dev->platform_data = pdata;

	drvdata->base = base;
+	catu_desc.access = CSDEV_ACCESS_IOMEM(base);
	catu_desc.pdata = pdata;
	catu_desc.dev = dev;
	catu_desc.groups = catu_groups;
@@ -567,12 +571,11 @@ out:
	return ret;
}

-static int catu_remove(struct amba_device *adev)
+static void catu_remove(struct amba_device *adev)
{
	struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	coresight_unregister(drvdata->csdev);
-	return 0;
}

static struct amba_id catu_ids[] = {
@@ -26,6 +26,7 @@
#define MAX_SINK_NAME		20

static DEFINE_MUTEX(coresight_mutex);
+DEFINE_PER_CPU(struct coresight_device *, csdev_sink);

/**
 * struct coresight_node - elements of a path, from source to sink
@@ -87,6 +88,18 @@ void coresight_remove_csr_ops(void)
}
EXPORT_SYMBOL(coresight_remove_csr_ops);

+void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev)
+{
+	per_cpu(csdev_sink, cpu) = csdev;
+}
+EXPORT_SYMBOL_GPL(coresight_set_percpu_sink);
+
+struct coresight_device *coresight_get_percpu_sink(int cpu)
+{
+	return per_cpu(csdev_sink, cpu);
+}
+EXPORT_SYMBOL_GPL(coresight_get_percpu_sink);
+
static int coresight_id_match(struct device *dev, void *data)
{
	int trace_id, i_trace_id;
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline u32 coresight_read_claim_tags(void __iomem *base)
|
||||
static inline u32 coresight_read_claim_tags(struct coresight_device *csdev)
|
||||
{
|
||||
return readl_relaxed(base + CORESIGHT_CLAIMCLR);
|
||||
return csdev_access_relaxed_read32(&csdev->access, CORESIGHT_CLAIMCLR);
|
||||
}
|
||||
|
||||
static inline bool coresight_is_claimed_self_hosted(void __iomem *base)
|
||||
static inline bool coresight_is_claimed_self_hosted(struct coresight_device *csdev)
|
||||
{
|
||||
return coresight_read_claim_tags(base) == CORESIGHT_CLAIM_SELF_HOSTED;
|
||||
return coresight_read_claim_tags(csdev) == CORESIGHT_CLAIM_SELF_HOSTED;
|
||||
}
|
||||
|
||||
static inline bool coresight_is_claimed_any(void __iomem *base)
|
||||
static inline bool coresight_is_claimed_any(struct coresight_device *csdev)
|
||||
{
|
||||
return coresight_read_claim_tags(base) != 0;
|
||||
return coresight_read_claim_tags(csdev) != 0;
|
||||
}
|
||||
|
||||
static inline void coresight_set_claim_tags(void __iomem *base)
|
||||
static inline void coresight_set_claim_tags(struct coresight_device *csdev)
|
||||
{
|
||||
writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMSET);
|
||||
csdev_access_relaxed_write32(&csdev->access, CORESIGHT_CLAIM_SELF_HOSTED,
|
||||
CORESIGHT_CLAIMSET);
|
||||
isb();
|
||||
}
|
||||
|
||||
static inline void coresight_clear_claim_tags(void __iomem *base)
|
||||
static inline void coresight_clear_claim_tags(struct coresight_device *csdev)
|
||||
{
|
||||
writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMCLR);
|
||||
csdev_access_relaxed_write32(&csdev->access, CORESIGHT_CLAIM_SELF_HOSTED,
|
||||
CORESIGHT_CLAIMCLR);
|
||||
isb();
|
||||
}
|
||||
|
||||
@@ -247,27 +262,33 @@ static inline void coresight_clear_claim_tags(void __iomem *base)
|
||||
* Called with CS_UNLOCKed for the component.
|
||||
* Returns : 0 on success
|
||||
*/
|
||||
int coresight_claim_device_unlocked(void __iomem *base)
|
||||
int coresight_claim_device_unlocked(struct coresight_device *csdev)
|
||||
{
|
||||
if (coresight_is_claimed_any(base))
|
||||
if (WARN_ON(!csdev))
|
||||
return -EINVAL;
|
||||
|
||||
if (coresight_is_claimed_any(csdev))
|
||||
return -EBUSY;
|
||||
|
||||
coresight_set_claim_tags(base);
|
||||
if (coresight_is_claimed_self_hosted(base))
|
||||
coresight_set_claim_tags(csdev);
|
||||
if (coresight_is_claimed_self_hosted(csdev))
|
||||
return 0;
|
||||
/* There was a race setting the tags, clean up and fail */
|
||||
coresight_clear_claim_tags(base);
|
||||
coresight_clear_claim_tags(csdev);
|
||||
return -EBUSY;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(coresight_claim_device_unlocked);
|
||||
|
||||
int coresight_claim_device(void __iomem *base)
|
||||
int coresight_claim_device(struct coresight_device *csdev)
|
||||
{
|
||||
int rc;
|
||||
|
||||
CS_UNLOCK(base);
|
||||
rc = coresight_claim_device_unlocked(base);
|
||||
CS_LOCK(base);
|
||||
if (WARN_ON(!csdev))
|
||||
return -EINVAL;
|
||||
|
||||
CS_UNLOCK(csdev->access.base);
|
||||
rc = coresight_claim_device_unlocked(csdev);
|
||||
CS_LOCK(csdev->access.base);
|
||||
|
||||
return rc;
|
||||
}
|
||||
@@ -277,11 +298,14 @@ EXPORT_SYMBOL_GPL(coresight_claim_device);
|
||||
* coresight_disclaim_device_unlocked : Clear the claim tags for the device.
|
||||
* Called with CS_UNLOCKed for the component.
|
||||
*/
|
||||
void coresight_disclaim_device_unlocked(void __iomem *base)
|
||||
void coresight_disclaim_device_unlocked(struct coresight_device *csdev)
|
||||
{
|
||||
|
||||
if (coresight_is_claimed_self_hosted(base))
|
||||
coresight_clear_claim_tags(base);
|
||||
if (WARN_ON(!csdev))
|
||||
return;
|
||||
|
||||
if (coresight_is_claimed_self_hosted(csdev))
|
||||
coresight_clear_claim_tags(csdev);
|
||||
else
|
||||
/*
|
||||
* The external agent may have not honoured our claim
|
||||
@@ -292,11 +316,14 @@ void coresight_disclaim_device_unlocked(void __iomem *base)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(coresight_disclaim_device_unlocked);
|
||||
|
||||
void coresight_disclaim_device(void __iomem *base)
|
||||
void coresight_disclaim_device(struct coresight_device *csdev)
|
||||
{
|
||||
CS_UNLOCK(base);
|
||||
coresight_disclaim_device_unlocked(base);
|
||||
CS_LOCK(base);
|
||||
if (WARN_ON(!csdev))
|
||||
return;
|
||||
|
||||
CS_UNLOCK(csdev->access.base);
|
||||
coresight_disclaim_device_unlocked(csdev);
|
||||
CS_LOCK(csdev->access.base);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(coresight_disclaim_device);
|
||||
|
||||
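An illustrative sketch (not part of this diff) of the driver-side pattern after this rework, where claim/disclaim take a coresight_device rather than a raw MMIO base:

	/* hypothetical driver helper; function name is an example */
	static int example_enable_hw(struct coresight_device *csdev)
	{
		int rc;

		CS_UNLOCK(csdev->access.base);	/* open the software lock */
		rc = coresight_claim_device_unlocked(csdev);
		if (!rc) {
			/* ... program the component via coresight_write32() ... */
		}
		CS_LOCK(csdev->access.base);
		return rc;
	}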
@@ -942,6 +969,14 @@ static int _coresight_build_path(struct coresight_device *csdev,
	if (csdev == sink)
		goto out;

+	if (coresight_is_percpu_source(csdev) && coresight_is_percpu_sink(sink) &&
+	    sink == per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev))) {
+		if (_coresight_build_path(sink, sink, path, source) == 0) {
+			found = true;
+			goto out;
+		}
+	}
+
	/* Not a sink - recursively explore each port found on this element */
	for (i = 0; i < csdev->pdata->nr_outport; i++) {
		struct coresight_device *child_dev;
@@ -1161,8 +1196,12 @@ coresight_find_default_sink(struct coresight_device *csdev)
	int depth = 0;

	/* look for a default sink if we have not found for this device */
-	if (!csdev->def_sink)
-		csdev->def_sink = coresight_find_sink(csdev, &depth);
+	if (!csdev->def_sink) {
+		if (coresight_is_percpu_source(csdev))
+			csdev->def_sink = per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev));
+		if (!csdev->def_sink)
+			csdev->def_sink = coresight_find_sink(csdev, &depth);
+	}
	return csdev->def_sink;
}
@@ -1660,23 +1699,24 @@ static void coresight_remove_conns(struct coresight_device *csdev)
|
||||
}
|
||||
|
||||
/**
|
||||
* coresight_timeout - loop until a bit has changed to a specific state.
|
||||
* @addr: base address of the area of interest.
|
||||
* @offset: address of a register, starting from @addr.
|
||||
* coresight_timeout - loop until a bit has changed to a specific register
|
||||
* state.
|
||||
* @csa: coresight device access for the device
|
||||
* @offset: Offset of the register from the base of the device.
|
||||
* @position: the position of the bit of interest.
|
||||
* @value: the value the bit should have.
|
||||
*
|
||||
* Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
|
||||
* TIMEOUT_US has elapsed, which ever happens first.
|
||||
*/
|
||||
|
||||
int coresight_timeout(void __iomem *addr, u32 offset, int position, int value)
|
||||
int coresight_timeout(struct csdev_access *csa, u32 offset,
|
||||
int position, int value)
|
||||
{
|
||||
int i;
|
||||
u32 val;
|
||||
|
||||
for (i = TIMEOUT_US; i > 0; i--) {
|
||||
val = __raw_readl(addr + offset);
|
||||
val = csdev_access_read32(csa, offset);
|
||||
/* waiting on the bit to go from 0 to 1 */
|
||||
if (value) {
|
||||
if (val & BIT(position))
|
||||
@@ -1700,6 +1740,48 @@ int coresight_timeout(void __iomem *addr, u32 offset, int position, int value)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(coresight_timeout);
|
||||
|
||||
u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset)
|
||||
{
|
||||
return csdev_access_relaxed_read32(&csdev->access, offset);
|
||||
}
|
||||
|
||||
u32 coresight_read32(struct coresight_device *csdev, u32 offset)
|
||||
{
|
||||
return csdev_access_read32(&csdev->access, offset);
|
||||
}
|
||||
|
||||
void coresight_relaxed_write32(struct coresight_device *csdev,
|
||||
u32 val, u32 offset)
|
||||
{
|
||||
csdev_access_relaxed_write32(&csdev->access, val, offset);
|
||||
}
|
||||
|
||||
void coresight_write32(struct coresight_device *csdev, u32 val, u32 offset)
|
||||
{
|
||||
csdev_access_write32(&csdev->access, val, offset);
|
||||
}
|
||||
|
||||
u64 coresight_relaxed_read64(struct coresight_device *csdev, u32 offset)
|
||||
{
|
||||
return csdev_access_relaxed_read64(&csdev->access, offset);
|
||||
}
|
||||
|
||||
u64 coresight_read64(struct coresight_device *csdev, u32 offset)
|
||||
{
|
||||
return csdev_access_read64(&csdev->access, offset);
|
||||
}
|
||||
|
||||
void coresight_relaxed_write64(struct coresight_device *csdev,
|
||||
u64 val, u32 offset)
|
||||
{
|
||||
csdev_access_relaxed_write64(&csdev->access, val, offset);
|
||||
}
|
||||
|
||||
void coresight_write64(struct coresight_device *csdev, u64 val, u32 offset)
|
||||
{
|
||||
csdev_access_write64(&csdev->access, val, offset);
|
||||
}
|
||||
|
||||
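These wrappers let a caller poll and program a device without knowing whether it is memory-mapped or system-instruction based. A hedged usage sketch (the helper is hypothetical; TRCSTATR and TRCSTATR_IDLE_BIT are the real ETMv4 names, and the errno choice is ours):

static int example_wait_trace_idle(struct coresight_device *csdev)
{
	/* Poll TRCSTATR.IDLE until it reads 1, whatever the access method */
	if (coresight_timeout(&csdev->access, TRCSTATR, TRCSTATR_IDLE_BIT, 1))
		return -ETIMEDOUT;
	return 0;
}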
/*
 * coresight_release_platform_data: Release references to the devices connected
 * to the output port of this device.
@@ -1764,6 +1846,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
	csdev->type = desc->type;
	csdev->subtype = desc->subtype;
	csdev->ops = desc->ops;
	csdev->access = desc->access;
	csdev->orphan = false;

	csdev->dev.type = &coresight_dev_type[desc->type];

@@ -627,7 +627,7 @@ err:
	return ret;
}

static int debug_remove(struct amba_device *adev)
static void debug_remove(struct amba_device *adev)
{
	struct device *dev = &adev->dev;
	struct debug_drvdata *drvdata = amba_get_drvdata(adev);
@@ -642,8 +642,6 @@ static int debug_remove(struct amba_device *adev)

	if (!--debug_count)
		debug_func_exit();

	return 0;
}

static const struct amba_cs_uci_id uci_id_debug[] = {

@@ -102,7 +102,7 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
		goto cti_state_unchanged;

	/* claim the device */
	rc = coresight_claim_device(drvdata->base);
	rc = coresight_claim_device(drvdata->csdev);
	if (rc)
		goto cti_err_not_enabled;

@@ -136,7 +136,7 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
		goto cti_hp_not_enabled;

	/* try to claim the device */
	if (coresight_claim_device(drvdata->base))
	if (coresight_claim_device(drvdata->csdev))
		goto cti_hp_not_enabled;

	cti_write_all_hw_regs(drvdata);
@@ -154,6 +154,7 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
{
	struct cti_config *config = &drvdata->config;
	struct device *dev = &drvdata->csdev->dev;
	struct coresight_device *csdev = drvdata->csdev;

	spin_lock(&drvdata->spinlock);

@@ -171,7 +172,7 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
	writel_relaxed(0, drvdata->base + CTICONTROL);
	config->hw_enabled = false;

	coresight_disclaim_device_unlocked(drvdata->base);
	coresight_disclaim_device_unlocked(csdev);
	CS_LOCK(drvdata->base);
	spin_unlock(&drvdata->spinlock);
	pm_runtime_put(dev);
@@ -655,6 +656,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
			     void *v)
{
	struct cti_drvdata *drvdata;
	struct coresight_device *csdev;
	unsigned int cpu = smp_processor_id();
	int notify_res = NOTIFY_OK;

@@ -662,6 +664,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
		return NOTIFY_OK;

	drvdata = cti_cpu_drvdata[cpu];
	csdev = drvdata->csdev;

	if (WARN_ON_ONCE(drvdata->ctidev.cpu != cpu))
		return NOTIFY_BAD;
@@ -673,13 +676,13 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
		/* CTI regs all static - we have a copy & nothing to save */
		drvdata->config.hw_powered = false;
		if (drvdata->config.hw_enabled)
			coresight_disclaim_device(drvdata->base);
			coresight_disclaim_device(csdev);
		break;

	case CPU_PM_ENTER_FAILED:
		drvdata->config.hw_powered = true;
		if (drvdata->config.hw_enabled) {
			if (coresight_claim_device(drvdata->base))
			if (coresight_claim_device(csdev))
				drvdata->config.hw_enabled = false;
		}
		break;
@@ -692,7 +695,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
		/* check enable reference count to enable HW */
		if (atomic_read(&drvdata->config.enable_req_count)) {
			/* check we can claim the device as we re-power */
			if (coresight_claim_device(drvdata->base))
			if (coresight_claim_device(csdev))
				goto cti_notify_exit;

			drvdata->config.hw_enabled = true;
@@ -736,7 +739,7 @@ static int cti_dying_cpu(unsigned int cpu)
	spin_lock(&drvdata->spinlock);
	drvdata->config.hw_powered = false;
	if (drvdata->config.hw_enabled)
		coresight_disclaim_device(drvdata->base);
		coresight_disclaim_device(drvdata->csdev);
	spin_unlock(&drvdata->spinlock);
	return 0;
}
@@ -836,7 +839,7 @@ static void cti_device_release(struct device *dev)
	if (drvdata->csdev_release)
		drvdata->csdev_release(dev);
}
static int cti_remove(struct amba_device *adev)
static void cti_remove(struct amba_device *adev)
{
	struct cti_drvdata *drvdata = dev_get_drvdata(&adev->dev);

@@ -845,8 +848,6 @@ static int cti_remove(struct amba_device *adev)
	mutex_unlock(&ect_mutex);

	coresight_unregister(drvdata->csdev);

	return 0;
}

static int cti_probe(struct amba_device *adev, const struct amba_id *id)
@@ -870,6 +871,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
		return PTR_ERR(base);

	drvdata->base = base;
	cti_desc.access = CSDEV_ACCESS_IOMEM(base);

	dev_set_drvdata(dev, drvdata);

@@ -343,7 +343,6 @@ static int cti_plat_create_connection(struct device *dev,
{
	struct cti_trig_con *tc = NULL;
	int cpuid = -1, err = 0;
	struct fwnode_handle *cs_fwnode = NULL;
	struct coresight_device *csdev = NULL;
	const char *assoc_name = "unknown";
	char cpu_name_str[16];
@@ -397,8 +396,9 @@ static int cti_plat_create_connection(struct device *dev,
		assoc_name = cpu_name_str;
	} else {
		/* associated device ? */
		cs_fwnode = fwnode_find_reference(fwnode,
						  CTI_DT_CSDEV_ASSOC, 0);
		struct fwnode_handle *cs_fwnode = fwnode_find_reference(fwnode,
									CTI_DT_CSDEV_ASSOC,
									0);
		if (!IS_ERR(cs_fwnode)) {
			assoc_name = cti_plat_get_csdev_or_node_name(cs_fwnode,
								     &csdev);

@@ -132,7 +132,7 @@ static void __etb_enable_hw(struct etb_drvdata *drvdata)

static int etb_enable_hw(struct etb_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->base);
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;
@@ -252,6 +252,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
{
	u32 ffcr;
	struct device *dev = &drvdata->csdev->dev;
	struct csdev_access *csa = &drvdata->csdev->access;

	CS_UNLOCK(drvdata->base);

@@ -263,7 +264,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
	ffcr |= ETB_FFCR_FON_MAN;
	writel_relaxed(ffcr, drvdata->base + ETB_FFCR);

	if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
	if (coresight_timeout(csa, ETB_FFCR, ETB_FFCR_BIT, 0)) {
		dev_err(dev,
			"timeout while waiting for completion of Manual Flush\n");
	}
@@ -271,7 +272,7 @@ static void __etb_disable_hw(struct etb_drvdata *drvdata)
	/* disable trace capture */
	writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);

	if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
	if (coresight_timeout(csa, ETB_FFSR, ETB_FFSR_BIT, 1)) {
		dev_err(dev,
			"timeout while waiting for Formatter to Stop\n");
	}
@@ -344,7 +345,7 @@ static void etb_disable_hw(struct etb_drvdata *drvdata)
{
	__etb_disable_hw(drvdata);
	etb_dump_hw(drvdata);
	coresight_disclaim_device(drvdata->base);
	coresight_disclaim_device(drvdata->csdev);
}

static int etb_disable(struct coresight_device *csdev)
@@ -757,6 +758,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
		return PTR_ERR(base);

	drvdata->base = base;
	desc.access = CSDEV_ACCESS_IOMEM(base);

	spin_lock_init(&drvdata->spinlock);

@@ -803,7 +805,7 @@ err_misc_register:
	return ret;
}

static int etb_remove(struct amba_device *adev)
static void etb_remove(struct amba_device *adev)
{
	struct etb_drvdata *drvdata = dev_get_drvdata(&adev->dev);

@@ -814,8 +816,6 @@ static int etb_remove(struct amba_device *adev)
	 */
	misc_deregister(&drvdata->miscdev);
	coresight_unregister(drvdata->csdev);

	return 0;
}

#ifdef CONFIG_PM

@@ -24,20 +24,67 @@
static struct pmu etm_pmu;
static bool etm_perf_up;

static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
/*
 * An ETM context for a running event includes the perf aux handle
 * and aux_data. For ETM, the aux_data (etm_event_data), consists of
 * the trace path and the sink configuration. The event data is accessible
 * via perf_get_aux(handle). However, a sink could "end" a perf output
 * handle via the IRQ handler. And if the "sink" encounters a failure
 * to "begin" another session (e.g due to lack of space in the buffer),
 * the handle will be cleared. Thus, the event_data may not be accessible
 * from the handle when we get to the etm_event_stop(), which is required
 * for stopping the trace path. The event_data is guaranteed to stay alive
 * until "free_aux()", which cannot happen as long as the event is active on
 * the ETM. Thus the event_data for the session must be part of the ETM context
 * to make sure we can disable the trace path.
 */
struct etm_ctxt {
	struct perf_output_handle handle;
	struct etm_event_data *event_data;
};

static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);

/* ETMv3.5/PTM's ETMCR is 'config' */
/*
 * The PMU formats were originally for ETMv3.5/PTM's ETMCR 'config';
 * now take them as general formats and apply on all ETMs.
 */
PMU_FORMAT_ATTR(cycacc,		"config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(contextid,	"config:" __stringify(ETM_OPT_CTXTID));
/* contextid1 enables tracing CONTEXTIDR_EL1 for ETMv4 */
PMU_FORMAT_ATTR(contextid1,	"config:" __stringify(ETM_OPT_CTXTID));
/* contextid2 enables tracing CONTEXTIDR_EL2 for ETMv4 */
PMU_FORMAT_ATTR(contextid2,	"config:" __stringify(ETM_OPT_CTXTID2));
PMU_FORMAT_ATTR(timestamp,	"config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack,	"config:" __stringify(ETM_OPT_RETSTK));
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid,		"config2:0-31");

/*
 * contextid always traces the "PID". The PID is in CONTEXTIDR_EL1
 * when the kernel is running at EL1; when the kernel is at EL2,
 * the PID is in CONTEXTIDR_EL2.
 */
static ssize_t format_attr_contextid_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	int pid_fmt = ETM_OPT_CTXTID;

#if defined(CONFIG_CORESIGHT_SOURCE_ETM4X)
	pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
#endif
	return sprintf(page, "config:%d\n", pid_fmt);
}

struct device_attribute format_attr_contextid =
	__ATTR(contextid, 0444, format_attr_contextid_show, NULL);

static struct attribute *etm_config_formats_attr[] = {
	&format_attr_cycacc.attr,
	&format_attr_contextid.attr,
	&format_attr_contextid1.attr,
	&format_attr_contextid2.attr,
	&format_attr_timestamp.attr,
	&format_attr_retstack.attr,
	&format_attr_sinkid.attr,
@@ -204,6 +251,25 @@ static void etm_free_aux(void *data)
	schedule_work(&event_data->work);
}

/*
 * Check if two given sinks are compatible with each other,
 * so that they can use the same sink buffers, when an event
 * moves around.
 */
static bool sinks_compatible(struct coresight_device *a,
			     struct coresight_device *b)
{
	if (!a || !b)
		return false;
	/*
	 * If the sinks are of the same subtype and driven
	 * by the same driver, we can use the same buffer
	 * on these sinks.
	 */
	return (a->subtype.sink_subtype == b->subtype.sink_subtype) &&
	       (sink_ops(a) == sink_ops(b));
}

static void *etm_setup_aux(struct perf_event *event, void **pages,
			   int nr_pages, bool overwrite)
{
@@ -211,6 +277,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
	int cpu = event->cpu;
	cpumask_t *mask;
	struct coresight_device *sink = NULL;
	struct coresight_device *user_sink = NULL, *last_sink = NULL;
	struct etm_event_data *event_data = NULL;

	event_data = alloc_event_data(cpu);
@@ -221,7 +288,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
	/* First get the selected sink from user space. */
	if (event->attr.config2) {
		id = (u32)event->attr.config2;
		sink = coresight_get_sink_by_id(id);
		sink = user_sink = coresight_get_sink_by_id(id);
	}

	mask = &event_data->mask;
@@ -249,14 +316,33 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
	}

	/*
	 * No sink provided - look for a default sink for one of the
	 * devices. At present we only support topology where all CPUs
	 * use the same sink [N:1], so only need to find one sink. The
	 * coresight_build_path later will remove any CPU that does not
	 * attach to the sink, or if we have not found a sink.
	 * No sink provided - look for a default sink for all the ETMs,
	 * where this event can be scheduled.
	 * We allocate the sink specific buffers only once for this
	 * event. If the ETMs have different default sink devices, we
	 * can only use a single "type" of sink as the event can carry
	 * only one sink specific buffer. Thus we have to make sure
	 * that the sinks are of the same type and driven by the same
	 * driver, as the one we allocate the buffer for. As such
	 * we choose the first sink and check if the remaining ETMs
	 * have a compatible default sink. We don't trace on a CPU
	 * if the sink is not compatible.
	 */
	if (!sink)
	if (!user_sink) {
		/* Find the default sink for this ETM */
		sink = coresight_find_default_sink(csdev);
		if (!sink) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/* Check if this sink compatible with the last sink */
		if (last_sink && !sinks_compatible(last_sink, sink)) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}
		last_sink = sink;
	}

	/*
	 * Building a path doesn't enable it, it simply builds a
@@ -284,7 +370,12 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
	if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
		goto err;

	/* Allocate the sink buffer for this session */
	/*
	 * Allocate the sink buffer for this session. All the sinks
	 * where this event can be scheduled are ensured to be of the
	 * same type. Thus the same sink configuration is used by the
	 * sinks.
	 */
	event_data->snk_config =
		sink_ops(sink)->alloc_buffer(sink, event, pages,
					     nr_pages, overwrite);
@@ -304,13 +395,18 @@ static void etm_event_start(struct perf_event *event, int flags)
{
	int cpu = smp_processor_id();
	struct etm_event_data *event_data;
	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct list_head *path;

	if (!csdev)
		goto fail;

	/* Have we messed up our tracking ? */
	if (WARN_ON(ctxt->event_data))
		goto fail;

	/*
	 * Deal with the ring buffer API and get a handle on the
	 * session's information.
@@ -346,6 +442,8 @@ static void etm_event_start(struct perf_event *event, int flags)
	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
		goto fail_disable_path;

	/* Save the event_data for this ETM */
	ctxt->event_data = event_data;
out:
	return;

@@ -364,13 +462,30 @@ static void etm_event_stop(struct perf_event *event, int mode)
	int cpu = smp_processor_id();
	unsigned long size;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
	struct etm_event_data *event_data = perf_get_aux(handle);
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct etm_event_data *event_data;
	struct list_head *path;

	/*
	 * If we still have access to the event_data via handle,
	 * confirm that we haven't messed up the tracking.
	 */
	if (handle->event &&
	    WARN_ON(perf_get_aux(handle) != ctxt->event_data))
		return;

	event_data = ctxt->event_data;
	/* Clear the event_data as this ETM is stopping the trace. */
	ctxt->event_data = NULL;

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	/* We must have a valid event_data for a running event */
	if (WARN_ON(!event_data))
		return;

	if (!csdev)
		return;

@@ -388,7 +503,13 @@ static void etm_event_stop(struct perf_event *event, int mode)
	/* tell the core */
	event->hw.state = PERF_HES_STOPPED;

	if (mode & PERF_EF_UPDATE) {
	/*
	 * If the handle is not bound to an event anymore
	 * (e.g, the sink driver was unable to restart the
	 * handle due to lack of buffer space), we don't
	 * have to do anything here.
	 */
	if (handle->event && (mode & PERF_EF_UPDATE)) {
		if (WARN_ON_ONCE(handle->event != event))
			return;

@@ -358,10 +358,11 @@ static int etm_enable_hw(struct etm_drvdata *drvdata)
	int i, rc;
	u32 etmcr;
	struct etm_config *config = &drvdata->config;
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	rc = coresight_claim_device_unlocked(drvdata->base);
	rc = coresight_claim_device_unlocked(csdev);
	if (rc)
		goto done;

@@ -566,6 +567,7 @@ static void etm_disable_hw(void *info)
	int i;
	struct etm_drvdata *drvdata = info;
	struct etm_config *config = &drvdata->config;
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);
	etm_set_prog(drvdata);
@@ -577,7 +579,7 @@ static void etm_disable_hw(void *info)
		config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));

	etm_set_pwrdwn(drvdata);
	coresight_disclaim_device_unlocked(drvdata->base);
	coresight_disclaim_device_unlocked(csdev);

	CS_LOCK(drvdata->base);

@@ -602,7 +604,7 @@ static void etm_disable_perf(struct coresight_device *csdev)
	 * power down the tracer.
	 */
	etm_set_pwrdwn(drvdata);
	coresight_disclaim_device_unlocked(drvdata->base);
	coresight_disclaim_device_unlocked(csdev);

	CS_LOCK(drvdata->base);
}
@@ -839,6 +841,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
		return PTR_ERR(base);

	drvdata->base = base;
	desc.access = CSDEV_ACCESS_IOMEM(base);

	spin_lock_init(&drvdata->spinlock);

@@ -909,7 +912,7 @@ static void clear_etmdrvdata(void *info)
	etmdrvdata[cpu] = NULL;
}

static int etm_remove(struct amba_device *adev)
static void etm_remove(struct amba_device *adev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(&adev->dev);

@@ -932,8 +935,6 @@ static int etm_remove(struct amba_device *adev)
	cpus_read_unlock();

	coresight_unregister(drvdata->csdev);

	return 0;
}

#ifdef CONFIG_PM

[File diff suppressed because it is too large]
@@ -389,7 +389,7 @@ static ssize_t mode_store(struct device *dev,
		config->eventctrl1 &= ~BIT(12);

	/* bit[8], Instruction stall bit */
	if (config->mode & ETM_MODE_ISTALL_EN)
	if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
		config->stall_ctrl |= BIT(8);
	else
		config->stall_ctrl &= ~BIT(8);
@@ -743,7 +743,7 @@ static ssize_t s_exlevel_vinst_show(struct device *dev,
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = (config->vinst_ctrl & ETM_EXLEVEL_S_VICTLR_MASK) >> 16;
	val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_S_MASK) >> TRCVICTLR_EXLEVEL_S_SHIFT;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

@@ -760,10 +760,10 @@ static ssize_t s_exlevel_vinst_store(struct device *dev,

	spin_lock(&drvdata->spinlock);
	/* clear all EXLEVEL_S bits */
	config->vinst_ctrl &= ~(ETM_EXLEVEL_S_VICTLR_MASK);
	config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_S_MASK);
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->s_ex_level;
	config->vinst_ctrl |= (val << 16);
	config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_S_SHIFT);
	spin_unlock(&drvdata->spinlock);
	return size;
}
@@ -778,7 +778,7 @@ static ssize_t ns_exlevel_vinst_show(struct device *dev,
	struct etmv4_config *config = &drvdata->config;

	/* EXLEVEL_NS, bits[23:20] */
	val = (config->vinst_ctrl & ETM_EXLEVEL_NS_VICTLR_MASK) >> 20;
	val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_NS_MASK) >> TRCVICTLR_EXLEVEL_NS_SHIFT;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

@@ -795,10 +795,10 @@ static ssize_t ns_exlevel_vinst_store(struct device *dev,

	spin_lock(&drvdata->spinlock);
	/* clear EXLEVEL_NS bits */
	config->vinst_ctrl &= ~(ETM_EXLEVEL_NS_VICTLR_MASK);
	config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_NS_MASK);
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->ns_ex_level;
	config->vinst_ctrl |= (val << 20);
	config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_NS_SHIFT);
	spin_unlock(&drvdata->spinlock);
	return size;
}
@@ -2319,7 +2319,8 @@ static struct attribute *coresight_etmv4_attrs[] = {
};

struct etmv4_reg {
	void __iomem *addr;
	struct coresight_device *csdev;
	u32 offset;
	u32 data;
};

@@ -2327,15 +2328,16 @@ static void do_smp_cross_read(void *data)
{
	struct etmv4_reg *reg = data;

	reg->data = readl_relaxed(reg->addr);
	reg->data = etm4x_relaxed_read32(&reg->csdev->access, reg->offset);
}

static u32 etmv4_cross_read(const struct device *dev, u32 offset)
static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	struct etmv4_reg reg;

	reg.addr = drvdata->base + offset;
	reg.offset = offset;
	reg.csdev = drvdata->csdev;

	/*
	 * smp cross call ensures the CPU will be powered up before
	 * accessing the ETMv4 trace core registers
@@ -2344,72 +2346,133 @@ static u32 etmv4_cross_read(const struct device *dev, u32 offset)
	return reg.data;
}

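The "smp cross call" the comment refers to is the kernel's generic mechanism for running a function on a chosen CPU; a hedged sketch of the pattern (illustrative only, not the elided hunk body):

	/*
	 * Run do_smp_cross_read() on the CPU that owns this ETM and
	 * wait for it, so the target CPU is awake when the trace
	 * registers are read.
	 */
	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);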
#define coresight_etm4x_reg(name, offset)			\
	coresight_simple_reg32(struct etmv4_drvdata, name, offset)
static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
{
	struct dev_ext_attribute *eattr;

#define coresight_etm4x_cross_read(name, offset)			\
	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
			      name, offset)
	eattr = container_of(attr, struct dev_ext_attribute, attr);
	return (u32)(unsigned long)eattr->var;
}

coresight_etm4x_reg(trcpdcr, TRCPDCR);
coresight_etm4x_reg(trcpdsr, TRCPDSR);
coresight_etm4x_reg(trclsr, TRCLSR);
coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_reg(trcdevid, TRCDEVID);
coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
coresight_etm4x_reg(trcpidr0, TRCPIDR0);
coresight_etm4x_reg(trcpidr1, TRCPIDR1);
coresight_etm4x_reg(trcpidr2, TRCPIDR2);
coresight_etm4x_reg(trcpidr3, TRCPIDR3);
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);
static ssize_t coresight_etm4x_reg_show(struct device *dev,
					struct device_attribute *d_attr,
					char *buf)
{
	u32 val, offset;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	offset = coresight_etm4x_attr_to_offset(d_attr);

	pm_runtime_get_sync(dev->parent);
	val = etmv4_cross_read(drvdata, offset);
	pm_runtime_put_sync(dev->parent);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
}

static inline bool
etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
{
	switch (offset) {
	ETM_COMMON_SYSREG_LIST_CASES
		/*
		 * Common registers to ETE & ETM4x accessible via system
		 * instructions are always implemented.
		 */
		return true;

	ETM4x_ONLY_SYSREG_LIST_CASES
		/*
		 * We only support etm4x and ete. So if the device is not
		 * ETE, it must be ETMv4x.
		 */
		return !etm4x_is_ete(drvdata);

	ETM4x_MMAP_LIST_CASES
		/*
		 * Registers accessible only via memory-mapped registers
		 * must not be accessed via system instructions.
		 * We cannot access the drvdata->csdev here, as this
		 * function is called during the device creation, via
		 * coresight_register() and the csdev is not initialized
		 * until that is done. So rely on the drvdata->base to
		 * detect if we have a memory mapped access.
		 * Also ETE doesn't implement memory mapped access, thus
		 * it is sufficient to check that we are using mmio.
		 */
		return !!drvdata->base;

	ETE_ONLY_SYSREG_LIST_CASES
		return etm4x_is_ete(drvdata);
	}

	return false;
}

/*
 * Hide the ETM4x registers that may not be available on the
 * hardware.
 * There are certain management registers unavailable via system
 * instructions. Make those sysfs attributes hidden on such
 * systems.
 */
static umode_t
coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
				     struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct device_attribute *d_attr;
	u32 offset;

	d_attr = container_of(attr, struct device_attribute, attr);
	offset = coresight_etm4x_attr_to_offset(d_attr);

	if (etm4x_register_implemented(drvdata, offset))
		return attr->mode;
	return 0;
}

#define coresight_etm4x_reg(name, offset)				\
	&((struct dev_ext_attribute[]) {				\
	   {								\
		__ATTR(name, 0444, coresight_etm4x_reg_show, NULL),	\
		(void *)(unsigned long)offset				\
	   }								\
	})[0].attr.attr
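The macro above packs the register offset into dev_ext_attribute's ->var so a single show routine can serve every register. A self-contained sketch of the same round-trip (the example_* name and the 0x018 offset are hypothetical):

static ssize_t example_reg_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr =
		container_of(attr, struct dev_ext_attribute, attr);
	u32 offset = (u32)(unsigned long)eattr->var;	/* e.g. 0x018 */

	return scnprintf(buf, PAGE_SIZE, "offset: %#x\n", offset);
}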
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	coresight_etm4x_reg(trcpdcr, TRCPDCR),
	coresight_etm4x_reg(trcpdsr, TRCPDSR),
	coresight_etm4x_reg(trclsr, TRCLSR),
	coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
	coresight_etm4x_reg(trcdevid, TRCDEVID),
	coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
	coresight_etm4x_reg(trcpidr0, TRCPIDR0),
	coresight_etm4x_reg(trcpidr1, TRCPIDR1),
	coresight_etm4x_reg(trcpidr2, TRCPIDR2),
	coresight_etm4x_reg(trcpidr3, TRCPIDR3),
	coresight_etm4x_reg(trcoslsr, TRCOSLSR),
	coresight_etm4x_reg(trcconfig, TRCCONFIGR),
	coresight_etm4x_reg(trctraceid, TRCTRACEIDR),
	coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
	NULL,
};

coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);

static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	coresight_etm4x_reg(trcidr0, TRCIDR0),
	coresight_etm4x_reg(trcidr1, TRCIDR1),
	coresight_etm4x_reg(trcidr2, TRCIDR2),
	coresight_etm4x_reg(trcidr3, TRCIDR3),
	coresight_etm4x_reg(trcidr4, TRCIDR4),
	coresight_etm4x_reg(trcidr5, TRCIDR5),
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	coresight_etm4x_reg(trcidr8, TRCIDR8),
	coresight_etm4x_reg(trcidr9, TRCIDR9),
	coresight_etm4x_reg(trcidr10, TRCIDR10),
	coresight_etm4x_reg(trcidr11, TRCIDR11),
	coresight_etm4x_reg(trcidr12, TRCIDR12),
	coresight_etm4x_reg(trcidr13, TRCIDR13),
	NULL,
};

@@ -2418,6 +2481,7 @@ static const struct attribute_group coresight_etmv4_group = {
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.is_visible = coresight_etm4x_attr_reg_implemented,
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

@@ -8,6 +8,7 @@

#include <asm/local.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include "coresight-priv.h"

/*
@@ -28,6 +29,7 @@
#define TRCAUXCTLR			0x018
#define TRCEVENTCTL0R			0x020
#define TRCEVENTCTL1R			0x024
#define TRCRSR				0x028
#define TRCSTALLCTLR			0x02C
#define TRCTSCTLR			0x030
#define TRCSYNCPR			0x034
@@ -44,13 +46,14 @@
#define TRCVDSACCTLR			0x0A4
#define TRCVDARCCTLR			0x0A8
/* Derived resources registers */
#define TRCSEQEVRn(n)			(0x100 + (n * 4))
#define TRCSEQEVRn(n)			(0x100 + (n * 4)) /* n = 0-2 */
#define TRCSEQRSTEVR			0x118
#define TRCSEQSTR			0x11C
#define TRCEXTINSELR			0x120
#define TRCCNTRLDVRn(n)			(0x140 + (n * 4))
#define TRCCNTCTLRn(n)			(0x150 + (n * 4))
#define TRCCNTVRn(n)			(0x160 + (n * 4))
#define TRCEXTINSELRn(n)		(0x120 + (n * 4)) /* n = 0-3 */
#define TRCCNTRLDVRn(n)			(0x140 + (n * 4)) /* n = 0-3 */
#define TRCCNTCTLRn(n)			(0x150 + (n * 4)) /* n = 0-3 */
#define TRCCNTVRn(n)			(0x160 + (n * 4)) /* n = 0-3 */
/* ID registers */
#define TRCIDR8				0x180
#define TRCIDR9				0x184
@@ -59,7 +62,7 @@
#define TRCIDR12			0x190
#define TRCIDR13			0x194
#define TRCIMSPEC0			0x1C0
#define TRCIMSPECn(n)			(0x1C0 + (n * 4))
#define TRCIMSPECn(n)			(0x1C0 + (n * 4)) /* n = 1-7 */
#define TRCIDR0				0x1E0
#define TRCIDR1				0x1E4
#define TRCIDR2				0x1E8
@@ -68,9 +71,12 @@
#define TRCIDR5				0x1F4
#define TRCIDR6				0x1F8
#define TRCIDR7				0x1FC
/* Resource selection registers */
/*
 * Resource selection registers, n = 2-31.
 * First pair (regs 0, 1) is always present and is reserved.
 */
#define TRCRSCTLRn(n)			(0x200 + (n * 4))
/* Single-shot comparator registers */
/* Single-shot comparator registers, n = 0-7 */
#define TRCSSCCRn(n)			(0x280 + (n * 4))
#define TRCSSCSRn(n)			(0x2A0 + (n * 4))
#define TRCSSPCICRn(n)			(0x2C0 + (n * 4))
@@ -80,11 +86,13 @@
#define TRCPDCR				0x310
#define TRCPDSR				0x314
/* Trace registers (0x318-0xEFC) */
/* Comparator registers */
/* Address Comparator registers n = 0-15 */
#define TRCACVRn(n)			(0x400 + (n * 8))
#define TRCACATRn(n)			(0x480 + (n * 8))
/* Data Value Comparator Value registers, n = 0-7 */
#define TRCDVCVRn(n)			(0x500 + (n * 16))
#define TRCDVCMRn(n)			(0x580 + (n * 16))
/* ContextID/Virtual ContextID comparators, n = 0-7 */
#define TRCCIDCVRn(n)			(0x600 + (n * 8))
#define TRCVMIDCVRn(n)			(0x640 + (n * 8))
#define TRCCIDCCTLR0			0x680
@@ -120,6 +128,368 @@
#define TRCCIDR2			0xFF8
#define TRCCIDR3			0xFFC

#define TRCRSR_TA			BIT(12)

/*
 * System instructions to access ETM registers.
 * See ETMv4.4 spec ARM IHI0064F section 4.3.6 System instructions
 */
#define ETM4x_OFFSET_TO_REG(x)		((x) >> 2)

#define ETM4x_CRn(n)			(((n) >> 7) & 0x7)
#define ETM4x_Op2(n)			(((n) >> 4) & 0x7)
#define ETM4x_CRm(n)			((n) & 0xf)

#include <asm/sysreg.h>
#define ETM4x_REG_NUM_TO_SYSREG(n)				\
	sys_reg(2, 1, ETM4x_CRn(n), ETM4x_CRm(n), ETM4x_Op2(n))

#define READ_ETM4x_REG(reg)					\
	read_sysreg_s(ETM4x_REG_NUM_TO_SYSREG((reg)))
#define WRITE_ETM4x_REG(val, reg)				\
	write_sysreg_s(val, ETM4x_REG_NUM_TO_SYSREG((reg)))

#define read_etm4x_sysreg_const_offset(offset)			\
	READ_ETM4x_REG(ETM4x_OFFSET_TO_REG(offset))

#define write_etm4x_sysreg_const_offset(val, offset)		\
	WRITE_ETM4x_REG(val, ETM4x_OFFSET_TO_REG(offset))
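To make the encoding concrete, a worked example derived from the macros above (the register choice is ours):

/*
 * TRCPRGCTLR sits at memory-mapped offset 0x004:
 *   ETM4x_OFFSET_TO_REG(0x004) = 0x004 >> 2 = 1
 *   ETM4x_CRn(1) = 0, ETM4x_CRm(1) = 1, ETM4x_Op2(1) = 0
 * so ETM4x_REG_NUM_TO_SYSREG(1) expands to sys_reg(2, 1, 0, 1, 0),
 * the s2_1_c0_c1_0 system-instruction encoding of TRCPRGCTLR.
 */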
#define CASE_READ(res, x)					\
	case (x): { (res) = read_etm4x_sysreg_const_offset((x)); break; }

#define CASE_WRITE(val, x)					\
	case (x): { write_etm4x_sysreg_const_offset((val), (x)); break; }

#define CASE_NOP(__unused, x)					\
	case (x):	/* fall through */

#define ETE_ONLY_SYSREG_LIST(op, val)		\
	CASE_##op((val), TRCRSR)		\
	CASE_##op((val), TRCEXTINSELRn(1))	\
	CASE_##op((val), TRCEXTINSELRn(2))	\
	CASE_##op((val), TRCEXTINSELRn(3))

/* List of registers accessible via System instructions */
#define ETM4x_ONLY_SYSREG_LIST(op, val)		\
	CASE_##op((val), TRCPROCSELR)		\
	CASE_##op((val), TRCVDCTLR)		\
	CASE_##op((val), TRCVDSACCTLR)		\
	CASE_##op((val), TRCVDARCCTLR)		\
	CASE_##op((val), TRCOSLAR)

#define ETM_COMMON_SYSREG_LIST(op, val)		\
	CASE_##op((val), TRCPRGCTLR)		\
	CASE_##op((val), TRCSTATR)		\
	CASE_##op((val), TRCCONFIGR)		\
	CASE_##op((val), TRCAUXCTLR)		\
	CASE_##op((val), TRCEVENTCTL0R)		\
	CASE_##op((val), TRCEVENTCTL1R)		\
	CASE_##op((val), TRCSTALLCTLR)		\
	CASE_##op((val), TRCTSCTLR)		\
	CASE_##op((val), TRCSYNCPR)		\
	CASE_##op((val), TRCCCCTLR)		\
	CASE_##op((val), TRCBBCTLR)		\
	CASE_##op((val), TRCTRACEIDR)		\
	CASE_##op((val), TRCQCTLR)		\
	CASE_##op((val), TRCVICTLR)		\
	CASE_##op((val), TRCVIIECTLR)		\
	CASE_##op((val), TRCVISSCTLR)		\
	CASE_##op((val), TRCVIPCSSCTLR)		\
	CASE_##op((val), TRCSEQEVRn(0))		\
	CASE_##op((val), TRCSEQEVRn(1))		\
	CASE_##op((val), TRCSEQEVRn(2))		\
	CASE_##op((val), TRCSEQRSTEVR)		\
	CASE_##op((val), TRCSEQSTR)		\
	CASE_##op((val), TRCEXTINSELR)		\
	CASE_##op((val), TRCCNTRLDVRn(0))	\
	CASE_##op((val), TRCCNTRLDVRn(1))	\
	CASE_##op((val), TRCCNTRLDVRn(2))	\
	CASE_##op((val), TRCCNTRLDVRn(3))	\
	CASE_##op((val), TRCCNTCTLRn(0))	\
	CASE_##op((val), TRCCNTCTLRn(1))	\
	CASE_##op((val), TRCCNTCTLRn(2))	\
	CASE_##op((val), TRCCNTCTLRn(3))	\
	CASE_##op((val), TRCCNTVRn(0))		\
	CASE_##op((val), TRCCNTVRn(1))		\
	CASE_##op((val), TRCCNTVRn(2))		\
	CASE_##op((val), TRCCNTVRn(3))		\
	CASE_##op((val), TRCIDR8)		\
	CASE_##op((val), TRCIDR9)		\
	CASE_##op((val), TRCIDR10)		\
	CASE_##op((val), TRCIDR11)		\
	CASE_##op((val), TRCIDR12)		\
	CASE_##op((val), TRCIDR13)		\
	CASE_##op((val), TRCIMSPECn(0))		\
	CASE_##op((val), TRCIMSPECn(1))		\
	CASE_##op((val), TRCIMSPECn(2))		\
	CASE_##op((val), TRCIMSPECn(3))		\
	CASE_##op((val), TRCIMSPECn(4))		\
	CASE_##op((val), TRCIMSPECn(5))		\
	CASE_##op((val), TRCIMSPECn(6))		\
	CASE_##op((val), TRCIMSPECn(7))		\
	CASE_##op((val), TRCIDR0)		\
	CASE_##op((val), TRCIDR1)		\
	CASE_##op((val), TRCIDR2)		\
	CASE_##op((val), TRCIDR3)		\
	CASE_##op((val), TRCIDR4)		\
	CASE_##op((val), TRCIDR5)		\
	CASE_##op((val), TRCIDR6)		\
	CASE_##op((val), TRCIDR7)		\
	CASE_##op((val), TRCRSCTLRn(2))		\
	CASE_##op((val), TRCRSCTLRn(3))		\
	CASE_##op((val), TRCRSCTLRn(4))		\
	CASE_##op((val), TRCRSCTLRn(5))		\
	CASE_##op((val), TRCRSCTLRn(6))		\
	CASE_##op((val), TRCRSCTLRn(7))		\
	CASE_##op((val), TRCRSCTLRn(8))		\
	CASE_##op((val), TRCRSCTLRn(9))		\
	CASE_##op((val), TRCRSCTLRn(10))	\
	CASE_##op((val), TRCRSCTLRn(11))	\
	CASE_##op((val), TRCRSCTLRn(12))	\
	CASE_##op((val), TRCRSCTLRn(13))	\
	CASE_##op((val), TRCRSCTLRn(14))	\
	CASE_##op((val), TRCRSCTLRn(15))	\
	CASE_##op((val), TRCRSCTLRn(16))	\
	CASE_##op((val), TRCRSCTLRn(17))	\
	CASE_##op((val), TRCRSCTLRn(18))	\
	CASE_##op((val), TRCRSCTLRn(19))	\
	CASE_##op((val), TRCRSCTLRn(20))	\
	CASE_##op((val), TRCRSCTLRn(21))	\
	CASE_##op((val), TRCRSCTLRn(22))	\
	CASE_##op((val), TRCRSCTLRn(23))	\
	CASE_##op((val), TRCRSCTLRn(24))	\
	CASE_##op((val), TRCRSCTLRn(25))	\
	CASE_##op((val), TRCRSCTLRn(26))	\
	CASE_##op((val), TRCRSCTLRn(27))	\
	CASE_##op((val), TRCRSCTLRn(28))	\
	CASE_##op((val), TRCRSCTLRn(29))	\
	CASE_##op((val), TRCRSCTLRn(30))	\
	CASE_##op((val), TRCRSCTLRn(31))	\
	CASE_##op((val), TRCSSCCRn(0))		\
	CASE_##op((val), TRCSSCCRn(1))		\
	CASE_##op((val), TRCSSCCRn(2))		\
	CASE_##op((val), TRCSSCCRn(3))		\
	CASE_##op((val), TRCSSCCRn(4))		\
	CASE_##op((val), TRCSSCCRn(5))		\
	CASE_##op((val), TRCSSCCRn(6))		\
	CASE_##op((val), TRCSSCCRn(7))		\
	CASE_##op((val), TRCSSCSRn(0))		\
	CASE_##op((val), TRCSSCSRn(1))		\
	CASE_##op((val), TRCSSCSRn(2))		\
	CASE_##op((val), TRCSSCSRn(3))		\
	CASE_##op((val), TRCSSCSRn(4))		\
	CASE_##op((val), TRCSSCSRn(5))		\
	CASE_##op((val), TRCSSCSRn(6))		\
	CASE_##op((val), TRCSSCSRn(7))		\
	CASE_##op((val), TRCSSPCICRn(0))	\
	CASE_##op((val), TRCSSPCICRn(1))	\
	CASE_##op((val), TRCSSPCICRn(2))	\
	CASE_##op((val), TRCSSPCICRn(3))	\
	CASE_##op((val), TRCSSPCICRn(4))	\
	CASE_##op((val), TRCSSPCICRn(5))	\
	CASE_##op((val), TRCSSPCICRn(6))	\
	CASE_##op((val), TRCSSPCICRn(7))	\
	CASE_##op((val), TRCOSLSR)		\
	CASE_##op((val), TRCACVRn(0))		\
	CASE_##op((val), TRCACVRn(1))		\
	CASE_##op((val), TRCACVRn(2))		\
	CASE_##op((val), TRCACVRn(3))		\
	CASE_##op((val), TRCACVRn(4))		\
	CASE_##op((val), TRCACVRn(5))		\
	CASE_##op((val), TRCACVRn(6))		\
	CASE_##op((val), TRCACVRn(7))		\
	CASE_##op((val), TRCACVRn(8))		\
	CASE_##op((val), TRCACVRn(9))		\
	CASE_##op((val), TRCACVRn(10))		\
	CASE_##op((val), TRCACVRn(11))		\
	CASE_##op((val), TRCACVRn(12))		\
	CASE_##op((val), TRCACVRn(13))		\
	CASE_##op((val), TRCACVRn(14))		\
	CASE_##op((val), TRCACVRn(15))		\
	CASE_##op((val), TRCACATRn(0))		\
	CASE_##op((val), TRCACATRn(1))		\
	CASE_##op((val), TRCACATRn(2))		\
	CASE_##op((val), TRCACATRn(3))		\
	CASE_##op((val), TRCACATRn(4))		\
	CASE_##op((val), TRCACATRn(5))		\
	CASE_##op((val), TRCACATRn(6))		\
	CASE_##op((val), TRCACATRn(7))		\
	CASE_##op((val), TRCACATRn(8))		\
	CASE_##op((val), TRCACATRn(9))		\
	CASE_##op((val), TRCACATRn(10))		\
	CASE_##op((val), TRCACATRn(11))		\
	CASE_##op((val), TRCACATRn(12))		\
	CASE_##op((val), TRCACATRn(13))		\
	CASE_##op((val), TRCACATRn(14))		\
	CASE_##op((val), TRCACATRn(15))		\
	CASE_##op((val), TRCDVCVRn(0))		\
	CASE_##op((val), TRCDVCVRn(1))		\
	CASE_##op((val), TRCDVCVRn(2))		\
	CASE_##op((val), TRCDVCVRn(3))		\
	CASE_##op((val), TRCDVCVRn(4))		\
	CASE_##op((val), TRCDVCVRn(5))		\
	CASE_##op((val), TRCDVCVRn(6))		\
	CASE_##op((val), TRCDVCVRn(7))		\
	CASE_##op((val), TRCDVCMRn(0))		\
	CASE_##op((val), TRCDVCMRn(1))		\
	CASE_##op((val), TRCDVCMRn(2))		\
	CASE_##op((val), TRCDVCMRn(3))		\
	CASE_##op((val), TRCDVCMRn(4))		\
	CASE_##op((val), TRCDVCMRn(5))		\
	CASE_##op((val), TRCDVCMRn(6))		\
	CASE_##op((val), TRCDVCMRn(7))		\
	CASE_##op((val), TRCCIDCVRn(0))		\
	CASE_##op((val), TRCCIDCVRn(1))		\
	CASE_##op((val), TRCCIDCVRn(2))		\
	CASE_##op((val), TRCCIDCVRn(3))		\
	CASE_##op((val), TRCCIDCVRn(4))		\
	CASE_##op((val), TRCCIDCVRn(5))		\
	CASE_##op((val), TRCCIDCVRn(6))		\
	CASE_##op((val), TRCCIDCVRn(7))		\
	CASE_##op((val), TRCVMIDCVRn(0))	\
	CASE_##op((val), TRCVMIDCVRn(1))	\
	CASE_##op((val), TRCVMIDCVRn(2))	\
	CASE_##op((val), TRCVMIDCVRn(3))	\
	CASE_##op((val), TRCVMIDCVRn(4))	\
	CASE_##op((val), TRCVMIDCVRn(5))	\
	CASE_##op((val), TRCVMIDCVRn(6))	\
	CASE_##op((val), TRCVMIDCVRn(7))	\
	CASE_##op((val), TRCCIDCCTLR0)		\
	CASE_##op((val), TRCCIDCCTLR1)		\
	CASE_##op((val), TRCVMIDCCTLR0)		\
	CASE_##op((val), TRCVMIDCCTLR1)		\
	CASE_##op((val), TRCCLAIMSET)		\
	CASE_##op((val), TRCCLAIMCLR)		\
	CASE_##op((val), TRCAUTHSTATUS)		\
	CASE_##op((val), TRCDEVARCH)		\
	CASE_##op((val), TRCDEVID)

/* List of registers only accessible via memory-mapped interface */
#define ETM_MMAP_LIST(op, val)			\
	CASE_##op((val), TRCDEVTYPE)		\
	CASE_##op((val), TRCPDCR)		\
	CASE_##op((val), TRCPDSR)		\
	CASE_##op((val), TRCDEVAFF0)		\
	CASE_##op((val), TRCDEVAFF1)		\
	CASE_##op((val), TRCLAR)		\
	CASE_##op((val), TRCLSR)		\
	CASE_##op((val), TRCITCTRL)		\
	CASE_##op((val), TRCPIDR4)		\
	CASE_##op((val), TRCPIDR0)		\
	CASE_##op((val), TRCPIDR1)		\
	CASE_##op((val), TRCPIDR2)		\
	CASE_##op((val), TRCPIDR3)

#define ETM4x_READ_SYSREG_CASES(res)		\
	ETM_COMMON_SYSREG_LIST(READ, (res))	\
	ETM4x_ONLY_SYSREG_LIST(READ, (res))

#define ETM4x_WRITE_SYSREG_CASES(val)		\
	ETM_COMMON_SYSREG_LIST(WRITE, (val))	\
	ETM4x_ONLY_SYSREG_LIST(WRITE, (val))

#define ETM_COMMON_SYSREG_LIST_CASES		\
	ETM_COMMON_SYSREG_LIST(NOP, __unused)

#define ETM4x_ONLY_SYSREG_LIST_CASES		\
	ETM4x_ONLY_SYSREG_LIST(NOP, __unused)

#define ETM4x_SYSREG_LIST_CASES			\
	ETM_COMMON_SYSREG_LIST_CASES		\
	ETM4x_ONLY_SYSREG_LIST(NOP, __unused)

#define ETM4x_MMAP_LIST_CASES		ETM_MMAP_LIST(NOP, __unused)

/* ETE only supports system register access */
#define ETE_READ_CASES(res)			\
	ETM_COMMON_SYSREG_LIST(READ, (res))	\
	ETE_ONLY_SYSREG_LIST(READ, (res))

#define ETE_WRITE_CASES(val)			\
	ETM_COMMON_SYSREG_LIST(WRITE, (val))	\
	ETE_ONLY_SYSREG_LIST(WRITE, (val))

#define ETE_ONLY_SYSREG_LIST_CASES		\
	ETE_ONLY_SYSREG_LIST(NOP, __unused)

#define read_etm4x_sysreg_offset(offset, _64bit)			\
	({								\
		u64 __val;						\
									\
		if (__builtin_constant_p((offset)))			\
			__val = read_etm4x_sysreg_const_offset((offset)); \
		else							\
			__val = etm4x_sysreg_read((offset), true, (_64bit)); \
		__val;							\
	})

#define write_etm4x_sysreg_offset(val, offset, _64bit)			\
	do {								\
		if (__builtin_constant_p((offset)))			\
			write_etm4x_sysreg_const_offset((val),		\
							(offset));	\
		else							\
			etm4x_sysreg_write((val), (offset), true,	\
					   (_64bit));			\
	} while (0)


#define etm4x_relaxed_read32(csa, offset)				\
	((u32)((csa)->io_mem ?						\
		 readl_relaxed((csa)->base + (offset)) :		\
		 read_etm4x_sysreg_offset((offset), false)))

#define etm4x_relaxed_read64(csa, offset)				\
	((u64)((csa)->io_mem ?						\
		 readq_relaxed((csa)->base + (offset)) :		\
		 read_etm4x_sysreg_offset((offset), true)))

#define etm4x_read32(csa, offset)					\
	({								\
		u32 __val = etm4x_relaxed_read32((csa), (offset));	\
		__iormb(__val);						\
		__val;							\
	})

#define etm4x_read64(csa, offset)					\
	({								\
		u64 __val = etm4x_relaxed_read64((csa), (offset));	\
		__iormb(__val);						\
		__val;							\
	})

#define etm4x_relaxed_write32(csa, val, offset)				\
	do {								\
		if ((csa)->io_mem)					\
			writel_relaxed((val), (csa)->base + (offset));	\
		else							\
			write_etm4x_sysreg_offset((val), (offset),	\
						  false);		\
	} while (0)

#define etm4x_relaxed_write64(csa, val, offset)				\
	do {								\
		if ((csa)->io_mem)					\
			writeq_relaxed((val), (csa)->base + (offset));	\
		else							\
			write_etm4x_sysreg_offset((val), (offset),	\
						  true);		\
	} while (0)

#define etm4x_write32(csa, val, offset)					\
	do {								\
		__iowmb();						\
		etm4x_relaxed_write32((csa), (val), (offset));		\
	} while (0)

#define etm4x_write64(csa, val, offset)					\
	do {								\
		__iowmb();						\
		etm4x_relaxed_write64((csa), (val), (offset));		\
	} while (0)

|
||||
#define ETM_MAX_NR_PE 8
|
||||
#define ETMv4_MAX_CNTR 4
|
||||
@@ -136,7 +506,6 @@
|
||||
#define ETM_MAX_RES_SEL 32
|
||||
#define ETM_MAX_SS_CMP 8
|
||||
|
||||
#define ETM_ARCH_V4 0x40
|
||||
#define ETMv4_SYNC_MASK 0x1F
|
||||
#define ETM_CYC_THRESHOLD_MASK 0xFFF
|
||||
#define ETM_CYC_THRESHOLD_DEFAULT 0x100
|
||||
@@ -174,34 +543,174 @@
|
||||
ETM_MODE_EXCL_KERN | \
|
||||
ETM_MODE_EXCL_USER)
|
||||
|
||||
/*
|
||||
* TRCOSLSR.OSLM advertises the OS Lock model.
|
||||
* OSLM[2:0] = TRCOSLSR[4:3,0]
|
||||
*
|
||||
* 0b000 - Trace OS Lock is not implemented.
|
||||
* 0b010 - Trace OS Lock is implemented.
|
||||
* 0b100 - Trace OS Lock is not implemented, unit is controlled by PE OS Lock.
|
||||
*/
|
||||
#define ETM_OSLOCK_NI 0b000
|
||||
#define ETM_OSLOCK_PRESENT 0b010
|
||||
#define ETM_OSLOCK_PE 0b100
|
||||
|
||||
#define ETM_OSLSR_OSLM(oslsr) ((((oslsr) & GENMASK(4, 3)) >> 2) | (oslsr & 0x1))
|
||||
|
||||
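A worked example of the OSLM extraction (the sample value is ours):

/*
 * Example: TRCOSLSR = 0x10, i.e. bit[4] set, bits[3,0] clear.
 *   ((0x10 & GENMASK(4, 3)) >> 2) | (0x10 & 0x1) = 0b100
 * so ETM_OSLSR_OSLM(0x10) == ETM_OSLOCK_PE: no Trace OS Lock,
 * the unit is controlled by the PE OS Lock instead.
 */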
/*
 * TRCDEVARCH Bit field definitions
 * Bits[31:21]	- ARCHITECT = Always Arm Ltd.
 *		* Bits[31:28] = 0x4
 *		* Bits[27:21] = 0b0111011
 * Bit[20]	- PRESENT, Indicates the presence of this register.
 *
 * Bit[19:16]	- REVISION, Revision of the architecture.
 *
 * Bit[15:0]	- ARCHID, Identifies this component as an ETM
 *		* Bits[15:12] - architecture version of ETM
 *		*             = 4 for ETMv4
 *		* Bits[11:0] = 0xA13, architecture part number for ETM.
 */
#define ETM_DEVARCH_ARCHITECT_MASK		GENMASK(31, 21)
#define ETM_DEVARCH_ARCHITECT_ARM		((0x4 << 28) | (0b0111011 << 21))
#define ETM_DEVARCH_PRESENT			BIT(20)
#define ETM_DEVARCH_REVISION_SHIFT		16
#define ETM_DEVARCH_REVISION_MASK		GENMASK(19, 16)
#define ETM_DEVARCH_REVISION(x)			\
	(((x) & ETM_DEVARCH_REVISION_MASK) >> ETM_DEVARCH_REVISION_SHIFT)
#define ETM_DEVARCH_ARCHID_MASK			GENMASK(15, 0)
#define ETM_DEVARCH_ARCHID_ARCH_VER_SHIFT	12
#define ETM_DEVARCH_ARCHID_ARCH_VER_MASK	GENMASK(15, 12)
#define ETM_DEVARCH_ARCHID_ARCH_VER(x)		\
	(((x) & ETM_DEVARCH_ARCHID_ARCH_VER_MASK) >> ETM_DEVARCH_ARCHID_ARCH_VER_SHIFT)

#define ETM_DEVARCH_MAKE_ARCHID_ARCH_VER(ver)	\
	(((ver) << ETM_DEVARCH_ARCHID_ARCH_VER_SHIFT) & ETM_DEVARCH_ARCHID_ARCH_VER_MASK)

#define ETM_DEVARCH_ARCHID_ARCH_PART(x)		((x) & 0xfffUL)

#define ETM_DEVARCH_MAKE_ARCHID(major)		\
	((ETM_DEVARCH_MAKE_ARCHID_ARCH_VER(major)) | ETM_DEVARCH_ARCHID_ARCH_PART(0xA13))

#define ETM_DEVARCH_ARCHID_ETMv4x		ETM_DEVARCH_MAKE_ARCHID(0x4)
#define ETM_DEVARCH_ARCHID_ETE			ETM_DEVARCH_MAKE_ARCHID(0x5)

#define ETM_DEVARCH_ID_MASK						\
	(ETM_DEVARCH_ARCHITECT_MASK | ETM_DEVARCH_ARCHID_MASK | ETM_DEVARCH_PRESENT)
#define ETM_DEVARCH_ETMv4x_ARCH						\
	(ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETMv4x | ETM_DEVARCH_PRESENT)
#define ETM_DEVARCH_ETE_ARCH						\
	(ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETE | ETM_DEVARCH_PRESENT)
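Plugging the macros together gives a concrete match value (arithmetic derived from the definitions above):

/*
 * ETM_DEVARCH_ETMv4x_ARCH expands to:
 *   ARCHITECT_ARM = (0x4 << 28) | (0b0111011 << 21) = 0x47600000
 *   ARCHID_ETMv4x = (0x4 << 12) | 0xA13             = 0x00004A13
 *   PRESENT       = BIT(20)                         = 0x00100000
 * so TRCDEVARCH & ETM_DEVARCH_ID_MASK must read 0x47704A13 on an
 * ETMv4x implementation.
 */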
#define TRCSTATR_IDLE_BIT		0
#define TRCSTATR_PMSTABLE_BIT		1
#define ETM_DEFAULT_ADDR_COMP		0

#define TRCSSCSRn_PC			BIT(3)

/* PowerDown Control Register bits */
#define TRCPDCR_PU			BIT(3)

/* secure state access levels - TRCACATRn */
#define ETM_EXLEVEL_S_APP		BIT(8)
#define ETM_EXLEVEL_S_OS		BIT(9)
#define ETM_EXLEVEL_S_HYP		BIT(10)
#define ETM_EXLEVEL_S_MON		BIT(11)
/* non-secure state access levels - TRCACATRn */
#define ETM_EXLEVEL_NS_APP		BIT(12)
#define ETM_EXLEVEL_NS_OS		BIT(13)
#define ETM_EXLEVEL_NS_HYP		BIT(14)
#define ETM_EXLEVEL_NS_NA		BIT(15)
#define TRCACATR_EXLEVEL_SHIFT		8

/* access level control in TRCVICTLR - same bits as TRCACATRn but shifted */
#define ETM_EXLEVEL_LSHIFT_TRCVICTLR	8
/*
 * Exception level mask for Secure and Non-Secure ELs.
 * ETM defines the bits for EL control (e.g, TRVICTLR, TRCACTRn).
 * The Secure and Non-Secure ELs are always together.
 * Non-secure EL3 is never implemented.
 * We use the following generic mask as they appear in different
 * registers and this can be shifted for the appropriate
 * fields.
 */
#define ETM_EXLEVEL_S_APP	BIT(0)	/* Secure EL0		*/
#define ETM_EXLEVEL_S_OS	BIT(1)	/* Secure EL1		*/
#define ETM_EXLEVEL_S_HYP	BIT(2)	/* Secure EL2		*/
#define ETM_EXLEVEL_S_MON	BIT(3)	/* Secure EL3/Monitor	*/
#define ETM_EXLEVEL_NS_APP	BIT(4)	/* NonSecure EL0	*/
#define ETM_EXLEVEL_NS_OS	BIT(5)	/* NonSecure EL1	*/
#define ETM_EXLEVEL_NS_HYP	BIT(6)	/* NonSecure EL2	*/

#define ETM_EXLEVEL_MASK	(GENMASK(6, 0))
#define ETM_EXLEVEL_S_MASK	(GENMASK(3, 0))
#define ETM_EXLEVEL_NS_MASK	(GENMASK(6, 4))

/* access level controls in TRCACATRn */
#define TRCACATR_EXLEVEL_SHIFT		8

/* access level control in TRCVICTLR */
#define TRCVICTLR_EXLEVEL_SHIFT		16
#define TRCVICTLR_EXLEVEL_S_SHIFT	16
#define TRCVICTLR_EXLEVEL_NS_SHIFT	20

/* secure / non secure masks - TRCVICTLR, IDR3 */
#define ETM_EXLEVEL_S_VICTLR_MASK	GENMASK(19, 16)
/* NS MON (EL3) mode never implemented */
#define ETM_EXLEVEL_NS_VICTLR_MASK	GENMASK(22, 20)
#define TRCVICTLR_EXLEVEL_MASK		(ETM_EXLEVEL_MASK << TRCVICTLR_EXLEVEL_SHIFT)
#define TRCVICTLR_EXLEVEL_S_MASK	(ETM_EXLEVEL_S_MASK << TRCVICTLR_EXLEVEL_SHIFT)
#define TRCVICTLR_EXLEVEL_NS_MASK	(ETM_EXLEVEL_NS_MASK << TRCVICTLR_EXLEVEL_SHIFT)

#define ETM_TRCIDR1_ARCH_MAJOR_SHIFT	8
#define ETM_TRCIDR1_ARCH_MAJOR_MASK	(0xfU << ETM_TRCIDR1_ARCH_MAJOR_SHIFT)
#define ETM_TRCIDR1_ARCH_MAJOR(x)	\
	(((x) & ETM_TRCIDR1_ARCH_MAJOR_MASK) >> ETM_TRCIDR1_ARCH_MAJOR_SHIFT)
#define ETM_TRCIDR1_ARCH_MINOR_SHIFT	4
#define ETM_TRCIDR1_ARCH_MINOR_MASK	(0xfU << ETM_TRCIDR1_ARCH_MINOR_SHIFT)
#define ETM_TRCIDR1_ARCH_MINOR(x)	\
	(((x) & ETM_TRCIDR1_ARCH_MINOR_MASK) >> ETM_TRCIDR1_ARCH_MINOR_SHIFT)
#define ETM_TRCIDR1_ARCH_SHIFT		ETM_TRCIDR1_ARCH_MINOR_SHIFT
#define ETM_TRCIDR1_ARCH_MASK		\
	(ETM_TRCIDR1_ARCH_MAJOR_MASK | ETM_TRCIDR1_ARCH_MINOR_MASK)

#define ETM_TRCIDR1_ARCH_ETMv4		0x4

/*
 * Driver representation of the ETM architecture.
 * The version of an ETM component can be detected from
 *
 * TRCDEVARCH	- CoreSight architected register
 *		- Bits[15:12] - Major version
 *		- Bits[19:16] - Minor version
 * TRCIDR1	- ETM architected register
 *		- Bits[11:8] - Major version
 *		- Bits[7:4]  - Minor version
 * We must rely on TRCDEVARCH for the version information,
 * however we don't want to break the support for potential
 * old implementations which might not implement it. Thus
 * we fall back to TRCIDR1 if TRCDEVARCH is not implemented
 * for memory mapped components.
 * Now to make certain decisions easier based on the version
 * we use an internal representation of the version in the
 * driver, as follows :
 *
 * ETM_ARCH_VERSION[7:0], where :
 *	Bits[7:4] - Major version
* Bits[3:0] - Minro version
|
||||
*/
|
||||
#define ETM_ARCH_VERSION(major, minor) \
|
||||
((((major) & 0xfU) << 4) | (((minor) & 0xfU)))
|
||||
#define ETM_ARCH_MAJOR_VERSION(arch) (((arch) >> 4) & 0xfU)
|
||||
#define ETM_ARCH_MINOR_VERSION(arch) ((arch) & 0xfU)
|
||||
|
||||
#define ETM_ARCH_V4 ETM_ARCH_VERSION(4, 0)
|
||||
#define ETM_ARCH_ETE ETM_ARCH_VERSION(5, 0)
|
||||
|
||||
/* Interpretation of resource numbers change at ETM v4.3 architecture */
|
||||
#define ETM4X_ARCH_4V3 0x43
|
||||
#define ETM_ARCH_V4_3 ETM_ARCH_VERSION(4, 3)
|
||||
|
||||
static inline u8 etm_devarch_to_arch(u32 devarch)
|
||||
{
|
||||
return ETM_ARCH_VERSION(ETM_DEVARCH_ARCHID_ARCH_VER(devarch),
|
||||
ETM_DEVARCH_REVISION(devarch));
|
||||
}
|
||||
|
||||
static inline u8 etm_trcidr_to_arch(u32 trcidr1)
|
||||
{
|
||||
return ETM_ARCH_VERSION(ETM_TRCIDR1_ARCH_MAJOR(trcidr1),
|
||||
ETM_TRCIDR1_ARCH_MINOR(trcidr1));
|
||||
}
|
||||
|
||||
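
The packed [7:4]/[3:0] encoding keeps version comparisons simple (as etm4x_is_ete() further down shows). A tiny user-space round-trip demo (not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define ETM_ARCH_VERSION(major, minor) \
        ((((major) & 0xfU) << 4) | ((minor) & 0xfU))
    #define ETM_ARCH_MAJOR_VERSION(arch)   (((arch) >> 4) & 0xfU)
    #define ETM_ARCH_MINOR_VERSION(arch)   ((arch) & 0xfU)

    int main(void)
    {
        uint8_t arch = ETM_ARCH_VERSION(4, 3);  /* ETM_ARCH_V4_3 */

        /* Prints "arch=0x43 major=4 minor=3", matching ETM4X_ARCH_4V3. */
        printf("arch=%#x major=%u minor=%u\n", arch,
               ETM_ARCH_MAJOR_VERSION(arch), ETM_ARCH_MINOR_VERSION(arch));
        return 0;
    }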

enum etm_impdef_type {
	ETM4_IMPDEF_HISI_CORE_COMMIT,
	ETM4_IMPDEF_FEATURE_MAX,
};

/**
 * struct etmv4_config - configuration information related to an ETMv4
@@ -250,7 +759,7 @@
 * @vmid_mask0: VM ID comparator mask for comparator 0-3.
 * @vmid_mask1: VM ID comparator mask for comparator 4-7.
 * @ext_inp: External input selection.
 * @arch: ETM architecture version (for arch dependent config).
 * @s_ex_level: Secure ELs where tracing is supported.
 */
struct etmv4_config {
	u32	mode;
@@ -294,7 +803,7 @@ struct etmv4_config {
	u32	vmid_mask0;
	u32	vmid_mask1;
	u32	ext_inp;
	u8	arch;
	u8	s_ex_level;
};

/**
@@ -363,7 +872,7 @@ struct etmv4_save_state {
 * @spinlock: Only one at a time pls.
 * @mode: This tracer's mode, i.e sysFS, Perf or disabled.
 * @cpu: The cpu this component is affined to.
 * @arch: ETM version number.
 * @arch: ETM architecture version.
 * @nr_pe: The number of processing entity available for tracing.
 * @nr_pe_cmp: The number of processing entity comparator inputs that are
 *	available for tracing.
@@ -410,11 +919,13 @@ struct etmv4_save_state {
 * @nooverflow: Indicate if overflow prevention is supported.
 * @atbtrig: If the implementation can support ATB triggers
 * @lpoverride: If the implementation can support low-power state over.
 * @trfc: If the implementation supports Arm v8.4 trace filter controls.
 * @config: structure holding configuration parameters.
 * @save_state: State to be preserved across power loss
 * @state_needs_restore: True when there is context to restore after PM exit
 * @skip_power_up: Indicates if an implementation can skip powering up
 *	the trace unit.
 * @arch_features: Bitmap of arch features of etmv4 devices.
 */
struct etmv4_drvdata {
	void __iomem	*base;
@@ -444,6 +955,7 @@ struct etmv4_drvdata {
	u8	s_ex_level;
	u8	ns_ex_level;
	u8	q_support;
	u8	os_lock_model;
	bool	sticky_enable;
	bool	boot_enable;
	bool	os_unlock;
@@ -459,10 +971,12 @@ struct etmv4_drvdata {
	bool	nooverflow;
	bool	atbtrig;
	bool	lpoverride;
	bool	trfc;
	struct etmv4_config	config;
	struct etmv4_save_state	*save_state;
	bool	state_needs_restore;
	bool	skip_power_up;
	DECLARE_BITMAP(arch_features, ETM4_IMPDEF_FEATURE_MAX);
};

/* Address comparator access types */
@@ -483,4 +997,12 @@ enum etm_addr_ctxtype {

extern const struct attribute_group *coresight_etmv4_groups[];
void etm4_config_trace_mode(struct etmv4_config *config);

u64 etm4x_sysreg_read(u32 offset, bool _relaxed, bool _64bit);
void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit);

static inline bool etm4x_is_ete(struct etmv4_drvdata *drvdata)
{
	return drvdata->arch >= ETM_ARCH_ETE;
}
#endif
@@ -53,13 +53,14 @@ static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
{
	u32 functl;
	int rc = 0;
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
	/* Claim the device only when we enable the first slave */
	if (!(functl & FUNNEL_ENSx_MASK)) {
		rc = coresight_claim_device_unlocked(drvdata->base);
		rc = coresight_claim_device_unlocked(csdev);
		if (rc)
			goto done;
	}
@@ -102,6 +103,7 @@ static void dynamic_funnel_disable_hw(struct funnel_drvdata *drvdata,
				      int inport)
{
	u32 functl;
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

@@ -111,7 +113,7 @@ static void dynamic_funnel_disable_hw(struct funnel_drvdata *drvdata,

	/* Disclaim the device if none of the slaves are now active */
	if (!(functl & FUNNEL_ENSx_MASK))
		coresight_disclaim_device_unlocked(drvdata->base);
		coresight_disclaim_device_unlocked(csdev);

	CS_LOCK(drvdata->base);
}
@@ -282,6 +284,7 @@ static int funnel_probe(struct device *dev, struct resource *res)
		}
		drvdata->base = base;
		desc.groups = coresight_funnel_groups;
		desc.access = CSDEV_ACCESS_IOMEM(base);
	}

	dev_set_drvdata(dev, drvdata);
@@ -410,9 +413,9 @@ static int dynamic_funnel_probe(struct amba_device *adev,
	return funnel_probe(&adev->dev, &adev->res);
}

static int dynamic_funnel_remove(struct amba_device *adev)
static void dynamic_funnel_remove(struct amba_device *adev)
{
	return funnel_remove(&adev->dev);
	funnel_remove(&adev->dev);
}

static const struct amba_id dynamic_funnel_ids[] = {
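
This hunk is one instance of the tree-wide change that makes amba_driver .remove return void (device removal cannot fail); the replicator, STM, TMC, TPIU and the I2C/RTC/MMC/serial/watchdog drivers further down get the identical treatment. A minimal sketch of the converted driver shape — every name and ID here is hypothetical, not taken from this merge:

    /* Hedged sketch of the post-conversion amba_driver shape. */
    #include <linux/amba/bus.h>
    #include <linux/module.h>

    static int my_probe(struct amba_device *adev, const struct amba_id *id)
    {
        return 0;               /* probe may still fail */
    }

    static void my_remove(struct amba_device *adev)
    {
        /* tear down unconditionally; no error can be returned anymore */
    }

    static const struct amba_id my_ids[] = {
        { .id = 0x000bb000, .mask = 0x000fffff },  /* hypothetical ID */
        { 0, 0 },
    };

    static struct amba_driver my_driver = {
        .drv      = { .name = "my-amba-driver" },
        .probe    = my_probe,
        .remove   = my_remove,  /* returns void after this series */
        .id_table = my_ids,
    };
    module_amba_driver(my_driver);
    MODULE_LICENSE("GPL");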
@@ -90,6 +90,12 @@ static void of_coresight_get_ports_legacy(const struct device_node *node,
	struct of_endpoint endpoint;
	int in = 0, out = 0;

	/*
	 * Avoid warnings in of_graph_get_next_endpoint()
	 * if the device doesn't have any graph connections
	 */
	if (!of_graph_is_present(node))
		return;
	do {
		ep = of_graph_get_next_endpoint(node, ep);
		if (!ep)
@@ -241,4 +241,7 @@ coresight_find_csdev_by_fwnode(struct fwnode_handle *r_fwnode);
void coresight_set_assoc_ectdev_mutex(struct coresight_device *csdev,
				      struct coresight_device *ect_csdev);

void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev);
struct coresight_device *coresight_get_percpu_sink(int cpu);

#endif
@@ -45,12 +45,14 @@ struct replicator_drvdata {

static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	if (!coresight_claim_device_unlocked(drvdata->base)) {
	if (!coresight_claim_device_unlocked(csdev)) {
		writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
		writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
		coresight_disclaim_device_unlocked(drvdata->base);
		coresight_disclaim_device_unlocked(csdev);
	}

	CS_LOCK(drvdata->base);
@@ -70,6 +72,7 @@ static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
{
	int rc = 0;
	u32 id0val, id1val;
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

@@ -84,7 +87,7 @@ static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
		id0val = id1val = 0xff;

	if (id0val == 0xff && id1val == 0xff)
		rc = coresight_claim_device_unlocked(drvdata->base);
		rc = coresight_claim_device_unlocked(csdev);

	if (!rc) {
		switch (outport) {
@@ -140,6 +143,7 @@ static void dynamic_replicator_disable(struct replicator_drvdata *drvdata,
				       int inport, int outport)
{
	u32 reg;
	struct coresight_device *csdev = drvdata->csdev;

	switch (outport) {
	case 0:
@@ -160,7 +164,7 @@ static void dynamic_replicator_disable(struct replicator_drvdata *drvdata,

	if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
	    (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
		coresight_disclaim_device_unlocked(drvdata->base);
		coresight_disclaim_device_unlocked(csdev);
	CS_LOCK(drvdata->base);
}

@@ -254,6 +258,7 @@ static int replicator_probe(struct device *dev, struct resource *res)
		}
		drvdata->base = base;
		desc.groups = replicator_groups;
		desc.access = CSDEV_ACCESS_IOMEM(base);
	}

	if (fwnode_property_present(dev_fwnode(dev),
@@ -388,9 +393,9 @@ static int dynamic_replicator_probe(struct amba_device *adev,
	return replicator_probe(&adev->dev, &adev->res);
}

static int dynamic_replicator_remove(struct amba_device *adev)
static void dynamic_replicator_remove(struct amba_device *adev)
{
	return replicator_remove(&adev->dev);
	replicator_remove(&adev->dev);
}

static const struct amba_id dynamic_replicator_ids[] = {
@@ -258,6 +258,7 @@ static void stm_disable(struct coresight_device *csdev,
			struct perf_event *event)
{
	struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct csdev_access *csa = &csdev->access;

	/*
	 * For as long as the tracer isn't disabled another entity can't
@@ -270,7 +271,7 @@ static void stm_disable(struct coresight_device *csdev,
		spin_unlock(&drvdata->spinlock);

		/* Wait until the engine has completely stopped */
		coresight_timeout(drvdata->base, STMTCSR, STMTCSR_BUSY_BIT, 0);
		coresight_timeout(csa, STMTCSR, STMTCSR_BUSY_BIT, 0);

		pm_runtime_put(csdev->dev.parent);

@@ -884,6 +885,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
	if (IS_ERR(base))
		return PTR_ERR(base);
	drvdata->base = base;
	desc.access = CSDEV_ACCESS_IOMEM(base);

	ret = stm_get_stimulus_area(dev, &ch_res);
	if (ret)
@@ -951,15 +953,13 @@ stm_unregister:
	return ret;
}

static int stm_remove(struct amba_device *adev)
static void stm_remove(struct amba_device *adev)
{
	struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	coresight_unregister(drvdata->csdev);

	stm_unregister_device(&drvdata->stm);

	return 0;
}

#ifdef CONFIG_PM
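
Several hunks here swap a raw MMIO base pointer for the new struct csdev_access, so helpers like coresight_timeout() can service both memory-mapped and system-register-only devices through one entry point. A rough user-space model of the dispatch idea (the field names only mirror the kernel struct; this is not the kernel implementation):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct csdev_access {
        bool io_mem;                          /* MMIO-backed device?      */
        void *base;                           /* valid when io_mem is set */
        uint32_t (*read32)(uint32_t offset);  /* sysreg path otherwise    */
    };

    static uint32_t access_read32(struct csdev_access *csa, uint32_t offset)
    {
        if (csa->io_mem)
            return *(volatile uint32_t *)((char *)csa->base + offset);
        return csa->read32(offset);
    }

    /* Bounded poll of one bit, like coresight_timeout() over the abstraction. */
    static int timeout_poll(struct csdev_access *csa, uint32_t offset,
                            int bit, unsigned int expected)
    {
        for (int i = 0; i < 100; i++)
            if (((access_read32(csa, offset) >> bit) & 1) == expected)
                return 0;
        return -1;  /* timed out */
    }

    static uint32_t regs[4];

    int main(void)
    {
        struct csdev_access csa = { .io_mem = true, .base = regs };

        regs[0] = 1u << 5;  /* pretend the status bit is already set */
        printf("poll: %d\n", timeout_poll(&csa, 0, 5, 1));
        return 0;
    }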
@@ -34,16 +34,20 @@ DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");

void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;
	struct csdev_access *csa = &csdev->access;

	/* Ensure formatter, unformatter and hardware fifo are empty */
	if (coresight_timeout(drvdata->base,
			      TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
		dev_err(&drvdata->csdev->dev,
	if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
		dev_err(&csdev->dev,
			"timeout while waiting for TMC to be Ready\n");
	}
}

void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;
	struct csdev_access *csa = &csdev->access;
	u32 ffcr;

	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
@@ -52,9 +56,8 @@ void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
	ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Ensure flush completes */
	if (coresight_timeout(drvdata->base,
			      TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(&drvdata->csdev->dev,
	if (coresight_timeout(csa, TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(&csdev->dev,
			"timeout while waiting for completion of Manual Flush\n");
	}

@@ -538,6 +541,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
	}

	drvdata->base = base;
	desc.access = CSDEV_ACCESS_IOMEM(base);

	spin_lock_init(&drvdata->spinlock);

@@ -667,7 +671,7 @@ out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
}

static int tmc_remove(struct amba_device *adev)
static void tmc_remove(struct amba_device *adev)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);

@@ -684,8 +688,6 @@ static int tmc_remove(struct amba_device *adev)
	coresight_remove_csr_ops();
	misc_deregister(&drvdata->miscdev);
	coresight_unregister(drvdata->csdev);

	return 0;
}

static const struct amba_id tmc_ids[] = {
@@ -37,7 +37,7 @@ static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)

static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->base);
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;
@@ -88,7 +88,7 @@ static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->base);
	coresight_disclaim_device(drvdata->csdev);
}

static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
@@ -109,7 +109,7 @@ static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->base);
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;
@@ -120,11 +120,13 @@ static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(drvdata->base);
	coresight_disclaim_device_unlocked(csdev);
	CS_LOCK(drvdata->base);
}

@@ -1053,7 +1053,7 @@ static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
	rc = tmc_etr_enable_catu(drvdata, etr_buf);
	if (rc)
		return rc;
	rc = coresight_claim_device(drvdata->base);
	rc = coresight_claim_device(drvdata->csdev);
	if (!rc) {
		drvdata->etr_buf = etr_buf;
		__tmc_etr_enable_hw(drvdata);
@@ -1147,7 +1147,7 @@ void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
	__tmc_etr_disable_hw(drvdata);
	/* Disable CATU device if this ETR is connected to one */
	tmc_etr_disable_catu(drvdata);
	coresight_disclaim_device(drvdata->base);
	coresight_disclaim_device(drvdata->csdev);
	/* Reset the ETR buf used by hardware */
	drvdata->etr_buf = NULL;
}
@@ -804,13 +804,11 @@ err:
	return -EPERM;
}

static int __exit tpda_remove(struct amba_device *adev)
static void __exit tpda_remove(struct amba_device *adev)
{
	struct tpda_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	coresight_unregister(drvdata->csdev);

	return 0;
}

static struct amba_id tpda_ids[] = {
@@ -4232,13 +4232,11 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
	return 0;
}

static int __exit tpdm_remove(struct amba_device *adev)
static void __exit tpdm_remove(struct amba_device *adev)
{
	struct tpdm_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	coresight_unregister(drvdata->csdev);

	return 0;
}

static struct amba_id tpdm_ids[] = {
@@ -60,49 +60,45 @@ struct tpiu_drvdata {
	struct coresight_device	*csdev;
};

static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
static void tpiu_enable_hw(struct csdev_access *csa)
{
	CS_UNLOCK(drvdata->base);
	CS_UNLOCK(csa->base);

	/* TODO: fill this up */

	CS_LOCK(drvdata->base);
	CS_LOCK(csa->base);
}

static int tpiu_enable(struct coresight_device *csdev, u32 mode, void *__unused)
{
	struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	tpiu_enable_hw(drvdata);
	tpiu_enable_hw(&csdev->access);
	atomic_inc(csdev->refcnt);
	dev_dbg(&csdev->dev, "TPIU enabled\n");
	return 0;
}

static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
static void tpiu_disable_hw(struct csdev_access *csa)
{
	CS_UNLOCK(drvdata->base);
	CS_UNLOCK(csa->base);

	/* Clear formatter and stop on flush */
	writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR);
	csdev_access_relaxed_write32(csa, FFCR_STOP_FI, TPIU_FFCR);
	/* Generate manual flush */
	writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
	csdev_access_relaxed_write32(csa, FFCR_STOP_FI | FFCR_FON_MAN, TPIU_FFCR);
	/* Wait for flush to complete */
	coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
	coresight_timeout(csa, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
	/* Wait for formatter to stop */
	coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
	coresight_timeout(csa, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);

	CS_LOCK(drvdata->base);
	CS_LOCK(csa->base);
}

static int tpiu_disable(struct coresight_device *csdev)
{
	struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (atomic_dec_return(csdev->refcnt))
		return -EBUSY;

	tpiu_disable_hw(drvdata);
	tpiu_disable_hw(&csdev->access);

	dev_dbg(&csdev->dev, "TPIU disabled\n");
	return 0;
@@ -149,9 +145,10 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
		return PTR_ERR(base);

	drvdata->base = base;
	desc.access = CSDEV_ACCESS_IOMEM(base);

	/* Disable tpiu to support older devices */
	tpiu_disable_hw(drvdata);
	tpiu_disable_hw(&desc.access);

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata))
@@ -173,13 +170,11 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
	return PTR_ERR(drvdata->csdev);
}

static int tpiu_remove(struct amba_device *adev)
static void tpiu_remove(struct amba_device *adev)
{
	struct tpiu_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	coresight_unregister(drvdata->csdev);

	return 0;
}

#ifdef CONFIG_PM
(two files are added by this series; the diff viewer suppressed the first one)
 drivers/hwtracing/coresight/coresight-trbe.c | 1149 lines, new file (diff too large, not shown)
 drivers/hwtracing/coresight/coresight-trbe.h |  153 lines, new file (shown below)
@@ -0,0 +1,153 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This contains all required hardware related helper functions for
 * Trace Buffer Extension (TRBE) driver in the coresight framework.
 *
 * Copyright (C) 2020 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/smp.h>

#include "coresight-etm-perf.h"

static inline bool is_trbe_available(void)
{
	u64 aa64dfr0 = read_sysreg_s(SYS_ID_AA64DFR0_EL1);
	unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0, ID_AA64DFR0_TRBE_SHIFT);

	return trbe >= 0b0001;
}

static inline bool is_trbe_enabled(void)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	return trblimitr & TRBLIMITR_ENABLE;
}

#define TRBE_EC_OTHERS		0
#define TRBE_EC_STAGE1_ABORT	36
#define TRBE_EC_STAGE2_ABORT	37

static inline int get_trbe_ec(u64 trbsr)
{
	return (trbsr >> TRBSR_EC_SHIFT) & TRBSR_EC_MASK;
}

#define TRBE_BSC_NOT_STOPPED	0
#define TRBE_BSC_FILLED		1
#define TRBE_BSC_TRIGGERED	2

static inline int get_trbe_bsc(u64 trbsr)
{
	return (trbsr >> TRBSR_BSC_SHIFT) & TRBSR_BSC_MASK;
}

static inline void clr_trbe_irq(void)
{
	u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);

	trbsr &= ~TRBSR_IRQ;
	write_sysreg_s(trbsr, SYS_TRBSR_EL1);
}

static inline bool is_trbe_irq(u64 trbsr)
{
	return trbsr & TRBSR_IRQ;
}

static inline bool is_trbe_trg(u64 trbsr)
{
	return trbsr & TRBSR_TRG;
}

static inline bool is_trbe_wrap(u64 trbsr)
{
	return trbsr & TRBSR_WRAP;
}

static inline bool is_trbe_abort(u64 trbsr)
{
	return trbsr & TRBSR_ABORT;
}

static inline bool is_trbe_running(u64 trbsr)
{
	return !(trbsr & TRBSR_STOP);
}

#define TRBE_TRIG_MODE_STOP		0
#define TRBE_TRIG_MODE_IRQ		1
#define TRBE_TRIG_MODE_IGNORE		3

#define TRBE_FILL_MODE_FILL		0
#define TRBE_FILL_MODE_WRAP		1
#define TRBE_FILL_MODE_CIRCULAR_BUFFER	3

static inline void set_trbe_disabled(void)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	trblimitr &= ~TRBLIMITR_ENABLE;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
}

static inline bool get_trbe_flag_update(u64 trbidr)
{
	return trbidr & TRBIDR_FLAG;
}

static inline bool is_trbe_programmable(u64 trbidr)
{
	return !(trbidr & TRBIDR_PROG);
}

static inline int get_trbe_address_align(u64 trbidr)
{
	return (trbidr >> TRBIDR_ALIGN_SHIFT) & TRBIDR_ALIGN_MASK;
}

static inline unsigned long get_trbe_write_pointer(void)
{
	return read_sysreg_s(SYS_TRBPTR_EL1);
}

static inline void set_trbe_write_pointer(unsigned long addr)
{
	WARN_ON(is_trbe_enabled());
	write_sysreg_s(addr, SYS_TRBPTR_EL1);
}

static inline unsigned long get_trbe_limit_pointer(void)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
	unsigned long limit = (trblimitr >> TRBLIMITR_LIMIT_SHIFT) & TRBLIMITR_LIMIT_MASK;
	unsigned long addr = limit << TRBLIMITR_LIMIT_SHIFT;

	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
	return addr;
}

static inline unsigned long get_trbe_base_pointer(void)
{
	u64 trbbaser = read_sysreg_s(SYS_TRBBASER_EL1);
	unsigned long addr = trbbaser & (TRBBASER_BASE_MASK << TRBBASER_BASE_SHIFT);

	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
	return addr;
}

static inline void set_trbe_base_pointer(unsigned long addr)
{
	WARN_ON(is_trbe_enabled());
	WARN_ON(!IS_ALIGNED(addr, (1UL << TRBBASER_BASE_SHIFT)));
	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
	write_sysreg_s(addr, SYS_TRBBASER_EL1);
}
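
The helpers above are thin shift-and-mask wrappers around architected TRBE registers. A user-space model of the TRBSR decode (not kernel code — the shift/mask values below are placeholders; the real constants come from the arm64 TRBE definitions added elsewhere in this series):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TRBSR_EC_SHIFT  26          /* placeholder position */
    #define TRBSR_EC_MASK   0x3f        /* placeholder width    */
    #define TRBSR_STOP      (1u << 17)  /* placeholder bit      */

    static int get_trbe_ec(uint64_t trbsr)
    {
        return (trbsr >> TRBSR_EC_SHIFT) & TRBSR_EC_MASK;
    }

    static bool is_trbe_running(uint64_t trbsr)
    {
        return !(trbsr & TRBSR_STOP);
    }

    int main(void)
    {
        /* Fabricate a status word: stage-1 abort (EC 36), collection stopped. */
        uint64_t trbsr = ((uint64_t)36 << TRBSR_EC_SHIFT) | TRBSR_STOP;

        printf("ec=%d (36 == TRBE_EC_STAGE1_ABORT), running=%d\n",
               get_trbe_ec(trbsr), is_trbe_running(trbsr));
        return 0;
    }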
@@ -1055,7 +1055,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
	return ret;
}

static int nmk_i2c_remove(struct amba_device *adev)
static void nmk_i2c_remove(struct amba_device *adev)
{
	struct resource *res = &adev->res;
	struct nmk_i2c_dev *dev = amba_get_drvdata(adev);
@@ -1068,8 +1068,6 @@ static int nmk_i2c_remove(struct amba_device *adev)
	i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
	clk_disable_unprepare(dev->clk);
	release_mem_region(res->start, resource_size(res));

	return 0;
}

static struct i2c_vendor_data vendor_stn8815 = {
@@ -159,7 +159,7 @@ static int amba_kmi_probe(struct amba_device *dev,
	return ret;
}

static int amba_kmi_remove(struct amba_device *dev)
static void amba_kmi_remove(struct amba_device *dev)
{
	struct amba_kmi_port *kmi = amba_get_drvdata(dev);

@@ -168,7 +168,6 @@ static int amba_kmi_remove(struct amba_device *dev)
	iounmap(kmi->base);
	kfree(kmi);
	amba_release_regions(dev);
	return 0;
}

static int __maybe_unused amba_kmi_resume(struct device *dev)
@@ -407,7 +407,6 @@ config ARM_SMMU_V3_SVA

config QCOM_LAZY_MAPPING
	tristate "Reference counted iommu-mapping support"
	depends on ION
	depends on IOMMU_API
	help
	  ION buffers may be shared between several software clients.
@@ -97,11 +97,6 @@ struct mapped_device {
	 */
	struct workqueue_struct *wq;

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

@@ -2407,27 +2407,19 @@ static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);
	WARN_ON(test_bit(DMF_FROZEN, &md->flags));

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
	r = freeze_bdev(md->bdev);
	if (!r)
		set_bit(DMF_FROZEN, &md->flags);
	return r;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	thaw_bdev(md->bdev);
	clear_bit(DMF_FROZEN, &md->flags);
}

@@ -920,6 +920,15 @@ const char *v4l2_ctrl_get_name(u32 id)
	case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP:	return "H264 I-Frame Maximum QP Value";
	case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP:	return "H264 P-Frame Minimum QP Value";
	case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP:	return "H264 P-Frame Maximum QP Value";
	case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP:	return "H264 B-Frame Minimum QP Value";
	case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP:	return "H264 B-Frame Maximum QP Value";
	case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR: return "H264 Hierarchical Lay 0 Bitrate";
	case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR: return "H264 Hierarchical Lay 1 Bitrate";
	case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR: return "H264 Hierarchical Lay 2 Bitrate";
	case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR: return "H264 Hierarchical Lay 3 Bitrate";
	case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR: return "H264 Hierarchical Lay 4 Bitrate";
	case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR: return "H264 Hierarchical Lay 5 Bitrate";
	case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L6_BR: return "H264 Hierarchical Lay 6 Bitrate";
	case V4L2_CID_MPEG_VIDEO_H264_SPS:		return "H264 Sequence Parameter Set";
	case V4L2_CID_MPEG_VIDEO_H264_PPS:		return "H264 Picture Parameter Set";
	case V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX:	return "H264 Scaling Matrix";
@@ -949,6 +958,7 @@ const char *v4l2_ctrl_get_name(u32 id)
	case V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE:	return "Vertical MV Search Range";
	case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER:	return "Repeat Sequence Header";
	case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:	return "Force Key Frame";
	case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID:	return "Base Layer Priority ID";
	case V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS:	return "MPEG-2 Slice Parameters";
	case V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION:	return "MPEG-2 Quantization Matrices";
	case V4L2_CID_MPEG_VIDEO_FWHT_PARAMS:		return "FWHT Stateless Parameters";
@@ -978,6 +988,12 @@ const char *v4l2_ctrl_get_name(u32 id)
	case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:	return "HEVC B-Frame QP Value";
	case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:		return "HEVC Minimum QP Value";
	case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:		return "HEVC Maximum QP Value";
	case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP:	return "HEVC I-Frame Minimum QP Value";
	case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP:	return "HEVC I-Frame Maximum QP Value";
	case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP:	return "HEVC P-Frame Minimum QP Value";
	case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP:	return "HEVC P-Frame Maximum QP Value";
	case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP:	return "HEVC B-Frame Minimum QP Value";
	case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP:	return "HEVC B-Frame Maximum QP Value";
	case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:		return "HEVC Profile";
	case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:		return "HEVC Level";
	case V4L2_CID_MPEG_VIDEO_HEVC_TIER:		return "HEVC Tier";
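
The new control names surface through the standard control enumeration. A hedged user-space sketch querying one of the added HEVC per-frame QP controls (assumes a V4L2 device node at /dev/video0 and a kernel with these backported control IDs):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        struct v4l2_queryctrl qc;
        int fd = open("/dev/video0", O_RDWR);  /* assumed device node */

        if (fd < 0) {
            perror("open");
            return 1;
        }
        memset(&qc, 0, sizeof(qc));
        qc.id = V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP;
        if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0)
            printf("%s: range [%d, %d]\n", qc.name, qc.minimum, qc.maximum);
        else
            perror("VIDIOC_QUERYCTRL");  /* control not supported/backported */
        close(fd);
        return 0;
    }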
@@ -273,14 +273,12 @@ err_clk_enable:
	return ret;
}

static int pl172_remove(struct amba_device *adev)
static void pl172_remove(struct amba_device *adev)
{
	struct pl172_data *pl172 = amba_get_drvdata(adev);

	clk_disable_unprepare(pl172->clk);
	amba_release_regions(adev);

	return 0;
}

static const struct amba_id pl172_ids[] = {
@@ -426,14 +426,12 @@ out_clk_dis_aper:
	return err;
}

static int pl353_smc_remove(struct amba_device *adev)
static void pl353_smc_remove(struct amba_device *adev)
{
	struct pl353_smc_data *pl353_smc = amba_get_drvdata(adev);

	clk_disable_unprepare(pl353_smc->memclk);
	clk_disable_unprepare(pl353_smc->aclk);

	return 0;
}

static const struct amba_id pl353_ids[] = {
@@ -2195,7 +2195,7 @@ static int mmci_probe(struct amba_device *dev,
	return ret;
}

static int mmci_remove(struct amba_device *dev)
static void mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

@@ -2223,8 +2223,6 @@ static int mmci_remove(struct amba_device *dev)
		clk_disable_unprepare(host->clk);
		mmc_free_host(mmc);
	}

	return 0;
}

#ifdef CONFIG_PM
@@ -137,7 +137,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
	return ret;
}

static int pl030_remove(struct amba_device *dev)
static void pl030_remove(struct amba_device *dev)
{
	struct pl030_rtc *rtc = amba_get_drvdata(dev);

@@ -146,8 +146,6 @@ static int pl030_remove(struct amba_device *dev)
	free_irq(dev->irq[0], rtc);
	iounmap(rtc->base);
	amba_release_regions(dev);

	return 0;
}

static struct amba_id pl030_ids[] = {
@@ -280,7 +280,7 @@ static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
	return 0;
}

static int pl031_remove(struct amba_device *adev)
static void pl031_remove(struct amba_device *adev)
{
	struct pl031_local *ldata = dev_get_drvdata(&adev->dev);

@@ -289,8 +289,6 @@ static int pl031_remove(struct amba_device *adev)
	if (adev->irq[0])
		free_irq(adev->irq[0], ldata);
	amba_release_regions(adev);

	return 0;
}

static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
@@ -9,6 +9,8 @@
#include "ufs.h"
#include "ufs-sysfs.h"

#include <trace/hooks/ufshcd.h>

static const char *ufschd_uic_link_state_to_string(
			enum uic_link_state state)
{
@@ -875,11 +877,7 @@ void ufs_sysfs_add_nodes(struct ufs_hba *hba)
		return;
	}

	ret = ufshcd_vops_update_sysfs(hba);
	if (ret)
		dev_err(hba->dev,
			"%s: vops sysfs groups update failed (err = %d)\n",
			__func__, ret);
	trace_android_vh_ufs_update_sysfs(hba);
}

void ufs_sysfs_remove_nodes(struct device *dev)
@@ -28,6 +28,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/ufshcd.h>

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
@@ -1978,7 +1981,7 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
	lrbp->issue_time_stamp = ktime_get();
	lrbp->compl_time_stamp = ktime_set(0, 0);
	ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
	ufshcd_vops_send_command(hba, lrbp);
	trace_android_vh_ufs_send_command(hba, lrbp);
	ufshcd_add_command_trace(hba, task_tag, "send");
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
@@ -2590,7 +2593,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;

	err = ufshcd_vops_prepare_command(hba, cmd->request, lrbp);
	trace_android_vh_ufs_prepare_command(hba, cmd->request, lrbp, &err);
	if (err) {
		lrbp->cmd = NULL;
		ufshcd_release(hba);
@@ -5024,7 +5027,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
		lrbp->compl_time_stamp = ktime_get();
		cmd = lrbp->cmd;
		if (cmd) {
			ufshcd_vops_compl_command(hba, lrbp);
			trace_android_vh_ufs_compl_command(hba, lrbp);
			ufshcd_add_command_trace(hba, index, "complete");
			result = ufshcd_transfer_rsp_status(hba, lrbp);
			scsi_dma_unmap(cmd);
@@ -293,10 +293,6 @@ struct ufs_pwr_mode_info {
 * @program_key: program or evict an inline encryption key
 * @fill_prdt: called after initializing the standard PRDT fields so that any
 *	variant-specific PRDT fields can be initialized too
 * @prepare_command: called when receiving a request in the first place
 * @update_sysfs: adds vendor-specific sysfs entries
 * @send_command: adds vendor-specific work when sending a command
 * @compl_command: adds vendor-specific work when completing a command
 */
struct ufs_hba_variant_ops {
	const char *name;
@@ -334,11 +330,6 @@ struct ufs_hba_variant_ops {
			   const union ufs_crypto_cfg_entry *cfg, int slot);
	int	(*fill_prdt)(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
			     unsigned int segments);
	int	(*prepare_command)(struct ufs_hba *hba,
				   struct request *rq, struct ufshcd_lrb *lrbp);
	int	(*update_sysfs)(struct ufs_hba *hba);
	void	(*send_command)(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
	void	(*compl_command)(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
};

/* clock gating state */
@@ -1282,35 +1273,6 @@ static inline int ufshcd_vops_fill_prdt(struct ufs_hba *hba,
	return 0;
}

static inline int ufshcd_vops_prepare_command(struct ufs_hba *hba,
		struct request *rq, struct ufshcd_lrb *lrbp)
{
	if (hba->vops && hba->vops->prepare_command)
		return hba->vops->prepare_command(hba, rq, lrbp);
	return 0;
}

static inline int ufshcd_vops_update_sysfs(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->update_sysfs)
		return hba->vops->update_sysfs(hba);
	return 0;
}

static inline void ufshcd_vops_send_command(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp)
{
	if (hba->vops && hba->vops->send_command)
		hba->vops->send_command(hba, lrbp);
}

static inline void ufshcd_vops_compl_command(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp)
{
	if (hba->vops && hba->vops->compl_command)
		hba->vops->compl_command(hba, lrbp);
}

extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];

/*
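
The removed variant ops are replaced by Android vendor hooks: a vendor module attaches a probe at runtime instead of filling a static ops table. A rough sketch of what a vendor-side registration might look like (hedged — the exact registration helpers are generated by the vendor-hook macros in the Android tree, and the probe signature below is an assumption based on that mechanism):

    /* Hedged sketch of vendor-hook attachment (Android GKI mechanism). */
    #include <linux/module.h>
    #include <trace/hooks/ufshcd.h>

    static void my_ufs_send_command(void *unused, struct ufs_hba *hba,
                                    struct ufshcd_lrb *lrbp)
    {
        /* vendor-specific bookkeeping before the doorbell is rung */
    }

    static int __init my_hooks_init(void)
    {
        return register_trace_android_vh_ufs_send_command(
                        my_ufs_send_command, NULL);
    }
    module_init(my_hooks_init);
    MODULE_LICENSE("GPL");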
@@ -2314,13 +2314,13 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
	return status;
}

static int
static void
pl022_remove(struct amba_device *adev)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);

	if (!pl022)
		return 0;
		return;

	/*
	 * undo pm_runtime_put() in probe.  I assume that we're not
@@ -2335,7 +2335,6 @@ pl022_remove(struct amba_device *adev)
	clk_disable_unprepare(pl022->clk);
	amba_release_regions(adev);
	tasklet_disable(&pl022->pump_transfers);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
@@ -754,7 +754,7 @@ static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
	return ret;
}

static int pl010_remove(struct amba_device *dev)
static void pl010_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);
	int i;
@@ -770,8 +770,6 @@ static int pl010_remove(struct amba_device *dev)

	if (!busy)
		uart_unregister_driver(&amba_reg);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
@@ -2679,13 +2679,12 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
	return pl011_register_port(uap);
}

static int pl011_remove(struct amba_device *dev)
static void pl011_remove(struct amba_device *dev)
{
	struct uart_amba_port *uap = amba_get_drvdata(dev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
@@ -71,7 +71,7 @@ static int vfio_amba_probe(struct amba_device *adev, const struct amba_id *id)
	return ret;
}

static int vfio_amba_remove(struct amba_device *adev)
static void vfio_amba_remove(struct amba_device *adev)
{
	struct vfio_platform_device *vdev;

@@ -79,10 +79,7 @@ static int vfio_amba_remove(struct amba_device *adev)
	if (vdev) {
		kfree(vdev->name);
		kfree(vdev);
		return 0;
	}

	return -EINVAL;
}

static const struct amba_id pl330_ids[] = {
@@ -925,7 +925,7 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
	return ret;
}

static int clcdfb_remove(struct amba_device *dev)
static void clcdfb_remove(struct amba_device *dev)
{
	struct clcd_fb *fb = amba_get_drvdata(dev);

@@ -942,8 +942,6 @@ static int clcdfb_remove(struct amba_device *dev)
	kfree(fb);

	amba_release_regions(dev);

	return 0;
}

static const struct amba_id clcdfb_id_table[] = {
@@ -304,14 +304,12 @@ err:
	return ret;
}

static int sp805_wdt_remove(struct amba_device *adev)
static void sp805_wdt_remove(struct amba_device *adev)
{
	struct sp805_wdt *wdt = amba_get_drvdata(adev);

	watchdog_unregister_device(&wdt->wdd);
	watchdog_set_drvdata(&wdt->wdd, NULL);

	return 0;
}

static int __maybe_unused sp805_wdt_suspend(struct device *dev)
@@ -1426,15 +1426,6 @@ drivers/staging/android/ashmem.c
drivers/staging/android/ashmem.h
drivers/staging/android/debug_kinfo.c
drivers/staging/android/debug_kinfo.h
drivers/staging/android/ion/heaps/ion_page_pool.c
drivers/staging/android/ion/heaps/ion_page_pool.h
drivers/staging/android/ion/heaps/ion_system_heap.c
drivers/staging/android/ion/ion_buffer.c
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_dma_buf.c
drivers/staging/android/ion/ion_heap.c
drivers/staging/android/ion/ion_private.h
drivers/staging/android/ion/ion_trace.h
drivers/staging/android/uapi/ashmem.h
drivers/thermal/cpufreq_cooling.c
drivers/thermal/devfreq_cooling.c
@@ -1781,7 +1772,6 @@ fs/f2fs/segment.h
fs/f2fs/shrinker.c
fs/f2fs/super.c
fs/f2fs/sysfs.c
fs/f2fs/trace.h
fs/f2fs/verity.c
fs/f2fs/xattr.c
fs/f2fs/xattr.h
@@ -1827,6 +1817,8 @@ fs/incfs/internal.h
fs/incfs/main.c
fs/incfs/pseudo_files.c
fs/incfs/pseudo_files.h
fs/incfs/verity.c
fs/incfs/verity.h
fs/incfs/vfs.c
fs/incfs/vfs.h
fs/init.c
@@ -2023,6 +2015,7 @@ fs/verity/hash_algs.c
fs/verity/init.c
fs/verity/measure.c
fs/verity/open.c
fs/verity/read_metadata.c
fs/verity/signature.c
fs/verity/verify.c
fs/xattr.c
@@ -2677,7 +2670,6 @@ include/linux/io.h
include/linux/iomap.h
include/linux/iommu.h
include/linux/iommu-helper.h
include/linux/ion.h
include/linux/io-pgtable.h
include/linux/iopoll.h
include/linux/ioport.h
@@ -3526,9 +3518,11 @@ include/net/neighbour.h
include/net/netevent.h
include/net/netfilter/ipv4/nf_conntrack_ipv4.h
include/net/netfilter/ipv4/nf_defrag_ipv4.h
include/net/netfilter/ipv4/nf_dup_ipv4.h
include/net/netfilter/ipv4/nf_reject.h
include/net/netfilter/ipv6/nf_conntrack_ipv6.h
include/net/netfilter/ipv6/nf_defrag_ipv6.h
include/net/netfilter/ipv6/nf_dup_ipv6.h
include/net/netfilter/ipv6/nf_reject.h
include/net/netfilter/nf_conntrack_acct.h
include/net/netfilter/nf_conntrack_bridge.h
@@ -3828,6 +3822,7 @@ include/trace/hooks/sysrqcrash.h
include/trace/hooks/thermal.h
include/trace/hooks/timer.h
include/trace/hooks/topology.h
include/trace/hooks/ufshcd.h
include/trace/hooks/vendor_hooks.h
include/trace/hooks/wqlockup.h
include/trace/perf.h
@@ -3973,7 +3968,6 @@ include/uapi/linux/input.h
include/uapi/linux/in_route.h
include/uapi/linux/ioctl.h
include/uapi/linux/iommu.h
include/uapi/linux/ion.h
include/uapi/linux/io_uring.h
include/uapi/linux/ip6_tunnel.h
include/uapi/linux/ipc.h
@@ -4086,6 +4080,7 @@ include/uapi/linux/netfilter/xt_statistic.h
include/uapi/linux/netfilter/xt_string.h
include/uapi/linux/netfilter/xt_TCPMSS.h
include/uapi/linux/netfilter/xt_tcpudp.h
include/uapi/linux/netfilter/xt_TEE.h
include/uapi/linux/netfilter/xt_time.h
include/uapi/linux/netfilter/xt_TPROXY.h
include/uapi/linux/netfilter/xt_u32.h
@@ -4703,6 +4698,7 @@ lib/logic_pio.c
lib/lz4/lz4_compress.c
lib/lz4/lz4_decompress.c
lib/lz4/lz4defs.h
lib/lz4/lz4hc_compress.c
lib/lzo/lzo1x_compress.c
lib/lzo/lzo1x_decompress_safe.c
lib/lzo/lzodefs.h
@@ -5114,6 +5110,7 @@ net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/iptable_security.c
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_dup_ipv4.c
net/ipv4/netfilter/nf_nat_h323.c
net/ipv4/netfilter/nf_nat_pptp.c
net/ipv4/netfilter/nf_reject_ipv4.c
@@ -5198,6 +5195,7 @@ net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/netfilter/ip6t_rpfilter.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/netfilter/nf_dup_ipv6.c
net/ipv6/netfilter/nf_reject_ipv6.c
net/ipv6/netfilter/nf_socket_ipv6.c
net/ipv6/netfilter/nf_tproxy_ipv6.c
@@ -5402,6 +5400,7 @@ net/netfilter/xt_statistic.c
net/netfilter/xt_string.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_tcpudp.c
net/netfilter/xt_TEE.c
net/netfilter/xt_time.c
net/netfilter/xt_TPROXY.c
net/netfilter/xt_TRACE.c
@@ -556,55 +556,47 @@ EXPORT_SYMBOL(fsync_bdev);
 * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
 * actually.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
int freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1) {
		/*
		 * We don't even need to grab a reference - the first call
		 * to freeze_bdev grab an active reference and only the last
		 * thaw_bdev drops it.
		 */
		sb = get_super(bdev);
		if (sb)
			drop_super(sb);
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return sb;
	}
	if (++bdev->bd_fsfreeze_count > 1)
		goto done;

	sb = get_active_super(bdev);
	if (!sb)
		goto out;
		goto sync;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	if (error) {
		deactivate_super(sb);
		bdev->bd_fsfreeze_count--;
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return ERR_PTR(error);
	}
	deactivate_super(sb);
out:

	if (error) {
		bdev->bd_fsfreeze_count--;
		goto done;
	}
	bdev->bd_fsfreeze_sb = sb;

sync:
	sync_blockdev(bdev);
done:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return sb;	/* thaw_bdev releases s->s_umount */
	return error;
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
int thaw_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
@@ -615,6 +607,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	sb = bdev->bd_fsfreeze_sb;
	if (!sb)
		goto out;

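
After this rework a caller no longer juggles the superblock pointer: freeze_bdev() returns 0 or -errno and the bdev remembers its frozen superblock (bd_fsfreeze_sb) for thaw_bdev(). A minimal caller sketch mirroring the dm lock_fs/unlock_fs conversion above (illustrative only; example_freeze is not a function from this merge):

    /* Sketch of the new freeze/thaw calling convention. */
    #include <linux/blkdev.h>

    static int example_freeze(struct block_device *bdev)
    {
        int r = freeze_bdev(bdev);  /* 0 or -errno; sb tracked inside bdev */

        if (r)
            return r;
        /* ... operate on the quiesced device ... */
        return thaw_bdev(bdev);     /* drops the reference freeze took */
    }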
@@ -524,7 +524,7 @@ repeat:

void emergency_thaw_bdev(struct super_block *sb)
{
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
}

@@ -624,7 +624,7 @@ static int ext4_shutdown(struct super_block *sb, unsigned long arg)
	case EXT4_GOING_FLAGS_DEFAULT:
		freeze_bdev(sb->s_bdev);
		set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
		thaw_bdev(sb->s_bdev, sb);
		thaw_bdev(sb->s_bdev);
		break;
	case EXT4_GOING_FLAGS_LOGFLUSH:
		set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
@@ -1309,6 +1309,12 @@ out:
			return -EOPNOTSUPP;
		return fsverity_ioctl_measure(filp, (void __user *)arg);

	case FS_IOC_READ_VERITY_METADATA:
		if (!ext4_has_feature_verity(sb))
			return -EOPNOTSUPP;
		return fsverity_ioctl_read_metadata(filp,
						    (const void __user *)arg);

	default:
		return -ENOTTY;
	}
@@ -1391,6 +1397,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	case FS_IOC_GETFSMAP:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case FS_IOC_READ_VERITY_METADATA:
	case EXT4_IOC_CLEAR_ES_CACHE:
	case EXT4_IOC_GETSTATE:
	case EXT4_IOC_GET_ES_CACHE:
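
FS_IOC_READ_VERITY_METADATA lets userspace pull Merkle-tree pages and other verity metadata out of a file. A hedged user-space sketch (assumes a uapi linux/fsverity.h that defines the ioctl and its argument struct, as in kernels carrying this backport):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fsverity.h>

    int main(int argc, char **argv)
    {
        struct fsverity_read_metadata_arg arg;
        char buf[4096];
        long n;
        int fd;

        if (argc != 2)
            return 1;
        fd = open(argv[1], O_RDONLY);  /* must be a verity-enabled file */
        if (fd < 0) {
            perror("open");
            return 1;
        }
        memset(&arg, 0, sizeof(arg));
        arg.metadata_type = FS_VERITY_METADATA_TYPE_MERKLE_TREE;
        arg.offset = 0;
        arg.length = sizeof(buf);
        arg.buf_ptr = (unsigned long)buf;
        n = ioctl(fd, FS_IOC_READ_VERITY_METADATA, &arg);
        if (n < 0)
            perror("FS_IOC_READ_VERITY_METADATA");
        else
            printf("read %ld bytes of Merkle tree\n", n);
        close(fd);
        return 0;
    }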
@@ -76,16 +76,6 @@ config F2FS_CHECK_FS

	  If you want to improve the performance, say N.

config F2FS_IO_TRACE
	bool "F2FS IO tracer"
	depends on F2FS_FS
	depends on FUNCTION_TRACER
	help
	  F2FS IO trace is based on a function trace, which gathers process
	  information and block IO patterns in the filesystem level.

	  If unsure, say N.

config F2FS_FAULT_INJECTION
	bool "F2FS fault injection facility"
	depends on F2FS_FS
@@ -119,6 +109,16 @@ config F2FS_FS_LZ4
	help
	  Support LZ4 compress algorithm, if unsure, say Y.

config F2FS_FS_LZ4HC
	bool "LZ4HC compression support"
	depends on F2FS_FS_COMPRESSION
	depends on F2FS_FS_LZ4
	select LZ4HC_COMPRESS
	default y
	help
	  Support LZ4HC compress algorithm, LZ4HC has compatible on-disk
	  layout with LZ4, if unsure, say Y.

config F2FS_FS_ZSTD
	bool "ZSTD compression support"
	depends on F2FS_FS_COMPRESSION
@@ -7,6 +7,5 @@ f2fs-y += shrinker.o extent_cache.o sysfs.o
f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
f2fs-$(CONFIG_FS_VERITY) += verity.o
f2fs-$(CONFIG_F2FS_FS_COMPRESSION) += compress.o
@@ -200,6 +200,27 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
	return __f2fs_get_acl(inode, type, NULL);
}

static int f2fs_acl_update_mode(struct inode *inode, umode_t *mode_p,
				struct posix_acl **acl)
{
	umode_t mode = inode->i_mode;
	int error;

	if (is_inode_flag_set(inode, FI_ACL_MODE))
		mode = F2FS_I(inode)->i_acl_mode;

	error = posix_acl_equiv_mode(*acl, &mode);
	if (error < 0)
		return error;
	if (error == 0)
		*acl = NULL;
	if (!in_group_p(inode->i_gid) &&
	    !capable_wrt_inode_uidgid(inode, CAP_FSETID))
		mode &= ~S_ISGID;
	*mode_p = mode;
	return 0;
}

static int __f2fs_set_acl(struct inode *inode, int type,
			  struct posix_acl *acl, struct page *ipage)
{
@@ -213,7 +234,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
	case ACL_TYPE_ACCESS:
		name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
		if (acl && !ipage) {
			error = posix_acl_update_mode(inode, &mode, &acl);
			error = f2fs_acl_update_mode(inode, &mode, &acl);
			if (error)
				return error;
			set_acl_inode(inode, mode);
@@ -13,13 +13,15 @@
|
||||
#include <linux/f2fs_fs.h>
|
||||
#include <linux/pagevec.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/kthread.h>
|
||||
|
||||
#include "f2fs.h"
|
||||
#include "node.h"
|
||||
#include "segment.h"
|
||||
#include "trace.h"
|
||||
#include <trace/events/f2fs.h>
|
||||
|
||||
#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
|
||||
|
||||
static struct kmem_cache *ino_entry_slab;
|
||||
struct kmem_cache *f2fs_inode_entry_slab;
|
||||
|
||||
@@ -443,7 +445,6 @@ static int f2fs_set_meta_page_dirty(struct page *page)
|
||||
__set_page_dirty_nobuffers(page);
|
||||
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
|
||||
f2fs_set_page_private(page, 0);
|
||||
f2fs_trace_pid(page);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
@@ -1017,7 +1018,6 @@ void f2fs_update_dirty_page(struct inode *inode, struct page *page)
|
||||
spin_unlock(&sbi->inode_lock[type]);
|
||||
|
||||
f2fs_set_page_private(page, 0);
|
||||
f2fs_trace_pid(page);
|
||||
}
|
||||
|
||||
void f2fs_remove_dirty_inode(struct inode *inode)
|
||||
@@ -1387,8 +1387,7 @@ static void commit_checkpoint(struct f2fs_sb_info *sbi,

static inline u64 get_sectors_written(struct block_device *bdev)
{
	return bdev->bd_part ?
		(u64)part_stat_read(bdev->bd_part, sectors[STAT_WRITE]) : 0;
	return (u64)part_stat_read(bdev->bd_part, sectors[STAT_WRITE]);
}

u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi)
@@ -1708,3 +1707,174 @@ void f2fs_destroy_checkpoint_caches(void)
	kmem_cache_destroy(ino_entry_slab);
	kmem_cache_destroy(f2fs_inode_entry_slab);
}

static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
{
	struct cp_control cpc = { .reason = CP_SYNC, };
	int err;

	down_write(&sbi->gc_lock);
	err = f2fs_write_checkpoint(sbi, &cpc);
	up_write(&sbi->gc_lock);

	return err;
}

static void __checkpoint_and_complete_reqs(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	struct ckpt_req *req, *next;
	struct llist_node *dispatch_list;
	u64 sum_diff = 0, diff, count = 0;
	int ret;

	dispatch_list = llist_del_all(&cprc->issue_list);
	if (!dispatch_list)
		return;
	dispatch_list = llist_reverse_order(dispatch_list);

	ret = __write_checkpoint_sync(sbi);
	atomic_inc(&cprc->issued_ckpt);

	llist_for_each_entry_safe(req, next, dispatch_list, llnode) {
		diff = (u64)ktime_ms_delta(ktime_get(), req->queue_time);
		req->ret = ret;
		complete(&req->wait);

		sum_diff += diff;
		count++;
	}
	atomic_sub(count, &cprc->queued_ckpt);
	atomic_add(count, &cprc->total_ckpt);

	spin_lock(&cprc->stat_lock);
	cprc->cur_time = (unsigned int)div64_u64(sum_diff, count);
	if (cprc->peak_time < cprc->cur_time)
		cprc->peak_time = cprc->cur_time;
	spin_unlock(&cprc->stat_lock);
}

static int issue_checkpoint_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	wait_queue_head_t *q = &cprc->ckpt_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&cprc->issue_list))
		__checkpoint_and_complete_reqs(sbi);

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&cprc->issue_list));
	goto repeat;
}

static void flush_remained_ckpt_reqs(struct f2fs_sb_info *sbi,
		struct ckpt_req *wait_req)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (!llist_empty(&cprc->issue_list)) {
		__checkpoint_and_complete_reqs(sbi);
	} else {
		/* already dispatched by issue_checkpoint_thread */
		if (wait_req)
			wait_for_completion(&wait_req->wait);
	}
}

static void init_ckpt_req(struct ckpt_req *req)
{
	memset(req, 0, sizeof(struct ckpt_req));

	init_completion(&req->wait);
	req->queue_time = ktime_get();
}

int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	struct ckpt_req req;
	struct cp_control cpc;

	cpc.reason = __get_cp_reason(sbi);
	if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
		int ret;

		down_write(&sbi->gc_lock);
		ret = f2fs_write_checkpoint(sbi, &cpc);
		up_write(&sbi->gc_lock);

		return ret;
	}

	if (!cprc->f2fs_issue_ckpt)
		return __write_checkpoint_sync(sbi);

	init_ckpt_req(&req);

	llist_add(&req.llnode, &cprc->issue_list);
	atomic_inc(&cprc->queued_ckpt);

	/* update issue_list before we wake up issue_checkpoint thread */
	smp_mb();

	if (waitqueue_active(&cprc->ckpt_wait_queue))
		wake_up(&cprc->ckpt_wait_queue);

	if (cprc->f2fs_issue_ckpt)
		wait_for_completion(&req.wait);
	else
		flush_remained_ckpt_reqs(sbi, &req);

	return req.ret;
}

int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (cprc->f2fs_issue_ckpt)
		return 0;

	cprc->f2fs_issue_ckpt = kthread_run(issue_checkpoint_thread, sbi,
			"f2fs_ckpt-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(cprc->f2fs_issue_ckpt)) {
		cprc->f2fs_issue_ckpt = NULL;
		return -ENOMEM;
	}

	set_task_ioprio(cprc->f2fs_issue_ckpt, cprc->ckpt_thread_ioprio);

	return 0;
}

void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (cprc->f2fs_issue_ckpt) {
		struct task_struct *ckpt_task = cprc->f2fs_issue_ckpt;

		cprc->f2fs_issue_ckpt = NULL;
		kthread_stop(ckpt_task);

		flush_remained_ckpt_reqs(sbi, NULL);
	}
}

void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	atomic_set(&cprc->issued_ckpt, 0);
	atomic_set(&cprc->total_ckpt, 0);
	atomic_set(&cprc->queued_ckpt, 0);
	cprc->ckpt_thread_ioprio = DEFAULT_CHECKPOINT_IOPRIO;
	init_waitqueue_head(&cprc->ckpt_wait_queue);
	init_llist_head(&cprc->issue_list);
	spin_lock_init(&cprc->stat_lock);
}
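The checkpoint-merge daemon added above is a request-batching pattern: each caller queues a ckpt_req on a lock-free llist and sleeps on a completion, while a single thread issues one checkpoint that answers every request queued in the meantime. A minimal userspace sketch of the same idea (not part of the patch; all names are invented, and a mutex plus condition variables stand in for llist, waitqueue and completion):

#include <pthread.h>
#include <stdio.h>

struct req {
	struct req *next;
	int ret;
	int done;
};

static struct req *queue;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER; /* wakes the worker */
static pthread_cond_t done = PTHREAD_COND_INITIALIZER; /* wakes submitters */

static int expensive_flush(void) { return 0; }	/* stands in for a checkpoint */

static void *worker(void *arg)
{
	for (;;) {
		pthread_mutex_lock(&lock);
		while (!queue)
			pthread_cond_wait(&kick, &lock);
		struct req *batch = queue;	/* grab the whole batch at once */
		queue = NULL;
		pthread_mutex_unlock(&lock);

		int ret = expensive_flush();	/* one flush serves everyone */

		pthread_mutex_lock(&lock);
		for (struct req *r = batch; r; r = r->next) {
			r->ret = ret;
			r->done = 1;
		}
		pthread_cond_broadcast(&done);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

static int submit_and_wait(void)
{
	struct req r = { 0 };

	pthread_mutex_lock(&lock);
	r.next = queue;		/* enqueue, then wake the worker */
	queue = &r;
	pthread_cond_signal(&kick);
	while (!r.done)
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
	return r.ret;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, worker, NULL);
	printf("checkpoint ret = %d\n", submit_and_wait());
	return 0;
}

One flush can thus complete any number of f2fs_issue_checkpoint() callers, which is where the queued/issued/total counters and the per-batch wait-time statistics above come from.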
@@ -252,8 +252,14 @@ static const struct f2fs_compress_ops f2fs_lzo_ops = {
#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZ4_MEM_COMPRESS, GFP_NOFS);
	unsigned int size = LZ4_MEM_COMPRESS;

#ifdef CONFIG_F2FS_FS_LZ4HC
	if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
		size = LZ4HC_MEM_COMPRESS;
#endif

	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

@@ -272,10 +278,34 @@ static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
	cc->private = NULL;
}

#ifdef CONFIG_F2FS_FS_LZ4HC
static int lz4hc_compress_pages(struct compress_ctx *cc)
{
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
				COMPRESS_LEVEL_OFFSET;
	int len;

	if (level)
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
	else
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}
#endif

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

#ifdef CONFIG_F2FS_FS_LZ4HC
	return lz4hc_compress_pages(cc);
#endif
	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
@@ -325,8 +355,13 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
				COMPRESS_LEVEL_OFFSET;

	params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
	if (!level)
		level = F2FS_ZSTD_DEFAULT_CLEVEL;

	params = ZSTD_getParams(level, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
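The hunks above all read the per-inode compression level the same way: the level is packed into i_compress_flag above COMPRESS_LEVEL_OFFSET, with 0 meaning "use the algorithm's default". A standalone sketch of the pack/unpack arithmetic (illustrative values only, not part of the patch):

#include <stdio.h>

#define COMPRESS_LEVEL_OFFSET	8

int main(void)
{
	unsigned short compress_flag = 0;
	unsigned char level = 6;	/* e.g. an lz4hc/zstd level */

	compress_flag |= level << COMPRESS_LEVEL_OFFSET;	/* pack */

	/* unpack, as in lz4hc_compress_pages()/zstd_init_compress_ctx() */
	unsigned char got = compress_flag >> COMPRESS_LEVEL_OFFSET;

	printf("stored level = %u\n", got);	/* 0 means "use the default" */
	return 0;
}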
@@ -721,38 +756,27 @@ out:
	return ret;
}

void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
static void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi= F2FS_I(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;
	int i;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (bio->bi_status || PageError(page))
		dic->failed = true;

	if (atomic_dec_return(&dic->pending_pages))
		return;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	/* submit partial compressed pages */
	if (dic->failed) {
		ret = -EIO;
		goto out_free_dic;
		goto out_end_io;
	}

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_free_dic;
		goto out_end_io;
	}

	for (i = 0; i < dic->cluster_size; i++) {
@@ -764,20 +788,20 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_free_dic;
			goto out_end_io;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_free_dic;
			goto out_end_io;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto destroy_decompress_ctx;
		goto out_destroy_decompress_ctx;
	}

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
@@ -816,18 +840,34 @@ out_vunmap_cbuf:
	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(dic->rbuf, dic->cluster_size);
destroy_decompress_ctx:
out_destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_free_dic:
	if (!verity)
		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
								ret, false);

out_end_io:
	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	if (!verity)
		f2fs_free_dic(dic);
	f2fs_decompress_end_io(dic, ret);
}

/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk). It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);

	dec_page_count(sbi, F2FS_RD_DATA);

	if (failed)
		WRITE_ONCE(dic->failed, true);

	if (atomic_dec_and_test(&dic->remaining_pages))
		f2fs_decompress_cluster(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
@@ -1416,7 +1456,7 @@ retry_write:

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks);
						compr_blocks, false);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
@@ -1451,6 +1491,9 @@ retry_write:

		*submitted += _submitted;
	}

	f2fs_balance_fs(F2FS_M_SB(mapping), true);

	return 0;
out_err:
	for (++i; i < cc->cluster_size; i++) {
@@ -1495,6 +1538,8 @@ destroy_out:
	return err;
}

static void f2fs_free_dic(struct decompress_io_ctx *dic);

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
@@ -1513,12 +1558,14 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->pending_pages, cc->nr_cpages);
	atomic_set(&dic->remaining_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	refcount_set(&dic->refcnt, 1);
	dic->failed = false;
	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
@@ -1547,7 +1594,7 @@ out_free:
	return ERR_PTR(-ENOMEM);
}

void f2fs_free_dic(struct decompress_io_ctx *dic)
static void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

@@ -1575,30 +1622,88 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
	kmem_cache_free(dic_entry_slab, dic);
}

void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity)
static void f2fs_put_dic(struct decompress_io_ctx *dic)
{
	if (refcount_dec_and_test(&dic->refcnt))
		f2fs_free_dic(dic);
}

/*
 * Update and unlock the cluster's pagecache pages, and release the reference to
 * the decompress_io_ctx that was being held for I/O completion.
 */
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	int i;

	for (i = 0; i < cluster_size; i++) {
		struct page *rpage = rpages[i];
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		if (err || PageError(rpage))
			goto clear_uptodate;

		if (!verity || fsverity_verify_page(rpage)) {
		/* PG_error was set if verity failed. */
		if (failed || PageError(rpage)) {
			ClearPageUptodate(rpage);
			/* will re-read again later */
			ClearPageError(rpage);
		} else {
			SetPageUptodate(rpage);
			goto unlock;
		}
clear_uptodate:
		ClearPageUptodate(rpage);
		ClearPageError(rpage);
unlock:
		unlock_page(rpage);
	}

	f2fs_put_dic(dic);
}

static void f2fs_verify_cluster(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, verity_work);
	int i;

	/* Verify the cluster's decompressed pages with fs-verity. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (rpage && !fsverity_verify_page(rpage))
			SetPageError(rpage);
	}

	__f2fs_decompress_end_io(dic, false);
}

/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	if (!failed && dic->need_verity) {
		/*
		 * Note that to avoid deadlocks, the verity work can't be done
		 * on the decompression workqueue. This is because verifying
		 * the data pages can involve reading metadata pages from the
		 * file, and these metadata pages may be compressed.
		 */
		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
		fsverity_enqueue_verify_work(&dic->verity_work);
	} else {
		__f2fs_decompress_end_io(dic, failed);
	}
}

/*
 * Put a reference to a compressed page's decompress_io_ctx.
 *
 * This is called when the page is no longer needed and can be freed.
 */
void f2fs_put_page_dic(struct page *page)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);

	f2fs_put_dic(dic);
}

int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
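Taken together, the compress.c changes above replace "free the context when the last pending page arrives" with a small reference-counted lifetime: one reference belongs to I/O completion and one to each compressed page while it sits in a bio. A freestanding sketch of that lifetime (invented names; C11 atomics standing in for refcount_t):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	atomic_int refcnt;
};

static struct ctx *ctx_alloc(int nr_pages_in_bio)
{
	struct ctx *c = malloc(sizeof(*c));

	/* 1 for I/O completion + 1 per in-flight compressed page */
	atomic_init(&c->refcnt, 1 + nr_pages_in_bio);
	return c;
}

static void ctx_put(struct ctx *c)
{
	/* fetch_sub returns the old value; 1 means we dropped the last ref */
	if (atomic_fetch_sub(&c->refcnt, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(c);
	}
}

int main(void)
{
	struct ctx *c = ctx_alloc(2);

	ctx_put(c);	/* page 1 leaves its bio */
	ctx_put(c);	/* page 2 leaves its bio */
	ctx_put(c);	/* end-of-I/O reference: frees the context */
	return 0;
}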
fs/f2fs/data.c
@@ -25,7 +25,6 @@
#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
#include <trace/events/android_fs.h>

@@ -51,27 +50,6 @@ void f2fs_destroy_bioset(void)
	bioset_exit(&f2fs_bioset);
}

static inline struct bio *__f2fs_bio_alloc(gfp_t gfp_mask,
						unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &f2fs_bioset);
}

struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio)
{
	if (noio) {
		/* No failure on bio allocation */
		return __f2fs_bio_alloc(GFP_NOIO, npages);
	}

	if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
		f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
		return NULL;
	}

	return __f2fs_bio_alloc(GFP_KERNEL, npages);
}

static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
@@ -116,10 +94,21 @@ static enum count_type __read_io_type(struct page *page)

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_DECRYPT,
	STEP_DECOMPRESS_NOWQ,	/* handle normal cluster data inplace */
	STEP_DECOMPRESS,	/* handle compressed cluster data in workqueue */
	STEP_VERITY,
#ifdef CONFIG_FS_ENCRYPTION
	STEP_DECRYPT	= 1 << 0,
#else
	STEP_DECRYPT	= 0,	/* compile out the decryption-related code */
#endif
#ifdef CONFIG_F2FS_FS_COMPRESSION
	STEP_DECOMPRESS	= 1 << 1,
#else
	STEP_DECOMPRESS	= 0,	/* compile out the decompression-related code */
#endif
#ifdef CONFIG_FS_VERITY
	STEP_VERITY	= 1 << 2,
#else
	STEP_VERITY	= 0,	/* compile out the verity-related code */
#endif
};

struct bio_post_read_ctx {
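The reworked enum above makes each post-read step a bit flag that collapses to 0 when the corresponding Kconfig option is off, so a test like (steps & STEP_DECRYPT) becomes if (0) and the compiler discards the whole branch. A compilable illustration of the trick (generic macro name, not the real Kconfig symbol):

#include <stdio.h>

enum step {
#ifdef HAVE_DECRYPT
	STEP_DECRYPT = 1 << 0,
#else
	STEP_DECRYPT = 0,	/* the branch below becomes dead code */
#endif
	STEP_VERITY = 1 << 2,
};

int main(void)
{
	unsigned int steps = STEP_DECRYPT | STEP_VERITY;

	if (steps & STEP_DECRYPT)	/* compiled out unless HAVE_DECRYPT is set */
		printf("decrypting\n");
	if (steps & STEP_VERITY)
		printf("verifying\n");
	return 0;
}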
@@ -129,25 +118,26 @@ struct bio_post_read_ctx {
	unsigned int enabled_steps;
};

static void __read_end_io(struct bio *bio, bool compr, bool verity)
static void f2fs_finish_read_bio(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	/*
	 * Update and unlock the bio's pagecache pages, and put the
	 * decompression context for any compressed pages.
	 */
	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;
		struct page *page = bv->bv_page;

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (compr && f2fs_is_compressed_page(page)) {
			f2fs_decompress_pages(bio, page, verity);
		if (f2fs_is_compressed_page(page)) {
			if (bio->bi_status)
				f2fs_end_read_compressed_page(page, true);
			f2fs_put_page_dic(page);
			continue;
		}
		if (verity)
			continue;
#endif

		/* PG_error was set if any post_read step failed */
		/* PG_error was set if decryption or verity failed. */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
@@ -158,106 +148,104 @@ static void __read_end_io(struct bio *bio, bool compr, bool verity)
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}

	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void f2fs_release_read_bio(struct bio *bio);
static void __f2fs_read_end_io(struct bio *bio, bool compr, bool verity)
{
	if (!compr)
		__read_end_io(bio, false, verity);
	f2fs_release_read_bio(bio);
}

static void f2fs_decompress_bio(struct bio *bio, bool verity)
{
	__read_end_io(bio, true, verity);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

static void f2fs_decrypt_work(struct bio_post_read_ctx *ctx)
{
	fscrypt_decrypt_bio(ctx->bio);
}

static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
{
	f2fs_decompress_bio(ctx->bio, ctx->enabled_steps & (1 << STEP_VERITY));
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
{
	f2fs_decompress_end_io(rpages, cluster_size, false, true);
}

static void f2fs_verify_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;
		struct decompress_io_ctx *dic;

		dic = (struct decompress_io_ctx *)page_private(page);

		if (dic) {
			if (atomic_dec_return(&dic->verity_pages))
				continue;
			f2fs_verify_pages(dic->rpages,
						dic->cluster_size);
			f2fs_free_dic(dic);
			continue;
		}

		if (bio->bi_status || PageError(page))
			goto clear_uptodate;

		if (fsverity_verify_page(page)) {
			SetPageUptodate(page);
			goto unlock;
		}
clear_uptodate:
		ClearPageUptodate(page);
		ClearPageError(page);
unlock:
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}
}
#endif

static void f2fs_verity_work(struct work_struct *work)
static void f2fs_verify_bio(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned int enabled_steps = ctx->enabled_steps;
#endif
	bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);

	/*
	 * fsverity_verify_bio() may call readpages() again, and while verity
	 * will be disabled for this, decryption may still be needed, resulting
	 * in another bio_post_read_ctx being allocated. So to prevent
	 * deadlocks we need to release the current ctx to the mempool first.
	 * This assumes that verity is the last post-read step.
	 * will be disabled for this, decryption and/or decompression may still
	 * be needed, resulting in another bio_post_read_ctx being allocated.
	 * So to prevent deadlocks we need to release the current ctx to the
	 * mempool first. This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/* previous step is decompression */
	if (enabled_steps & (1 << STEP_DECOMPRESS)) {
		f2fs_verify_bio(bio);
		f2fs_release_read_bio(bio);
		return;
	}
#endif
	/*
	 * Verify the bio's pages with fs-verity. Exclude compressed pages,
	 * as those were handled separately by f2fs_end_read_compressed_page().
	 */
	if (may_have_compressed_pages) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

	fsverity_verify_bio(bio);
	__f2fs_read_end_io(bio, false, false);
		bio_for_each_segment_all(bv, bio, iter_all) {
			struct page *page = bv->bv_page;

			if (!f2fs_is_compressed_page(page) &&
			    !PageError(page) && !fsverity_verify_page(page))
				SetPageError(page);
		}
	} else {
		fsverity_verify_bio(bio);
	}

	f2fs_finish_read_bio(bio);
}

/*
 * If the bio's data needs to be verified with fs-verity, then enqueue the
 * verity work for the bio. Otherwise finish the bio now.
 *
 * Note that to avoid deadlocks, the verity work can't be done on the
 * decryption/decompression workqueue. This is because verifying the data pages
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
static void f2fs_verify_and_finish_bio(struct bio *bio)
{
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verify_bio);
		fsverity_enqueue_verify_work(&ctx->work);
	} else {
		f2fs_finish_read_bio(bio);
	}
}

/*
 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
 * remaining page was read by @ctx->bio.
 *
 * Note that a bio may span clusters (even a mix of compressed and uncompressed
 * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
 * that the bio includes at least one compressed page. The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;
	bool all_compressed = true;

	bio_for_each_segment_all(bv, ctx->bio, iter_all) {
		struct page *page = bv->bv_page;

		/* PG_error was set if decryption failed. */
		if (f2fs_is_compressed_page(page))
			f2fs_end_read_compressed_page(page, PageError(page));
		else
			all_compressed = false;
	}

	/*
	 * Optimization: if all the bio's pages are compressed, then scheduling
	 * the per-bio verity work is unnecessary, as verity will be fully
	 * handled at the compression cluster level.
	 */
	if (all_compressed)
		ctx->enabled_steps &= ~STEP_VERITY;
}

static void f2fs_post_read_work(struct work_struct *work)
@@ -265,74 +253,36 @@ static void f2fs_post_read_work(struct work_struct *work)
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	if (ctx->enabled_steps & (1 << STEP_DECRYPT))
		f2fs_decrypt_work(ctx);
	if (ctx->enabled_steps & STEP_DECRYPT)
		fscrypt_decrypt_bio(ctx->bio);

	if (ctx->enabled_steps & (1 << STEP_DECOMPRESS))
		f2fs_decompress_work(ctx);
	if (ctx->enabled_steps & STEP_DECOMPRESS)
		f2fs_handle_step_decompress(ctx);

	if (ctx->enabled_steps & (1 << STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verity_work);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}

	__f2fs_read_end_io(ctx->bio,
		ctx->enabled_steps & (1 << STEP_DECOMPRESS), false);
}

static void f2fs_enqueue_post_read_work(struct f2fs_sb_info *sbi,
						struct work_struct *work)
{
	queue_work(sbi->post_read_wq, work);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */

	if (ctx->enabled_steps & (1 << STEP_DECRYPT) ||
	    ctx->enabled_steps & (1 << STEP_DECOMPRESS)) {
		INIT_WORK(&ctx->work, f2fs_post_read_work);
		f2fs_enqueue_post_read_work(ctx->sbi, &ctx->work);
		return;
	}

	if (ctx->enabled_steps & (1 << STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verity_work);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}

	__f2fs_read_end_io(ctx->bio, false, false);
}

static bool f2fs_bio_post_read_required(struct bio *bio)
{
	return bio->bi_private;
	f2fs_verify_and_finish_bio(ctx->bio);
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
	struct bio_post_read_ctx *ctx = bio->bi_private;

	if (time_to_inject(sbi, FAULT_READ_IO)) {
		f2fs_show_injection_info(sbi, FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (f2fs_bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		bio_post_read_processing(ctx);
	if (bio->bi_status) {
		f2fs_finish_read_bio(bio);
		return;
	}

	__f2fs_read_end_io(bio, false, false);
	if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
		INIT_WORK(&ctx->work, f2fs_post_read_work);
		queue_work(ctx->sbi->post_read_wq, &ctx->work);
	} else {
		f2fs_verify_and_finish_bio(bio);
	}
}

static void f2fs_write_end_io(struct bio *bio)
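The end-io rework above reduces to one dispatch rule: failed bios finish immediately; bios needing decryption or decompression bounce to the filesystem's workqueue, with verity chained afterwards; bios needing only verity go straight to fs-verity's workqueue; everything else finishes inline. A standalone sketch of that decision (invented stub names, not the kernel API):

#include <stdio.h>

enum { STEP_DECRYPT = 1 << 0, STEP_DECOMPRESS = 1 << 1, STEP_VERITY = 1 << 2 };

static void finish_bio(void) { printf("finish bio\n"); }
static void queue_post_read_work(void) { printf("queue decrypt/decompress work\n"); }
static void queue_verity_work(void) { printf("queue verity work\n"); }

static void read_end_io(int io_error, unsigned int steps)
{
	if (io_error) {			/* failed bios skip post-processing */
		finish_bio();
	} else if (steps & (STEP_DECRYPT | STEP_DECOMPRESS)) {
		queue_post_read_work();	/* verity is chained after this work */
	} else if (steps & STEP_VERITY) {
		queue_verity_work();
	} else {
		finish_bio();
	}
}

int main(void)
{
	read_end_io(0, STEP_DECRYPT | STEP_VERITY);
	read_end_io(0, STEP_VERITY);
	read_end_io(1, STEP_DECOMPRESS);
	return 0;
}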
@@ -443,7 +393,7 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
	struct f2fs_sb_info *sbi = fio->sbi;
	struct bio *bio;

	bio = f2fs_bio_alloc(sbi, npages, true);
	bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);

	f2fs_target_device(sbi, fio->new_blkaddr, bio);
	if (is_read_io(fio->op)) {
@@ -504,7 +454,7 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
	if (f2fs_lfs_mode(sbi) && current->plug)
		blk_finish_plug(current->plug);

	if (F2FS_IO_ALIGNED(sbi))
	if (!F2FS_IO_ALIGNED(sbi))
		goto submit_io;

	start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
@@ -712,7 +662,6 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio, 1);
@@ -917,7 +866,6 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
						fio->new_blkaddr))
@@ -1014,7 +962,6 @@ alloc_new:
		wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	trace_f2fs_submit_page_write(fio->page, fio);
skip:
@@ -1027,24 +974,18 @@ out:
	up_write(&io->io_rwsem);
}

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
		idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
				      unsigned nr_pages, unsigned op_flag,
				      pgoff_t first_idx, bool for_write,
				      bool for_verity)
				      pgoff_t first_idx, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx;
	unsigned int post_read_steps = 0;

	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES),
			     for_write);
	bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
			       min_t(int, nr_pages, BIO_MAX_PAGES),
			       &f2fs_bioset);
	if (!bio)
		return ERR_PTR(-ENOMEM);

@@ -1055,13 +996,19 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;
	if (f2fs_compressed_file(inode))
		post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ;
	if (for_verity && f2fs_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;
		post_read_steps |= STEP_DECRYPT;

	if (post_read_steps) {
	if (f2fs_need_verity(inode, first_idx))
		post_read_steps |= STEP_VERITY;

	/*
	 * STEP_DECOMPRESS is handled specially, since a compressed file might
	 * contain both compressed and uncompressed clusters. We'll allocate a
	 * bio_post_read_ctx if the file is compressed, but the caller is
	 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
	 */

	if (post_read_steps || f2fs_compressed_file(inode)) {
		/* Due to the mempool, this never fails. */
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		ctx->bio = bio;
@@ -1073,13 +1020,6 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
	return bio;
}

static void f2fs_release_read_bio(struct bio *bio)
{
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
				 block_t blkaddr, int op_flags, bool for_write)
@@ -1088,7 +1028,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
	struct bio *bio;

	bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
					page->index, for_write, true);
					page->index, for_write);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

@@ -1969,6 +1909,7 @@ next:
	}

	if (size) {
		flags |= FIEMAP_EXTENT_MERGED;
		if (IS_ENCRYPTED(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

@@ -2126,7 +2067,7 @@ submit_and_realloc:
		if (bio == NULL) {
			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
					is_readahead ? REQ_RAHEAD : 0, page->index,
					false, true);
					false);
			if (IS_ERR(bio)) {
				ret = PTR_ERR(bio);
				bio = NULL;
@@ -2172,8 +2113,6 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
	sector_t last_block_in_file;
	const unsigned blocksize = blks_to_bytes(inode, 1);
	struct decompress_io_ctx *dic = NULL;
	struct bio_post_read_ctx *ctx;
	bool for_verity = false;
	int i;
	int ret = 0;

@@ -2239,29 +2178,10 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
		goto out_put_dnode;
	}

	/*
	 * It's possible to enable fsverity on the fly when handling a cluster,
	 * which requires complicated error handling. Instead of adding more
	 * complexity, let's give a rule where end_io post-processes fsverity
	 * per cluster. In order to do that, we need to submit bio, if previous
	 * bio sets a different post-process policy.
	 */
	if (fsverity_active(cc->inode)) {
		atomic_set(&dic->verity_pages, cc->nr_cpages);
		for_verity = true;

		if (bio) {
			ctx = bio->bi_private;
			if (!(ctx->enabled_steps & (1 << STEP_VERITY))) {
				__submit_bio(sbi, bio, DATA);
				bio = NULL;
			}
		}
	}

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page = dic->cpages[i];
		block_t blkaddr;
		struct bio_post_read_ctx *ctx;

		blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);
@@ -2277,31 +2197,10 @@ submit_and_realloc:

		if (!bio) {
			bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
					is_readahead ? REQ_RAHEAD : 0,
					page->index, for_write, for_verity);
					page->index, for_write);
			if (IS_ERR(bio)) {
				unsigned int remained = dic->nr_cpages - i;
				bool release = false;

				ret = PTR_ERR(bio);
				dic->failed = true;

				if (for_verity) {
					if (!atomic_sub_return(remained,
							&dic->verity_pages))
						release = true;
				} else {
					if (!atomic_sub_return(remained,
							&dic->pending_pages))
						release = true;
				}

				if (release) {
					f2fs_decompress_end_io(dic->rpages,
							cc->cluster_size, true,
							false);
					f2fs_free_dic(dic);
				}

				f2fs_decompress_end_io(dic, ret);
				f2fs_put_dnode(&dn);
				*bio_ret = NULL;
				return ret;
@@ -2313,10 +2212,9 @@ submit_and_realloc:
		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		/* tag STEP_DECOMPRESS to handle IO in wq */
		ctx = bio->bi_private;
		if (!(ctx->enabled_steps & (1 << STEP_DECOMPRESS)))
			ctx->enabled_steps |= 1 << STEP_DECOMPRESS;
		ctx->enabled_steps |= STEP_DECOMPRESS;
		refcount_inc(&dic->refcnt);

		inc_page_count(sbi, F2FS_RD_DATA);
		f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
@@ -2333,7 +2231,13 @@ submit_and_realloc:
out_put_dnode:
	f2fs_put_dnode(&dn);
out:
	f2fs_decompress_end_io(cc->rpages, cc->cluster_size, true, false);
	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i]) {
			ClearPageUptodate(cc->rpages[i]);
			ClearPageError(cc->rpages[i]);
			unlock_page(cc->rpages[i]);
		}
	}
	*bio_ret = bio;
	return ret;
}
@@ -2342,11 +2246,6 @@ out:
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 *
 * Note that the aops->readpages() function is ONLY used for read-ahead. If
 * this function ever deviates from doing just read-ahead, it should either
 * use ->readpage() or do the necessary surgery to decouple ->readpages()
 * from read-ahead.
 */
static int f2fs_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct page *page)
@@ -2369,7 +2268,6 @@ static int f2fs_mpage_readpages(struct inode *inode,
	unsigned nr_pages = rac ? readahead_count(rac) : 1;
	unsigned max_nr_pages = nr_pages;
	int ret = 0;
	bool drop_ra = false;

	map.m_pblk = 0;
	map.m_lblk = 0;
@@ -2380,26 +2278,10 @@ static int f2fs_mpage_readpages(struct inode *inode,
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;

	/*
	 * Two readahead threads for same address range can cause race condition
	 * which fragments sequential read IOs. So let's avoid each other.
	 */
	if (rac && readahead_count(rac)) {
		if (READ_ONCE(F2FS_I(inode)->ra_offset) == readahead_index(rac))
			drop_ra = true;
		else
			WRITE_ONCE(F2FS_I(inode)->ra_offset,
						readahead_index(rac));
	}

	for (; nr_pages; nr_pages--) {
		if (rac) {
			page = readahead_page(rac);
			prefetchw(&page->flags);
			if (drop_ra) {
				f2fs_put_page(page, 1);
				continue;
			}
		}

#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -2462,9 +2344,6 @@ next_page:
	}
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);

	if (rac && readahead_count(rac) && !drop_ra)
		WRITE_ONCE(F2FS_I(inode)->ra_offset, -1);
	return ret;
}

@@ -2748,7 +2627,8 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
				sector_t *last_block,
				struct writeback_control *wbc,
				enum iostat_type io_type,
				int compr_blocks)
				int compr_blocks,
				bool allow_balance)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -2886,7 +2766,7 @@ out:
	}
	unlock_page(page);
	if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
			!F2FS_I(inode)->cp_task)
			!F2FS_I(inode)->cp_task && allow_balance)
		f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi))) {
@@ -2933,7 +2813,7 @@ out:
#endif

	return f2fs_write_single_data_page(page, NULL, NULL, NULL,
						wbc, FS_DATA_IO, 0);
						wbc, FS_DATA_IO, 0, true);
}

/*
@@ -3101,7 +2981,8 @@ continue_unlock:
			}
#endif
			ret = f2fs_write_single_data_page(page, &submitted,
					&bio, &last_block, wbc, io_type, 0);
					&bio, &last_block, wbc, io_type,
					0, true);
			if (ret == AOP_WRITEPAGE_ACTIVATE)
				unlock_page(page);
#ifdef CONFIG_F2FS_FS_COMPRESSION
@@ -3877,7 +3758,7 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
	filemap_write_and_wait(mapping);

	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(block >= F2FS_I_SB(inode)->max_file_blocks))
	if (unlikely(block >= max_file_blocks(inode)))
		goto out;

	if (f2fs_compressed_file(inode)) {
@@ -4154,12 +4035,13 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
	if (!f2fs_disable_compressed_file(inode))
		return -EINVAL;

	f2fs_precache_extents(inode);

	ret = check_swap_activate(sis, file, span);
	if (ret < 0)
		return ret;

	set_inode_flag(inode, FI_PIN_FILE);
	f2fs_precache_extents(inode);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
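A recurring assumption in the read path above is the comment "Due to the mempool, this never fails": a mempool keeps a pre-allocated reserve, so an allocation can fall back on a reserved element rather than return NULL. A toy single-element version of that guarantee (invented names; the real mempool_alloc can additionally sleep until an element is freed back to the pool):

#include <stdlib.h>

struct toy_pool {
	void *reserve;		/* pre-allocated fallback element */
	size_t size;
};

static void *toy_pool_alloc(struct toy_pool *p)
{
	void *obj = malloc(p->size);

	if (obj)
		return obj;
	obj = p->reserve;	/* fall back to the reserve */
	p->reserve = NULL;	/* caller must return it via toy_pool_free() */
	return obj;
}

static void toy_pool_free(struct toy_pool *p, void *obj)
{
	if (!p->reserve)
		p->reserve = obj;	/* refill the reserve first */
	else
		free(obj);
}

int main(void)
{
	struct toy_pool pool = { .reserve = malloc(64), .size = 64 };
	void *ctx = toy_pool_alloc(&pool);	/* cannot observe NULL here */

	toy_pool_free(&pool, ctx);
	free(pool.reserve);
	return 0;
}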
@@ -120,6 +120,13 @@ static void update_general_status(struct f2fs_sb_info *sbi)
		atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
		si->undiscard_blks = SM_I(sbi)->dcc_info->undiscard_blks;
	}
	si->nr_issued_ckpt = atomic_read(&sbi->cprc_info.issued_ckpt);
	si->nr_total_ckpt = atomic_read(&sbi->cprc_info.total_ckpt);
	si->nr_queued_ckpt = atomic_read(&sbi->cprc_info.queued_ckpt);
	spin_lock(&sbi->cprc_info.stat_lock);
	si->cur_ckpt_time = sbi->cprc_info.cur_time;
	si->peak_ckpt_time = sbi->cprc_info.peak_time;
	spin_unlock(&sbi->cprc_info.stat_lock);
	si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
	si->rsvd_segs = reserved_segments(sbi);
	si->overp_segs = overprovision_segments(sbi);
@@ -417,6 +424,11 @@ static int stat_show(struct seq_file *s, void *v)
			si->meta_count[META_NAT]);
		seq_printf(s, " - ssa blocks : %u\n",
			si->meta_count[META_SSA]);
		seq_printf(s, "CP merge (Queued: %4d, Issued: %4d, Total: %4d, "
			"Cur time: %4d(ms), Peak time: %4d(ms))\n",
			si->nr_queued_ckpt, si->nr_issued_ckpt,
			si->nr_total_ckpt, si->cur_ckpt_time,
			si->peak_ckpt_time);
		seq_printf(s, "GC calls: %d (BG: %d)\n",
			si->call_count, si->bg_gc);
		seq_printf(s, " - data segments : %d (%d)\n",
fs/f2fs/f2fs.h
@@ -43,7 +43,6 @@ enum {
	FAULT_KVMALLOC,
	FAULT_PAGE_ALLOC,
	FAULT_PAGE_GET,
	FAULT_ALLOC_BIO,
	FAULT_ALLOC_NID,
	FAULT_ORPHAN,
	FAULT_BLOCK,
@@ -97,6 +96,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_MOUNT_ATGC			0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT	0x10000000

#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
@@ -146,6 +146,7 @@ struct f2fs_mount_info {
	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned char compress_log_size;	/* cluster log size */
	unsigned char compress_level;		/* compress level */
	bool compress_chksum;			/* compressed data chksum */
	unsigned char compress_ext_cnt;		/* extension count */
	int compress_mode;			/* compression mode */
@@ -266,6 +267,26 @@ struct fsync_node_entry {
	unsigned int seq_id;	/* sequence id */
};

struct ckpt_req {
	struct completion wait;		/* completion for checkpoint done */
	struct llist_node llnode;	/* llist_node to be linked in wait queue */
	int ret;			/* return code of checkpoint */
	ktime_t queue_time;		/* request queued time */
};

struct ckpt_req_control {
	struct task_struct *f2fs_issue_ckpt;	/* checkpoint task */
	int ckpt_thread_ioprio;			/* checkpoint merge thread ioprio */
	wait_queue_head_t ckpt_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_ckpt;		/* # of actually issued ckpts */
	atomic_t total_ckpt;		/* # of total ckpts */
	atomic_t queued_ckpt;		/* # of queued ckpts */
	struct llist_head issue_list;	/* list for command issue */
	spinlock_t stat_lock;		/* lock for below checkpoint time stats */
	unsigned int cur_time;		/* cur wait time in msec for currently issued checkpoint */
	unsigned int peak_time;		/* peak wait time in msec until now */
};

/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
@@ -717,7 +738,6 @@ struct f2fs_inode_info {
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	pgoff_t ra_offset;		/* ongoing readahead offset */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
@@ -735,6 +755,7 @@ struct f2fs_inode_info {
	atomic_t i_compr_blocks;		/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned char i_compress_level;		/* compress level (lz4hc,zstd) */
	unsigned short i_compress_flag;		/* compress flag */
	unsigned int i_cluster_size;		/* cluster size */
};
@@ -1310,6 +1331,8 @@ struct compress_data {

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000

#define COMPRESS_LEVEL_OFFSET	8

/* compress context */
struct compress_ctx {
	struct inode *inode;	/* inode the context belong to */
@@ -1337,7 +1360,7 @@ struct compress_io_ctx {
	atomic_t pending_pages;	/* in-flight compressed page count */
};

/* decompress io context for read IO path */
/* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx {
	u32 magic;		/* magic number to indicate page is compressed */
	struct inode *inode;	/* inode the context belong to */
@@ -1353,11 +1376,37 @@ struct decompress_io_ctx {
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;		/* valid data length in rbuf */
	size_t clen;		/* valid data length in cbuf */
	atomic_t pending_pages;	/* in-flight compressed page count */
	atomic_t verity_pages;	/* in-flight page count for verity */
	bool failed;		/* indicate IO error during decompression */

	/*
	 * The number of compressed pages remaining to be read in this cluster.
	 * This is initially nr_cpages. It is decremented by 1 each time a page
	 * has been read (or failed to be read). When it reaches 0, the cluster
	 * is decompressed (or an error is reported).
	 *
	 * If an error occurs before all the pages have been submitted for I/O,
	 * then this will never reach 0. In this case the I/O submitter is
	 * responsible for calling f2fs_decompress_end_io() instead.
	 */
	atomic_t remaining_pages;

	/*
	 * Number of references to this decompress_io_ctx.
	 *
	 * One reference is held for I/O completion. This reference is dropped
	 * after the pagecache pages are updated and unlocked -- either after
	 * decompression (and verity if enabled), or after an error.
	 *
	 * In addition, each compressed page holds a reference while it is in a
	 * bio. These references are necessary prevent compressed pages from
	 * being freed while they are still in a bio.
	 */
	refcount_t refcnt;

	bool failed;		/* IO error occurred before decompression? */
	bool need_verity;	/* need fs-verity verification after decompression? */
	void *private;		/* payload buffer for specified decompression algorithm */
	void *private2;		/* extra payload buffer */
	struct work_struct verity_work;	/* work to verify the decompressed pages */
};

#define NULL_CLUSTER			((unsigned int)(~0))
@@ -1404,6 +1453,7 @@ struct f2fs_sb_info {
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */
	struct ckpt_req_control cprc_info;	/* for checkpoint request control */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

@@ -1444,7 +1494,6 @@ struct f2fs_sb_info {
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	loff_t max_file_blocks;			/* max block index of file */
	int dir_level;				/* directory level */
	int readdir_ra;				/* readahead inode in readdir */
	u64 max_io_bytes;			/* max io bytes to merge IOs */
@@ -1541,9 +1590,12 @@ struct f2fs_sb_info {
	unsigned int node_io_flag;

	/* For sysfs suppport */
	struct kobject s_kobj;
	struct kobject s_kobj;			/* /sys/fs/f2fs/<devname> */
	struct completion s_kobj_unregister;

	struct kobject s_stat_kobj;		/* /sys/fs/f2fs/<devname>/stat */
	struct completion s_stat_kobj_unregister;

	/* For shrinker support */
	struct list_head s_list;
	int s_ndevs;				/* number of devices */
@@ -3232,6 +3284,7 @@ int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
void f2fs_quota_off_umount(struct super_block *sb);
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
int f2fs_sync_fs(struct super_block *sb, int sync);
@@ -3418,13 +3471,16 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
void f2fs_destroy_checkpoint_caches(void);
int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi);
int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi);
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);

/*
 * data.c
 */
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_bio(struct f2fs_sb_info *sbi,
@@ -3469,7 +3525,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
			struct bio **bio, sector_t *last_block,
			struct writeback_control *wbc,
			enum iostat_type io_type,
			int compr_blocks);
			int compr_blocks, bool allow_balance);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
@@ -3530,6 +3586,8 @@ struct f2fs_stat_info {
	int nr_discarding, nr_discarded;
	int nr_discard_cmd;
	unsigned int undiscard_blks;
	int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt;
	unsigned int cur_ckpt_time, peak_ckpt_time;
	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
	int compr_inode;
	unsigned long long compr_blocks;
@@ -3715,8 +3773,6 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#define stat_dec_compr_inode(inode)		do { } while (0)
#define stat_add_compr_blocks(inode, blocks)	do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)	do { } while (0)
#define stat_inc_atomic_write(inode)		do { } while (0)
#define stat_dec_atomic_write(inode)		do { } while (0)
#define stat_update_max_atomic_write(inode)	do { } while (0)
#define stat_inc_volatile_write(inode)		do { } while (0)
#define stat_dec_volatile_write(inode)		do { } while (0)
@@ -3876,7 +3932,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
void f2fs_end_read_compressed_page(struct page *page, bool failed);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
@@ -3889,9 +3945,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
				unsigned nr_pages, sector_t *last_block_in_bio,
				bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_free_dic(struct decompress_io_ctx *dic);
void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
void f2fs_put_page_dic(struct page *page);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
@@ -3915,6 +3970,14 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_end_read_compressed_page(struct page *page, bool failed)
{
	WARN_ON_ONCE(1);
}
static inline void f2fs_put_page_dic(struct page *page)
{
	WARN_ON_ONCE(1);
}
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
@@ -3934,6 +3997,11 @@ static inline void set_compress_context(struct inode *inode)
			1 << COMPRESS_CHKSUM : 0;
	F2FS_I(inode)->i_cluster_size =
			1 << F2FS_I(inode)->i_log_cluster_size;
	if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 &&
			F2FS_OPTION(sbi).compress_level)
		F2FS_I(inode)->i_compress_flag |=
				F2FS_OPTION(sbi).compress_level <<
				COMPRESS_LEVEL_OFFSET;
	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
@@ -4118,6 +4186,12 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
	return false;
}

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
		idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type);
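One detail worth checking in the f2fs_need_verity() helper above: only page indexes that overlap i_size are verified, since pages wholly past EOF carry no file data. A standalone arithmetic check with 4 KiB pages:

#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long i_size = 10000;	/* file is 10000 bytes */
	unsigned long long npages = DIV_ROUND_UP(i_size, PAGE_SIZE);

	/* pages 0..2 hold data and need verification; pages 3+ do not */
	for (unsigned long long idx = 0; idx < 4; idx++)
		printf("page %llu: %s\n", idx,
		       idx < npages ? "verify" : "skip");
	return 0;
}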
@@ -29,7 +29,6 @@
|
||||
#include "xattr.h"
|
||||
#include "acl.h"
|
||||
#include "gc.h"
|
||||
#include "trace.h"
|
||||
#include <trace/events/f2fs.h>
|
||||
#include <uapi/linux/f2fs.h>
|
||||
|
||||
@@ -60,6 +59,9 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
|
||||
bool need_alloc = true;
|
||||
int err = 0;
|
||||
|
||||
if (unlikely(IS_IMMUTABLE(inode)))
|
||||
return VM_FAULT_SIGBUS;
|
||||
|
||||
if (unlikely(f2fs_cp_error(sbi))) {
|
||||
err = -EIO;
|
||||
goto err;
|
||||
@@ -70,6 +72,10 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
|
||||
goto err;
|
||||
}
|
||||
|
||||
err = f2fs_convert_inline_inode(inode);
|
||||
if (err)
|
||||
goto err;
|
||||
|
||||
#ifdef CONFIG_F2FS_FS_COMPRESSION
|
||||
if (f2fs_compressed_file(inode)) {
|
||||
int ret = f2fs_is_compressed_cluster(inode, page->index);
|
||||
@@ -366,7 +372,6 @@ flush_out:
|
||||
f2fs_update_time(sbi, REQ_TIME);
|
||||
out:
|
||||
trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
|
||||
f2fs_trace_ios(NULL, 1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -483,6 +488,9 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
|
||||
struct inode *inode = file->f_mapping->host;
|
||||
loff_t maxbytes = inode->i_sb->s_maxbytes;
|
||||
|
||||
if (f2fs_compressed_file(inode))
|
||||
maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
|
||||
|
||||
switch (whence) {
|
||||
case SEEK_SET:
|
||||
case SEEK_CUR:
|
||||
@@ -502,7 +510,6 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
|
||||
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
{
|
||||
struct inode *inode = file_inode(file);
|
||||
int err;
|
||||
|
||||
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
|
||||
return -EIO;
|
||||
@@ -510,11 +517,6 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
if (!f2fs_is_compress_backend_ready(inode))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* we don't need to use inline_data strictly */
|
||||
err = f2fs_convert_inline_inode(inode);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
file_accessed(file);
|
||||
vma->vm_ops = &f2fs_file_vm_ops;
|
||||
set_inode_flag(inode, FI_MMAP_FILE);
|
||||
@@ -667,7 +669,7 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
|
||||
|
||||
free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
|
||||
|
||||
if (free_from >= sbi->max_file_blocks)
|
||||
if (free_from >= max_file_blocks(inode))
|
||||
goto free_partial;
|
||||
|
||||
if (lock)
|
||||
@@ -767,6 +769,10 @@ int f2fs_truncate(struct inode *inode)
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
err = dquot_initialize(inode);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
/* we should check inline_data size */
|
||||
if (!f2fs_may_inline_data(inode)) {
|
||||
err = f2fs_convert_inline_inode(inode);
|
||||
@@ -848,7 +854,8 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
|
||||
if (ia_valid & ATTR_MODE) {
|
||||
umode_t mode = attr->ia_mode;
|
||||
|
||||
if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
|
||||
if (!in_group_p(inode->i_gid) &&
|
||||
!capable_wrt_inode_uidgid(inode, CAP_FSETID))
|
||||
mode &= ~S_ISGID;
|
||||
set_acl_inode(inode, mode);
|
||||
}
|
||||
@@ -865,6 +872,14 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
|
||||
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
|
||||
return -EIO;
|
||||
|
||||
if (unlikely(IS_IMMUTABLE(inode)))
|
||||
return -EPERM;
|
||||
|
||||
if (unlikely(IS_APPEND(inode) &&
|
||||
(attr->ia_valid & (ATTR_MODE | ATTR_UID |
|
||||
ATTR_GID | ATTR_TIMES_SET))))
|
||||
return -EPERM;
|
||||
|
||||
if ((attr->ia_valid & ATTR_SIZE) &&
|
||||
!f2fs_is_compress_backend_ready(inode))
|
||||
return -EOPNOTSUPP;
|
||||
@@ -949,8 +964,10 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 
 	if (attr->ia_valid & ATTR_MODE) {
 		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
-		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
-			inode->i_mode = F2FS_I(inode)->i_acl_mode;
+
+		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
+			if (!err)
+				inode->i_mode = F2FS_I(inode)->i_acl_mode;
 			clear_inode_flag(inode, FI_ACL_MODE);
 		}
 	}
@@ -2236,16 +2253,12 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
 
 	switch (in) {
 	case F2FS_GOING_DOWN_FULLSYNC:
-		sb = freeze_bdev(sb->s_bdev);
-		if (IS_ERR(sb)) {
-			ret = PTR_ERR(sb);
-			goto out;
-		}
-		if (sb) {
-			f2fs_stop_checkpoint(sbi, false);
-			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
-			thaw_bdev(sb->s_bdev, sb);
-		}
+		ret = freeze_bdev(sb->s_bdev);
+		if (ret)
+			goto out;
+		f2fs_stop_checkpoint(sbi, false);
+		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+		thaw_bdev(sb->s_bdev);
 		break;
 	case F2FS_GOING_DOWN_METASYNC:
 		/* do checkpoint only */
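This hunk tracks a block-layer API change: freeze_bdev() now returns an int instead of the frozen superblock, and thaw_bdev() takes only the block device, so the caller collapses to a plain error check. A rough userspace analogue of the same freeze/work/thaw bracket is the filesystem freeze ioctl pair (mount point is a placeholder; requires root):

```c
/* Userspace analogue of the F2FS_GOING_DOWN_FULLSYNC freeze/thaw bracket. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	int fd = open("/mnt/f2fs", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, FIFREEZE, 0) < 0) {	/* like freeze_bdev() returning ret */
		perror("FIFREEZE");
		return 1;
	}
	/* ... filesystem is quiesced here ... */
	if (ioctl(fd, FITHAW, 0) < 0)		/* like thaw_bdev(sb->s_bdev) */
		perror("FITHAW");
	close(fd);
	return 0;
}
```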
@@ -2734,7 +2747,7 @@ static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
 		return -EINVAL;
 
 	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
-					sbi->max_file_blocks))
+					max_file_blocks(inode)))
 		return -EINVAL;
 
 	err = mnt_want_write_file(filp);
@@ -3297,7 +3310,7 @@ int f2fs_precache_extents(struct inode *inode)
 	map.m_next_extent = &m_next_extent;
 	map.m_seg_type = NO_CHECK_TYPE;
 	map.m_may_create = false;
-	end = F2FS_I_SB(inode)->max_file_blocks;
+	end = max_file_blocks(inode);
 
 	while (map.m_lblk < end) {
 		map.m_len = end - map.m_lblk;
@@ -3361,6 +3374,14 @@ static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
 	return fsverity_ioctl_measure(filp, (void __user *)arg);
 }
 
+static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
+{
+	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
+		return -EOPNOTSUPP;
+
+	return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
+}
+
 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
 {
 	struct inode *inode = file_inode(filp);
@@ -4047,8 +4068,10 @@ static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
 
 	for (i = 0; i < page_len; i++, redirty_idx++) {
 		page = find_lock_page(mapping, redirty_idx);
-		if (!page)
-			ret = -ENOENT;
+		if (!page) {
+			ret = -ENOMEM;
+			break;
+		}
 		set_page_dirty(page);
 		f2fs_put_page(page, 1);
 		f2fs_put_page(page, 0);
@@ -4276,6 +4299,8 @@ static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		return f2fs_ioc_enable_verity(filp, arg);
 	case FS_IOC_MEASURE_VERITY:
 		return f2fs_ioc_measure_verity(filp, arg);
+	case FS_IOC_READ_VERITY_METADATA:
+		return f2fs_ioc_read_verity_metadata(filp, arg);
 	case FS_IOC_GETFSLABEL:
 		return f2fs_ioc_getfslabel(filp, arg);
 	case FS_IOC_SETFSLABEL:
@@ -4353,6 +4378,11 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		inode_lock(inode);
 	}
 
+	if (unlikely(IS_IMMUTABLE(inode))) {
+		ret = -EPERM;
+		goto unlock;
+	}
+
 	ret = generic_write_checks(iocb, from);
 	if (ret > 0) {
 		bool preallocated = false;
@@ -4417,6 +4447,7 @@ write:
 		if (ret > 0)
 			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
 	}
+unlock:
 	inode_unlock(inode);
 out:
 	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
@@ -4527,6 +4558,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case F2FS_IOC_RESIZE_FS:
 	case FS_IOC_ENABLE_VERITY:
 	case FS_IOC_MEASURE_VERITY:
+	case FS_IOC_READ_VERITY_METADATA:
 	case FS_IOC_GETFSLABEL:
 	case FS_IOC_SETFSLABEL:
 	case F2FS_IOC_GET_COMPRESS_BLOCKS:
fs/f2fs/gc.c
@@ -1169,8 +1169,6 @@ static int move_data_block(struct inode *inode, block_t bidx,
 	if (err)
 		goto put_out;
 
-	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
-
 	/* read page */
 	fio.page = page;
 	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
@@ -1207,6 +1205,9 @@ static int move_data_block(struct inode *inode, block_t bidx,
 		}
 	}
 
+	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
+
+	/* allocate block address */
 	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
 				&sum, type, NULL);
 
@@ -1233,9 +1234,6 @@ static int move_data_block(struct inode *inode, block_t bidx,
 	set_page_writeback(fio.encrypted_page);
 	ClearPageError(page);
 
-	/* allocate block address */
-	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
-
 	fio.op = REQ_OP_WRITE;
 	fio.op_flags = REQ_SYNC;
 	fio.new_blkaddr = newaddr;
fs/f2fs/inline.c
@@ -210,6 +210,10 @@ int f2fs_convert_inline_inode(struct inode *inode)
 			f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
 		return 0;
 
+	err = dquot_initialize(inode);
+	if (err)
+		return err;
+
 	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
 	if (!page)
 		return -ENOMEM;
fs/f2fs/namei.c
@@ -855,7 +855,11 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
 
 	if (whiteout) {
 		f2fs_i_links_write(inode, false);
+
+		spin_lock(&inode->i_lock);
 		inode->i_state |= I_LINKABLE;
+		spin_unlock(&inode->i_lock);
+
 		*whiteout = inode;
 	} else {
 		d_tmpfile(dentry, inode);
@@ -1041,7 +1045,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		err = f2fs_add_link(old_dentry, whiteout);
 		if (err)
 			goto put_out_dir;
+
+		spin_lock(&whiteout->i_lock);
 		whiteout->i_state &= ~I_LINKABLE;
+		spin_unlock(&whiteout->i_lock);
+
 		iput(whiteout);
 	}
fs/f2fs/node.c
@@ -17,7 +17,6 @@
 #include "node.h"
 #include "segment.h"
 #include "xattr.h"
-#include "trace.h"
 #include <trace/events/f2fs.h>
 
 #define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
@@ -2089,7 +2088,6 @@ static int f2fs_set_node_page_dirty(struct page *page)
 		__set_page_dirty_nobuffers(page);
 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
 		f2fs_set_page_private(page, 0);
-		f2fs_trace_pid(page);
 		return 1;
 	}
 	return 0;
@@ -2696,7 +2694,7 @@ retry:
 	src = F2FS_INODE(page);
 	dst = F2FS_INODE(ipage);
 
-	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
+	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
 	dst->i_size = 0;
 	dst->i_blocks = cpu_to_le64(1);
 	dst->i_links = cpu_to_le32(1);
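The memcpy length is unchanged; offsetof() just states the "copy everything up to i_ext" intent directly instead of through pointer arithmetic. A minimal standalone demonstration with an illustrative struct:

```c
/* Minimal demonstration that offsetof() equals the old pointer arithmetic. */
#include <stdio.h>
#include <stddef.h>

struct demo_inode {
	long i_size;
	long i_blocks;
	long i_ext;	/* copy everything up to here */
};

int main(void)
{
	struct demo_inode src;
	unsigned long manual =
		(unsigned long)&src.i_ext - (unsigned long)&src;

	printf("manual=%lu offsetof=%zu\n", manual,
	       offsetof(struct demo_inode, i_ext));
	return 0;
}
```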
fs/f2fs/segment.c
@@ -20,7 +20,6 @@
 #include "segment.h"
 #include "node.h"
 #include "gc.h"
-#include "trace.h"
 #include <trace/events/f2fs.h>
 
 #define __reverse_ffz(x) __reverse_ffs(~(x))
@@ -187,8 +186,6 @@ void f2fs_register_inmem_page(struct inode *inode, struct page *page)
 {
 	struct inmem_pages *new;
 
-	f2fs_trace_pid(page);
-
 	f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);
 
 	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
@@ -566,17 +563,7 @@ do_sync:
 static int __submit_flush_wait(struct f2fs_sb_info *sbi,
 				struct block_device *bdev)
 {
-	struct bio *bio;
-	int ret;
-
-	bio = f2fs_bio_alloc(sbi, 0, false);
-	if (!bio)
-		return -ENOMEM;
-
-	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
-	bio_set_dev(bio, bdev);
-	ret = submit_bio_wait(bio);
-	bio_put(bio);
+	int ret = blkdev_issue_flush(bdev, GFP_NOFS);
 
 	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
 				test_opt(sbi, FLUSH_MERGE), ret);
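The open-coded REQ_PREFLUSH bio (and with it the bio-allocation failure path) is replaced by the block layer's own flush helper. From userspace, the closest observable equivalent of such a device cache flush is fsync() on the block device itself, which the kernel services via the same flush machinery; device path is a placeholder:

```c
/* Userspace way to request the same kind of device cache flush that
 * blkdev_issue_flush() performs in the kernel. Requires root.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/sdX", O_WRONLY);	/* placeholder device */

	if (fd < 0)
		return 1;
	if (fsync(fd) < 0)	/* flushes the device write cache */
		perror("fsync");
	close(fd);
	return 0;
}
```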
@@ -610,8 +597,6 @@ repeat:
 		if (kthread_should_stop())
 			return 0;
 
-		sb_start_intwrite(sbi->sb);
-
 		if (!llist_empty(&fcc->issue_list)) {
 			struct flush_cmd *cmd, *next;
 			int ret;
@@ -632,8 +617,6 @@ repeat:
 			fcc->dispatch_list = NULL;
 		}
 
-		sb_end_intwrite(sbi->sb);
-
 		wait_event_interruptible(*q,
 			kthread_should_stop() || !llist_empty(&fcc->issue_list));
 		goto repeat;
fs/f2fs/segment.h
@@ -101,11 +101,11 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
 #define BLKS_PER_SEC(sbi)					\
 	((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
 #define GET_SEC_FROM_SEG(sbi, segno)				\
-	((segno) / (sbi)->segs_per_sec)
+	(((segno) == -1) ? -1 : (segno) / (sbi)->segs_per_sec)
 #define GET_SEG_FROM_SEC(sbi, secno)				\
 	((secno) * (sbi)->segs_per_sec)
 #define GET_ZONE_FROM_SEC(sbi, secno)				\
-	((secno) / (sbi)->secs_per_zone)
+	(((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
 #define GET_ZONE_FROM_SEG(sbi, segno)				\
 	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
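Segment and section numbers are unsigned, so when a caller passes an all-ones sentinel (a NULL_SEGNO-style "no segment" value), plain division produces a large bogus index instead of propagating the sentinel. The guard keeps -1 as -1. A compilable illustration with an assumed segs-per-sec value:

```c
/* Why the -1 guard matters: dividing an all-ones unsigned sentinel yields
 * a large bogus section number instead of propagating the sentinel.
 */
#include <stdio.h>

#define SEGS_PER_SEC 4

#define GET_SEC_FROM_SEG_OLD(segno)	((segno) / SEGS_PER_SEC)
#define GET_SEC_FROM_SEG_NEW(segno) \
	(((segno) == -1) ? -1 : (segno) / SEGS_PER_SEC)

int main(void)
{
	unsigned int segno = -1;	/* NULL_SEGNO-style sentinel */

	printf("old: %u\n", GET_SEC_FROM_SEG_OLD(segno));	/* 0x3fffffff, bogus */
	printf("new: %u\n", GET_SEC_FROM_SEG_NEW(segno));	/* stays 0xffffffff */
	return 0;
}
```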
fs/f2fs/super.c (196 changed lines)
@@ -25,13 +25,14 @@
 #include <linux/quota.h>
 #include <linux/unicode.h>
 #include <linux/part_stat.h>
+#include <linux/zstd.h>
+#include <linux/lz4.h>
 
 #include "f2fs.h"
 #include "node.h"
 #include "segment.h"
 #include "xattr.h"
 #include "gc.h"
-#include "trace.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/f2fs.h>
@@ -45,7 +46,6 @@ const char *f2fs_fault_name[FAULT_MAX] = {
 	[FAULT_KVMALLOC]	= "kvmalloc",
 	[FAULT_PAGE_ALLOC]	= "page alloc",
 	[FAULT_PAGE_GET]	= "page get",
-	[FAULT_ALLOC_BIO]	= "alloc bio",
 	[FAULT_ALLOC_NID]	= "alloc nid",
 	[FAULT_ORPHAN]		= "orphan",
 	[FAULT_BLOCK]		= "no more block",
@@ -143,6 +143,8 @@ enum {
 	Opt_checkpoint_disable_cap,
 	Opt_checkpoint_disable_cap_perc,
 	Opt_checkpoint_enable,
+	Opt_checkpoint_merge,
+	Opt_nocheckpoint_merge,
 	Opt_compress_algorithm,
 	Opt_compress_log_size,
 	Opt_compress_extension,
@@ -213,6 +215,8 @@ static match_table_t f2fs_tokens = {
 	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
 	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
 	{Opt_checkpoint_enable, "checkpoint=enable"},
+	{Opt_checkpoint_merge, "checkpoint_merge"},
+	{Opt_nocheckpoint_merge, "nocheckpoint_merge"},
 	{Opt_compress_algorithm, "compress_algorithm=%s"},
 	{Opt_compress_log_size, "compress_log_size=%u"},
 	{Opt_compress_extension, "compress_extension=%s"},
@@ -464,6 +468,74 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
 	return 0;
 }
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+#ifdef CONFIG_F2FS_FS_LZ4
+static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
+{
+#ifdef CONFIG_F2FS_FS_LZ4HC
+	unsigned int level;
+#endif
+
+	if (strlen(str) == 3) {
+		F2FS_OPTION(sbi).compress_level = 0;
+		return 0;
+	}
+
+#ifdef CONFIG_F2FS_FS_LZ4HC
+	str += 3;
+
+	if (str[0] != ':') {
+		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
+		return -EINVAL;
+	}
+	if (kstrtouint(str + 1, 10, &level))
+		return -EINVAL;
+
+	if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
+		f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
+		return -EINVAL;
+	}
+
+	F2FS_OPTION(sbi).compress_level = level;
+	return 0;
+#else
+	f2fs_info(sbi, "kernel doesn't support lz4hc compression");
+	return -EINVAL;
+#endif
+}
+#endif
+
+#ifdef CONFIG_F2FS_FS_ZSTD
+static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
+{
+	unsigned int level;
+	int len = 4;
+
+	if (strlen(str) == len) {
+		F2FS_OPTION(sbi).compress_level = 0;
+		return 0;
+	}
+
+	str += len;
+
+	if (str[0] != ':') {
+		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
+		return -EINVAL;
+	}
+	if (kstrtouint(str + 1, 10, &level))
+		return -EINVAL;
+
+	if (!level || level > ZSTD_maxCLevel()) {
+		f2fs_info(sbi, "invalid zstd compress level: %d", level);
+		return -EINVAL;
+	}
+
+	F2FS_OPTION(sbi).compress_level = level;
+	return 0;
+}
+#endif
+#endif
+
 static int parse_options(struct super_block *sb, char *options, bool is_remount)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(sb);
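Both helpers accept either a bare algorithm name (level 0, meaning the default) or an `<alg_name>:<level>` pair. A standalone sketch of the same parse, with the kernel-only pieces (sbi, f2fs_info, kstrtouint) swapped for libc and the level bounds used purely as illustrative stand-ins for the LZ4HC/ZSTD limits:

```c
/* Standalone sketch of the "<alg>:<level>" option parsing added above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_compress_level(const char *str, size_t alg_len,
				unsigned int min, unsigned int max,
				unsigned int *level)
{
	char *end;
	unsigned long val;

	if (strlen(str) == alg_len) {	/* bare "lz4" or "zstd": default level */
		*level = 0;
		return 0;
	}
	str += alg_len;
	if (str[0] != ':')		/* wrong format */
		return -1;
	val = strtoul(str + 1, &end, 10);
	if (*end != '\0' || val < min || val > max)
		return -1;
	*level = (unsigned int)val;
	return 0;
}

int main(void)
{
	unsigned int level;

	if (parse_compress_level("lz4:6", 3, 3, 12, &level) == 0)
		printf("lz4hc level %u\n", level);
	if (parse_compress_level("zstd", 4, 1, 22, &level) == 0)
		printf("zstd level %u (0 = default)\n", level);
	return 0;
}
```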
@@ -872,6 +944,12 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 		case Opt_checkpoint_enable:
 			clear_opt(sbi, DISABLE_CHECKPOINT);
 			break;
+		case Opt_checkpoint_merge:
+			set_opt(sbi, MERGE_CHECKPOINT);
+			break;
+		case Opt_nocheckpoint_merge:
+			clear_opt(sbi, MERGE_CHECKPOINT);
+			break;
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 		case Opt_compress_algorithm:
 			if (!f2fs_sb_has_compression(sbi)) {
@@ -882,17 +960,45 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 			if (!name)
 				return -ENOMEM;
 			if (!strcmp(name, "lzo")) {
+#ifdef CONFIG_F2FS_FS_LZO
+				F2FS_OPTION(sbi).compress_level = 0;
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_LZO;
-			} else if (!strcmp(name, "lz4")) {
+#else
+				f2fs_info(sbi, "kernel doesn't support lzo compression");
+#endif
+			} else if (!strncmp(name, "lz4", 3)) {
+#ifdef CONFIG_F2FS_FS_LZ4
+				ret = f2fs_set_lz4hc_level(sbi, name);
+				if (ret) {
+					kfree(name);
+					return -EINVAL;
+				}
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_LZ4;
-			} else if (!strcmp(name, "zstd")) {
+#else
+				f2fs_info(sbi, "kernel doesn't support lz4 compression");
+#endif
+			} else if (!strncmp(name, "zstd", 4)) {
+#ifdef CONFIG_F2FS_FS_ZSTD
+				ret = f2fs_set_zstd_level(sbi, name);
+				if (ret) {
+					kfree(name);
+					return -EINVAL;
+				}
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_ZSTD;
+#else
+				f2fs_info(sbi, "kernel doesn't support zstd compression");
+#endif
 			} else if (!strcmp(name, "lzo-rle")) {
+#ifdef CONFIG_F2FS_FS_LZORLE
+				F2FS_OPTION(sbi).compress_level = 0;
 				F2FS_OPTION(sbi).compress_algorithm =
 								COMPRESS_LZORLE;
+#else
+				f2fs_info(sbi, "kernel doesn't support lzorle compression");
+#endif
 			} else {
 				kfree(name);
 				return -EINVAL;
@@ -1076,8 +1182,6 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
 	/* Will be used by directory only */
 	fi->i_dir_level = F2FS_SB(sb)->dir_level;
 
-	fi->ra_offset = -1;
-
 	return &fi->vfs_inode;
 }
 
@@ -1245,6 +1349,12 @@ static void f2fs_put_super(struct super_block *sb)
 	/* prevent remaining shrinker jobs */
 	mutex_lock(&sbi->umount_mutex);
 
+	/*
+	 * flush all issued checkpoints and stop checkpoint issue thread.
+	 * after then, all checkpoints should be done by each process context.
+	 */
+	f2fs_stop_ckpt_thread(sbi);
+
 	/*
 	 * We don't need to do checkpoint when superblock is clean.
 	 * But, the previous checkpoint was not done by umount, it needs to do
@@ -1343,16 +1453,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		return -EAGAIN;
 
-	if (sync) {
-		struct cp_control cpc;
-
-		cpc.reason = __get_cp_reason(sbi);
-
-		down_write(&sbi->gc_lock);
-		err = f2fs_write_checkpoint(sbi, &cpc);
-		up_write(&sbi->gc_lock);
-	}
-	f2fs_trace_ios(NULL, 1);
+	if (sync)
+		err = f2fs_issue_checkpoint(sbi);
 
 	return err;
 }
@@ -1369,6 +1471,10 @@ static int f2fs_freeze(struct super_block *sb)
 	/* must be clean, since sync_filesystem() was already called */
 	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
 		return -EINVAL;
+
+	/* ensure no checkpoint required */
+	if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list))
+		return -EINVAL;
 	return 0;
 }
 
@@ -1539,6 +1645,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
 	}
 	seq_printf(seq, ",compress_algorithm=%s", algtype);
 
+	if (F2FS_OPTION(sbi).compress_level)
+		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
+
 	seq_printf(seq, ",compress_log_size=%u",
 			F2FS_OPTION(sbi).compress_log_size);
 
@@ -1674,6 +1783,10 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 	if (test_opt(sbi, DISABLE_CHECKPOINT))
 		seq_printf(seq, ",checkpoint=disable:%u",
 				F2FS_OPTION(sbi).unusable_cap);
+	if (test_opt(sbi, MERGE_CHECKPOINT))
+		seq_puts(seq, ",checkpoint_merge");
+	else
+		seq_puts(seq, ",nocheckpoint_merge");
 	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
 		seq_printf(seq, ",fsync_mode=%s", "posix");
 	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
@@ -1957,6 +2070,19 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 		}
 	}
 
+	if (!test_opt(sbi, DISABLE_CHECKPOINT) &&
+			test_opt(sbi, MERGE_CHECKPOINT)) {
+		err = f2fs_start_ckpt_thread(sbi);
+		if (err) {
+			f2fs_err(sbi,
+				"Failed to start F2FS issue_checkpoint_thread (%d)",
+				err);
+			goto restore_gc;
+		}
+	} else {
+		f2fs_stop_ckpt_thread(sbi);
+	}
+
 	/*
 	 * We stop issue flush thread if FS is mounted as RO
 	 * or if flush_merge is not passed in mount option.
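On remount the checkpoint issue thread is kept in sync with the option state: started when checkpoint_merge is set and checkpointing is enabled, stopped otherwise. A hedged usage sketch via mount(2), with placeholder device and mount point:

```c
/* Illustrative remount toggling the new option. Equivalent shell:
 *   mount -o remount,checkpoint_merge /dev/sdX /mnt/f2fs
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("/dev/sdX", "/mnt/f2fs", "f2fs",
		  MS_REMOUNT, "checkpoint_merge") < 0) {
		perror("remount");
		return 1;
	}
	return 0;
}
```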
@@ -2641,10 +2767,10 @@ static const struct export_operations f2fs_export_ops = {
 	.get_parent = f2fs_get_parent,
 };
 
-static loff_t max_file_blocks(void)
+loff_t max_file_blocks(struct inode *inode)
 {
 	loff_t result = 0;
-	loff_t leaf_count = DEF_ADDRS_PER_BLOCK;
+	loff_t leaf_count;
 
 	/*
 	 * note: previously, result is equal to (DEF_ADDRS_PER_INODE -
@@ -2653,6 +2779,11 @@ static loff_t max_file_blocks(void)
 	 * result as zero.
 	 */
 
+	if (inode && f2fs_compressed_file(inode))
+		leaf_count = ADDRS_PER_BLOCK(inode);
+	else
+		leaf_count = DEF_ADDRS_PER_BLOCK;
+
 	/* two direct node blocks */
 	result += (leaf_count * 2);
 
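Because a compressed inode has fewer usable address slots per leaf block, the maximum file size is now computed per inode; passing NULL keeps the old superblock-wide value for s_maxbytes. A sketch of the geometry arithmetic, with the f2fs constants used here as assumed values rather than taken from the headers:

```c
/* Sketch of the max-file-blocks arithmetic; 1018 addrs/nids per block are
 * assumed stand-ins for DEF_ADDRS_PER_BLOCK / NIDS_PER_BLOCK.
 */
#include <stdio.h>

#define DEF_ADDRS_PER_BLOCK	1018
#define NIDS_PER_BLOCK		1018

static long long max_file_blocks(long long leaf_count)
{
	/* in-inode block slots excluded, mirroring the kernel comment above */
	long long result = 0;

	result += leaf_count * 2;				/* two direct nodes */
	result += leaf_count * NIDS_PER_BLOCK * 2;		/* two indirect nodes */
	result += leaf_count * NIDS_PER_BLOCK * NIDS_PER_BLOCK;/* one double-indirect */
	return result;
}

int main(void)
{
	printf("normal inode: %lld blocks\n",
	       max_file_blocks(DEF_ADDRS_PER_BLOCK));
	/* a compressed inode would pass its smaller ADDRS_PER_BLOCK(inode) */
	return 0;
}
```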
@@ -3536,8 +3667,7 @@ try_onemore:
 	if (err)
 		goto free_options;
 
-	sbi->max_file_blocks = max_file_blocks();
-	sb->s_maxbytes = sbi->max_file_blocks <<
+	sb->s_maxbytes = max_file_blocks(NULL) <<
 				le32_to_cpu(raw_super->log_blocksize);
 	sb->s_max_links = F2FS_LINK_MAX;
 
@@ -3704,6 +3834,19 @@ try_onemore:
 
 	f2fs_init_fsync_node_info(sbi);
 
+	/* setup checkpoint request control and start checkpoint issue thread */
+	f2fs_init_ckpt_req_control(sbi);
+	if (!test_opt(sbi, DISABLE_CHECKPOINT) &&
+			test_opt(sbi, MERGE_CHECKPOINT)) {
+		err = f2fs_start_ckpt_thread(sbi);
+		if (err) {
+			f2fs_err(sbi,
+				"Failed to start F2FS issue_checkpoint_thread (%d)",
+				err);
+			goto stop_ckpt_thread;
+		}
+	}
+
 	/* setup f2fs internal modules */
 	err = f2fs_build_segment_manager(sbi);
 	if (err) {
@@ -3789,12 +3932,10 @@ try_onemore:
 	 * previous checkpoint was not done by clean system shutdown.
 	 */
 	if (f2fs_hw_is_readonly(sbi)) {
-		if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
-			err = -EROFS;
+		if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))
 			f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
-			goto free_meta;
-		}
-		f2fs_info(sbi, "write access unavailable, skipping recovery");
+		else
+			f2fs_info(sbi, "write access unavailable, skipping recovery");
 		goto reset_checkpoint;
 	}
 
@@ -3913,6 +4054,8 @@ free_nm:
 free_sm:
 	f2fs_destroy_segment_manager(sbi);
 	f2fs_destroy_post_read_wq(sbi);
+stop_ckpt_thread:
+	f2fs_stop_ckpt_thread(sbi);
 free_devices:
 	destroy_device_list(sbi);
 	kvfree(sbi->ckpt);
@@ -4027,8 +4170,6 @@ static int __init init_f2fs_fs(void)
 		return -EINVAL;
 	}
 
-	f2fs_build_trace_ios();
-
 	err = init_inodecache();
 	if (err)
 		goto fail;
@@ -4121,7 +4262,6 @@ static void __exit exit_f2fs_fs(void)
 	f2fs_destroy_segment_manager_caches();
 	f2fs_destroy_node_manager_caches();
 	destroy_inodecache();
-	f2fs_destroy_trace_ios();
 }
 
 module_init(init_f2fs_fs)
fs/f2fs/sysfs.c (141 changed lines)
@@ -11,6 +11,7 @@
 #include <linux/f2fs_fs.h>
 #include <linux/seq_file.h>
 #include <linux/unicode.h>
+#include <linux/ioprio.h>
 
 #include "f2fs.h"
 #include "segment.h"
@@ -34,6 +35,7 @@ enum {
 	FAULT_INFO_TYPE,	/* struct f2fs_fault_info */
 #endif
 	RESERVED_BLOCKS,	/* struct f2fs_sb_info */
+	CPRC_INFO,	/* struct ckpt_req_control */
 };
 
 struct f2fs_attr {
@@ -70,6 +72,8 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
 	else if (struct_type == STAT_INFO)
 		return (unsigned char *)F2FS_STAT(sbi);
 #endif
+	else if (struct_type == CPRC_INFO)
+		return (unsigned char *)&sbi->cprc_info;
 	return NULL;
 }
 
@@ -90,26 +94,23 @@ static ssize_t free_segments_show(struct f2fs_attr *a,
 static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
 		struct f2fs_sb_info *sbi, char *buf)
 {
-	struct super_block *sb = sbi->sb;
-
-	if (!sb->s_bdev->bd_part)
-		return sprintf(buf, "0\n");
-
 	return sprintf(buf, "%llu\n",
 			(unsigned long long)(sbi->kbytes_written +
 			((f2fs_get_sectors_written(sbi) -
 				sbi->sectors_written_start) >> 1)));
 }
 
+static ssize_t sb_status_show(struct f2fs_attr *a,
+		struct f2fs_sb_info *sbi, char *buf)
+{
+	return sprintf(buf, "%lx\n", sbi->s_flag);
+}
+
 static ssize_t features_show(struct f2fs_attr *a,
 		struct f2fs_sb_info *sbi, char *buf)
 {
-	struct super_block *sb = sbi->sb;
 	int len = 0;
 
-	if (!sb->s_bdev->bd_part)
-		return sprintf(buf, "0\n");
-
 	if (f2fs_sb_has_encrypt(sbi))
 		len += scnprintf(buf, PAGE_SIZE - len, "%s",
 						"encryption");
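sb_status_show() dumps sbi->s_flag as a raw hex bitmask; with the "stat" kobject registered later in this patch it surfaces as a per-volume node. A hedged reader sketch, where the device name in the path is a placeholder:

```c
/* Illustrative reader for the new node; the "stat" directory follows the
 * stat kobject added below, and "sdX" is a placeholder device name.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/f2fs/sdX/stat/sb_status", "r");
	unsigned long flags;

	if (!f || fscanf(f, "%lx", &flags) != 1)
		return 1;
	printf("SBI flags bitmask: %#lx\n", flags);
	fclose(f);
	return 0;
}
```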
@@ -264,6 +265,23 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
 		return len;
 	}
 
+	if (!strcmp(a->attr.name, "ckpt_thread_ioprio")) {
+		struct ckpt_req_control *cprc = &sbi->cprc_info;
+		int len = 0;
+		int class = IOPRIO_PRIO_CLASS(cprc->ckpt_thread_ioprio);
+		int data = IOPRIO_PRIO_DATA(cprc->ckpt_thread_ioprio);
+
+		if (class == IOPRIO_CLASS_RT)
+			len += scnprintf(buf + len, PAGE_SIZE - len, "rt,");
+		else if (class == IOPRIO_CLASS_BE)
+			len += scnprintf(buf + len, PAGE_SIZE - len, "be,");
+		else
+			return -EINVAL;
+
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%d\n", data);
+		return len;
+	}
+
 	ui = (unsigned int *)(ptr + a->offset);
 
 	return sprintf(buf, "%u\n", *ui);
@@ -317,6 +335,38 @@ out:
 		return ret ? ret : count;
 	}
 
+	if (!strcmp(a->attr.name, "ckpt_thread_ioprio")) {
+		const char *name = strim((char *)buf);
+		struct ckpt_req_control *cprc = &sbi->cprc_info;
+		int class;
+		long data;
+		int ret;
+
+		if (!strncmp(name, "rt,", 3))
+			class = IOPRIO_CLASS_RT;
+		else if (!strncmp(name, "be,", 3))
+			class = IOPRIO_CLASS_BE;
+		else
+			return -EINVAL;
+
+		name += 3;
+		ret = kstrtol(name, 10, &data);
+		if (ret)
+			return ret;
+		if (data >= IOPRIO_BE_NR || data < 0)
+			return -EINVAL;
+
+		cprc->ckpt_thread_ioprio = IOPRIO_PRIO_VALUE(class, data);
+		if (test_opt(sbi, MERGE_CHECKPOINT)) {
+			ret = set_task_ioprio(cprc->f2fs_issue_ckpt,
+					cprc->ckpt_thread_ioprio);
+			if (ret)
+				return ret;
+		}
+
+		return count;
+	}
+
 	ui = (unsigned int *)(ptr + a->offset);
 
 	ret = kstrtoul(skip_spaces(buf), 0, &t);
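The store path accepts "rt,&lt;n&gt;" or "be,&lt;n&gt;" and, when checkpoint_merge is active, retargets the running checkpoint thread via set_task_ioprio(). A usage sketch with a placeholder device name:

```c
/* Illustrative write to the new sysfs node. Equivalent shell:
 *   echo "be,3" > /sys/fs/f2fs/sdX/ckpt_thread_ioprio
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/f2fs/sdX/ckpt_thread_ioprio", "w");

	if (!f)
		return 1;
	fputs("be,3\n", f);	/* best-effort class, priority 3 */
	fclose(f);
	return 0;
}
```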
@@ -576,6 +626,7 @@ F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
 #endif
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, data_io_flag, data_io_flag);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, node_io_flag, node_io_flag);
+F2FS_RW_ATTR(CPRC_INFO, ckpt_req_control, ckpt_thread_ioprio, ckpt_thread_ioprio);
 F2FS_GENERAL_RO_ATTR(dirty_segments);
 F2FS_GENERAL_RO_ATTR(free_segments);
 F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
@@ -661,6 +712,7 @@ static struct attribute *f2fs_attrs[] = {
 #endif
 	ATTR_LIST(data_io_flag),
 	ATTR_LIST(node_io_flag),
+	ATTR_LIST(ckpt_thread_ioprio),
 	ATTR_LIST(dirty_segments),
 	ATTR_LIST(free_segments),
 	ATTR_LIST(unusable),
@@ -711,6 +763,13 @@ static struct attribute *f2fs_feat_attrs[] = {
 };
 ATTRIBUTE_GROUPS(f2fs_feat);
 
+F2FS_GENERAL_RO_ATTR(sb_status);
+static struct attribute *f2fs_stat_attrs[] = {
+	ATTR_LIST(sb_status),
+	NULL,
+};
+ATTRIBUTE_GROUPS(f2fs_stat);
+
 static const struct sysfs_ops f2fs_attr_ops = {
 	.show	= f2fs_attr_show,
 	.store	= f2fs_attr_store,
@@ -739,6 +798,44 @@ static struct kobject f2fs_feat = {
 	.kset	= &f2fs_kset,
 };
 
+static ssize_t f2fs_stat_attr_show(struct kobject *kobj,
+				struct attribute *attr, char *buf)
+{
+	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+								s_stat_kobj);
+	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+	return a->show ? a->show(a, sbi, buf) : 0;
+}
+
+static ssize_t f2fs_stat_attr_store(struct kobject *kobj, struct attribute *attr,
+						const char *buf, size_t len)
+{
+	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+								s_stat_kobj);
+	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+	return a->store ? a->store(a, sbi, buf, len) : 0;
+}
+
+static void f2fs_stat_kobj_release(struct kobject *kobj)
+{
+	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+								s_stat_kobj);
+	complete(&sbi->s_stat_kobj_unregister);
+}
+
+static const struct sysfs_ops f2fs_stat_attr_ops = {
+	.show	= f2fs_stat_attr_show,
+	.store	= f2fs_stat_attr_store,
+};
+
+static struct kobj_type f2fs_stat_ktype = {
+	.default_groups	= f2fs_stat_groups,
+	.sysfs_ops	= &f2fs_stat_attr_ops,
+	.release	= f2fs_stat_kobj_release,
+};
+
 static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
 						void *offset)
 {
@@ -945,11 +1042,15 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
 	init_completion(&sbi->s_kobj_unregister);
 	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_sb_ktype, NULL,
 				"%s", sb->s_id);
-	if (err) {
-		kobject_put(&sbi->s_kobj);
-		wait_for_completion(&sbi->s_kobj_unregister);
-		return err;
-	}
+	if (err)
+		goto put_sb_kobj;
+
+	sbi->s_stat_kobj.kset = &f2fs_kset;
+	init_completion(&sbi->s_stat_kobj_unregister);
+	err = kobject_init_and_add(&sbi->s_stat_kobj, &f2fs_stat_ktype,
+						&sbi->s_kobj, "stat");
+	if (err)
+		goto put_stat_kobj;
 
 	if (f2fs_proc_root)
 		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
@@ -965,6 +1066,13 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
 					victim_bits_seq_show, sb);
 	}
 	return 0;
+put_stat_kobj:
+	kobject_put(&sbi->s_stat_kobj);
+	wait_for_completion(&sbi->s_stat_kobj_unregister);
+put_sb_kobj:
+	kobject_put(&sbi->s_kobj);
+	wait_for_completion(&sbi->s_kobj_unregister);
+	return err;
 }
 
 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
@@ -976,6 +1084,11 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
 		remove_proc_entry("victim_bits", sbi->s_proc);
 		remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
 	}
+
+	kobject_del(&sbi->s_stat_kobj);
+	kobject_put(&sbi->s_stat_kobj);
+	wait_for_completion(&sbi->s_stat_kobj_unregister);
+
 	kobject_del(&sbi->s_kobj);
 	kobject_put(&sbi->s_kobj);
 	wait_for_completion(&sbi->s_kobj_unregister);
fs/f2fs/trace.c (165 deleted lines)
@@ -1,165 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * f2fs IO tracer
- *
- * Copyright (c) 2014 Motorola Mobility
- * Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
- */
-#include <linux/fs.h>
-#include <linux/f2fs_fs.h>
-#include <linux/sched.h>
-#include <linux/radix-tree.h>
-
-#include "f2fs.h"
-#include "trace.h"
-
-static RADIX_TREE(pids, GFP_ATOMIC);
-static spinlock_t pids_lock;
-static struct last_io_info last_io;
-
-static inline void __print_last_io(void)
-{
-	if (!last_io.len)
-		return;
-
-	trace_printk("%3x:%3x %4x %-16s %2x %5x %5x %12x %4x\n",
-			last_io.major, last_io.minor,
-			last_io.pid, "----------------",
-			last_io.type,
-			last_io.fio.op, last_io.fio.op_flags,
-			last_io.fio.new_blkaddr,
-			last_io.len);
-	memset(&last_io, 0, sizeof(last_io));
-}
-
-static int __file_type(struct inode *inode, pid_t pid)
-{
-	if (f2fs_is_atomic_file(inode))
-		return __ATOMIC_FILE;
-	else if (f2fs_is_volatile_file(inode))
-		return __VOLATILE_FILE;
-	else if (S_ISDIR(inode->i_mode))
-		return __DIR_FILE;
-	else if (inode->i_ino == F2FS_NODE_INO(F2FS_I_SB(inode)))
-		return __NODE_FILE;
-	else if (inode->i_ino == F2FS_META_INO(F2FS_I_SB(inode)))
-		return __META_FILE;
-	else if (pid)
-		return __NORMAL_FILE;
-	else
-		return __MISC_FILE;
-}
-
-void f2fs_trace_pid(struct page *page)
-{
-	struct inode *inode = page->mapping->host;
-	pid_t pid = task_pid_nr(current);
-	void *p;
-
-	set_page_private(page, (unsigned long)pid);
-
-retry:
-	if (radix_tree_preload(GFP_NOFS))
-		return;
-
-	spin_lock(&pids_lock);
-	p = radix_tree_lookup(&pids, pid);
-	if (p == current)
-		goto out;
-	if (p)
-		radix_tree_delete(&pids, pid);
-
-	if (radix_tree_insert(&pids, pid, current)) {
-		spin_unlock(&pids_lock);
-		radix_tree_preload_end();
-		cond_resched();
-		goto retry;
-	}
-
-	trace_printk("%3x:%3x %4x %-16s\n",
-			MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
-			pid, current->comm);
-out:
-	spin_unlock(&pids_lock);
-	radix_tree_preload_end();
-}
-
-void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
-{
-	struct inode *inode;
-	pid_t pid;
-	int major, minor;
-
-	if (flush) {
-		__print_last_io();
-		return;
-	}
-
-	inode = fio->page->mapping->host;
-	pid = page_private(fio->page);
-
-	major = MAJOR(inode->i_sb->s_dev);
-	minor = MINOR(inode->i_sb->s_dev);
-
-	if (last_io.major == major && last_io.minor == minor &&
-			last_io.pid == pid &&
-			last_io.type == __file_type(inode, pid) &&
-			last_io.fio.op == fio->op &&
-			last_io.fio.op_flags == fio->op_flags &&
-			last_io.fio.new_blkaddr + last_io.len ==
-							fio->new_blkaddr) {
-		last_io.len++;
-		return;
-	}
-
-	__print_last_io();
-
-	last_io.major = major;
-	last_io.minor = minor;
-	last_io.pid = pid;
-	last_io.type = __file_type(inode, pid);
-	last_io.fio = *fio;
-	last_io.len = 1;
-	return;
-}
-
-void f2fs_build_trace_ios(void)
-{
-	spin_lock_init(&pids_lock);
-}
-
-#define PIDVEC_SIZE	128
-static unsigned int gang_lookup_pids(pid_t *results, unsigned long first_index,
-							unsigned int max_items)
-{
-	struct radix_tree_iter iter;
-	void **slot;
-	unsigned int ret = 0;
-
-	if (unlikely(!max_items))
-		return 0;
-
-	radix_tree_for_each_slot(slot, &pids, &iter, first_index) {
-		results[ret] = iter.index;
-		if (++ret == max_items)
-			break;
-	}
-	return ret;
-}
-
-void f2fs_destroy_trace_ios(void)
-{
-	pid_t pid[PIDVEC_SIZE];
-	pid_t next_pid = 0;
-	unsigned int found;
-
-	spin_lock(&pids_lock);
-	while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) {
-		unsigned idx;
-
-		next_pid = pid[found - 1] + 1;
-		for (idx = 0; idx < found; idx++)
-			radix_tree_delete(&pids, pid[idx]);
-	}
-	spin_unlock(&pids_lock);
-}
fs/f2fs/trace.h (43 deleted lines)
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * f2fs IO tracer
- *
- * Copyright (c) 2014 Motorola Mobility
- * Copyright (c) 2014 Jaegeuk Kim <jaegeuk@kernel.org>
- */
-#ifndef __F2FS_TRACE_H__
-#define __F2FS_TRACE_H__
-
-#ifdef CONFIG_F2FS_IO_TRACE
-#include <trace/events/f2fs.h>
-
-enum file_type {
-	__NORMAL_FILE,
-	__DIR_FILE,
-	__NODE_FILE,
-	__META_FILE,
-	__ATOMIC_FILE,
-	__VOLATILE_FILE,
-	__MISC_FILE,
-};
-
-struct last_io_info {
-	int major, minor;
-	pid_t pid;
-	enum file_type type;
-	struct f2fs_io_info fio;
-	block_t len;
-};
-
-extern void f2fs_trace_pid(struct page *);
-extern void f2fs_trace_ios(struct f2fs_io_info *, int);
-extern void f2fs_build_trace_ios(void);
-extern void f2fs_destroy_trace_ios(void);
-#else
-#define f2fs_trace_pid(p)
-#define f2fs_trace_ios(i, n)
-#define f2fs_build_trace_ios()
-#define f2fs_destroy_trace_ios()
-
-#endif
-#endif /* __F2FS_TRACE_H__ */
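With the private trace_printk-based tracer deleted, equivalent visibility comes from the standard f2fs tracepoints that the remaining `<trace/events/f2fs.h>` include already provides. A small sketch enabling them through tracefs (mounted at /sys/kernel/tracing on most systems, or under debugfs at /sys/kernel/debug/tracing):

```c
/* Enable the standard f2fs trace events that replace the removed tracer. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/events/f2fs/enable", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* enable all f2fs tracepoints */
	fclose(f);
	return 0;
}
```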
fs/f2fs/xattr.c
@@ -327,7 +327,7 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 	void *last_addr = NULL;
 	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
 	unsigned int inline_size = inline_xattr_size(inode);
-	int err = 0;
+	int err;
 
 	if (!xnid && !inline_size)
 		return -ENODATA;
@@ -515,7 +515,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
 		void *buffer, size_t buffer_size, struct page *ipage)
 {
 	struct f2fs_xattr_entry *entry = NULL;
-	int error = 0;
+	int error;
 	unsigned int size, len;
 	void *base_addr = NULL;
 	int base_size;
@@ -562,7 +562,7 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
 	struct inode *inode = d_inode(dentry);
 	struct f2fs_xattr_entry *entry;
 	void *base_addr, *last_base_addr;
-	int error = 0;
+	int error;
 	size_t rest = buffer_size;
 
 	down_read(&F2FS_I(inode)->i_xattr_sem);
@@ -632,7 +632,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 	int found, newsize;
 	size_t len;
 	__u32 new_hsize;
-	int error = 0;
+	int error;
 
 	if (name == NULL)
 		return -EINVAL;
@@ -673,7 +673,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 		}
 
 		if (value && f2fs_xattr_value_same(here, value, size))
-			goto exit;
+			goto same;
 	} else if ((flags & XATTR_REPLACE)) {
 		error = -ENODATA;
 		goto exit;
@@ -738,17 +738,20 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 	if (error)
 		goto exit;
 
-	if (is_inode_flag_set(inode, FI_ACL_MODE)) {
-		inode->i_mode = F2FS_I(inode)->i_acl_mode;
-		inode->i_ctime = current_time(inode);
-		clear_inode_flag(inode, FI_ACL_MODE);
-	}
 	if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
 			!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
 		f2fs_set_encrypted_inode(inode);
 	f2fs_mark_inode_dirty_sync(inode, true);
 	if (!error && S_ISDIR(inode->i_mode))
 		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
 
+same:
+	if (is_inode_flag_set(inode, FI_ACL_MODE)) {
+		inode->i_mode = F2FS_I(inode)->i_acl_mode;
+		inode->i_ctime = current_time(inode);
+		clear_inode_flag(inode, FI_ACL_MODE);
+	}
+
 exit:
 	kfree(base_addr);
 	return error;
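Previously an unchanged xattr value jumped straight to exit, skipping the FI_ACL_MODE cleanup, so a pending ACL-driven mode change could be left dangling; routing that early-out through the new same: label runs the cleanup on both paths. A minimal sketch of the two-label shape, with illustrative names:

```c
/* Shape of the fix: a second label so the pending-mode cleanup runs even on
 * the "value unchanged" early-out. All names here are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

static bool pending_mode_change = true;

static int set_xattr(bool value_same)
{
	int error = 0;

	if (value_same)
		goto same;	/* the old code jumped past the cleanup */

	/* ... write the new value here ... */

same:
	if (pending_mode_change) {
		pending_mode_change = false;	/* apply and clear the flag */
		puts("applied pending mode change");
	}
	return error;
}

int main(void)
{
	return set_xattr(true);
}
```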
fs/incfs/Makefile
@@ -8,3 +8,5 @@ incrementalfs-y := \
 	main.o \
 	pseudo_files.o \
 	vfs.o
+
+incrementalfs-$(CONFIG_FS_VERITY) += verity.o
Some files were not shown because too many files have changed in this diff.