Merge branch 'android12-5.10' into android12-5.10-lts
Sync up with android12-5.10 for the following commits:

de1a0ea811 ANDROID: logbuf: Remove if directive for vendor hooks
3b6916b4d4 ANDROID: iommu/io-pgtable-arm: Add IOMMU_CACHE_ICACHE_OCACHE_NWA
2e289f3641 FROMGIT: mac80211_hwsim: add concurrent channels scanning support over virtio
5b1baee639 ANDROID: GKI: update allowed symbols for exynosauto soc
961be31178 ANDROID: GKI: initial upload list for exynosauto soc
01f2392e13 ANDROID: logbuf: Add new logbuf vendor hook to support pr_cont()
3cd04ea95a ANDROID: lib: Export show_mem() for vendor module usage
ba085dd70a FROMLIST: remoteproc: core: Export the rproc coredump APIs
1093a9bfdb ANDROID: sched: select fallback rq must check for allowed cpus
8943a2e7a3 ANDROID: android: Export symbols for invoking cpufreq_update_util()
e7cf28a1a4 ANDROID: Update symbol list for mtk
c685777105 ANDROID: kernel: add module info for debug_kinfo
ea53c24cb0 FROMGIT: bpf: Do not change gso_size during bpf_skb_change_proto()
b007e43692 FROMGIT: mm: slub: fix the leak of alloc/free traces debugfs interface
850f11aa85 Revert "KMI: BACKPORT: FROMGIT: scsi: ufs: Optimize host lock on transfer requests send/compl paths"
46575badbb Revert "BACKPORT: FROMGIT: scsi: ufs: Optimize host lock on transfer requests send/compl paths"
83d653257a Revert "FROMGIT: scsi: ufs: Utilize Transfer Request List Completion Notification Register"
9f8f2ea03e ANDROID: power: wakeup_reason: change abort log
d5a092726b ANDROID: GKI: Update abi_gki_aarch64_qcom list for rwsem list add
eabe9707f2 ANDROID: Add hook to show vendor info for transactions
12902c9996 ANDROID: vendor_hooks: Export direct reclaim trace points
cea24faf98 ANDROID: Update the ABI representation
c0e8aae5c5 ANDROID: qcom: Add xfrm and skb related symbols
8102df91f2 Merge remote-tracking branch 'aosp/upstream-f2fs-stable-linux-5.10.y' into android12-5.10
b70055f85a ANDROID: iommu: Revise vendor hook param for iova free tracking
a985701859 ANDROID: abi_gki_aarch64_qcom: Add additional symbols for 32bit execve
c7c351ab3f ANDROID: sched: add restricted tracehooks for 32bit execve
f502bc761a ANDROID: GKI: Update symbols to symbol list
4198bdb2cf ANDROID: coresight: Update ETE DT yaml file
6f1bd7583f ANDROID: coresight: Update ETE/TRBE to v6 merged upstream
ce71010347 ANDROID: kvm: arm64: Clarify the comment for SPE save context
ee7e80c81b BACKPORT: arm64: KVM: Enable access to TRBE support for host
b6b0927eac BACKPORT: KVM: arm64: Move SPE availability check to VCPU load
dde40c1089 UPSTREAM: KVM: arm64: Handle access to TRFCR_EL1
ee682ec766 ANDROID: GKI: Enable ARCH_SPRD and SPRD_TIMER
39147716e8 UPSTREAM: x86, lto: Pass -stack-alignment only on LLD < 13.0.0
7d3618b8b9 ANDROID: fix permission error of page_pinner
cfe1f5aea6 ANDROID: gki_config: disable per-cgroup pressure tracking
c7186c2c46 FROMGIT: cgroup: make per-cgroup pressure stall tracking configurable
0d054fc5d7 Revert "ANDROID: make per-cgroup PSI tracking configurable"
951bdfd077 FROMLIST: arm: Mark the recheduling IPI as raw interrupt
42f775df72 FROMLIST: arm64: Mark the recheduling IPI as raw interrupt
3f9d45d802 FROMLIST: genirq: Allow an interrupt to be marked as 'raw'
08327b9007 FROMLIST: genirq: Add __irq_modify_status() helper to clear/set special flags
77c9f446b6 ANDROID: GKI: Update abi_gki_aarch64_qcom list for shmem allocations
728626cb04 ANDROID: power: Add vendor hook to qos for GKI purpose.
9d55580966 ANDROID: selinux: modify RTM_GETNEIGH{TBL}
bb51a33182 Revert "f2fs: avoid attaching SB_ACTIVE flag during mount/remount"
c81ac64da1 f2fs: remove false alarm on iget failure during GC
cdeff03989 f2fs: enable extent cache for compression files in read-only
15a475975e f2fs: fix to avoid adding tab before doc section
44e0be85eb f2fs: introduce f2fs_casefolded_name slab cache
eaef955b91 f2fs: swap: support migrating swapfile in aligned write mode
34c703ff04 f2fs: swap: remove dead codes
ec3ea14d2f f2fs: compress: add compress_inode to cache compressed blocks
d90505e519 f2fs: clean up /sys/fs/f2fs/<disk>/features
95f2afc02d f2fs: add pin_file in feature list
d8755821c4 f2fs: Advertise encrypted casefolding in sysfs
7004c47db2 f2fs: Show casefolding support only when supported
832ee33262 f2fs: support RO feature
b5a393c8a8 f2fs: logging neatening

Change-Id: I42f9108240ebf0d1e75b5d2ee24644cb995f8b3c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -203,7 +203,34 @@ Description: Shows total written kbytes issued to disk.

What:           /sys/fs/f2fs/<disk>/features
Date:           July 2017
Contact:        "Jaegeuk Kim" <jaegeuk@kernel.org>
Description:    Shows all enabled features in current device.
Description:    <deprecated: should use /sys/fs/f2fs/<disk>/feature_list/
                Shows all enabled features in current device.
                Supported features:
                encryption, blkzoned, extra_attr, projquota, inode_checksum,
                flexible_inline_xattr, quota_ino, inode_crtime, lost_found,
                verity, sb_checksum, casefold, readonly, compression, pin_file.

What:           /sys/fs/f2fs/<disk>/feature_list/
Date:           June 2021
Contact:        "Jaegeuk Kim" <jaegeuk@kernel.org>
Description:    Expand /sys/fs/f2fs/<disk>/features to meet sysfs rule.
                Supported on-disk features:
                encryption, block_zoned (aka blkzoned), extra_attr,
                project_quota (aka projquota), inode_checksum,
                flexible_inline_xattr, quota_ino, inode_crtime, lost_found,
                verity, sb_checksum, casefold, readonly, compression.
                Note that, pin_file is moved into /sys/fs/f2fs/features/.

What:           /sys/fs/f2fs/features/
Date:           July 2017
Contact:        "Jaegeuk Kim" <jaegeuk@kernel.org>
Description:    Shows all enabled kernel features.
                Supported features:
                encryption, block_zoned, extra_attr, project_quota,
                inode_checksum, flexible_inline_xattr, quota_ino,
                inode_crtime, lost_found, verity, sb_checksum,
                casefold, readonly, compression, test_dummy_encryption_v2,
                atomic_write, pin_file, encrypted_casefold.

What:           /sys/fs/f2fs/<disk>/inject_rate
Date:           May 2016
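Usage illustration (not part of the commit; a minimal userspace sketch that assumes a kernel carrying the feature_list rework above): enumerate the per-feature entries under the new directory.

    /* List the per-feature files under /sys/fs/f2fs/features/. */
    #include <dirent.h>
    #include <stdio.h>

    int main(void)
    {
            DIR *dir = opendir("/sys/fs/f2fs/features");
            struct dirent *de;

            if (!dir) {
                    perror("opendir");
                    return 1;
            }
            while ((de = readdir(dir)) != NULL) {
                    if (de->d_name[0] != '.')
                            printf("%s\n", de->d_name);
            }
            closedir(dir);
            return 0;
    }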
@@ -513,16 +513,21 @@

        ccw_timeout_log [S390]
                        See Documentation/s390/common_io.rst for details.

        cgroup_disable= [KNL] Disable a particular controller
                        Format: {name of the controller(s) to disable}
        cgroup_disable= [KNL] Disable a particular controller or optional feature
                        Format: {name of the controller(s) or feature(s) to disable}
                        The effects of cgroup_disable=foo are:
                        - foo isn't auto-mounted if you mount all cgroups in
                          a single hierarchy
                        - foo isn't visible as an individually mountable
                          subsystem
                        - if foo is an optional feature then the feature is
                          disabled and corresponding cgroup files are not
                          created
                        {Currently only "memory" controller deal with this and
                        cut the overhead, others just disable the usage. So
                        only cgroup_disable=memory is actually worthy}
                        Specifying "pressure" disables per-cgroup pressure
                        stall information accounting feature

        cgroup_no_v1=   [KNL] Disable cgroup controllers and named hierarchies in v1
                        Format: { { controller | "all" | "named" }
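For illustration (not from the commit): with cgroup_disable=pressure on the command line, the per-cgroup *.pressure files are simply not created. A minimal sketch, assuming cgroup2 is mounted at /sys/fs/cgroup:

    /* Probe whether per-cgroup PSI files exist on this boot. */
    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/fs/cgroup/cpu.pressure", "r");
            char line[128];

            if (!f) {
                    printf("per-cgroup pressure files absent (errno %d)\n", errno);
                    return 0;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }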
@@ -37,7 +37,11 @@ properties:

  out-ports:
    description: |
      Output connections from the ETE to legacy CoreSight trace bus.
    $ref: /schemas/graph.yaml#/properties/port
    $ref: /schemas/graph.yaml#/properties/ports
    properties:
      port:
        description: Output connection from the ETE to legacy CoreSight Trace bus.
        $ref: /schemas/graph.yaml#/properties/port

required:
  - compatible
@@ -289,6 +289,9 @@ compress_mode=%s Control file compression mode. This supports "fs" and "user"
                         choosing the target file and the timing. The user can do manual
                         compression/decompression on the compression enabled files using
                         ioctls.
compress_cache           Support to use address space of a filesystem managed inode to
                         cache compressed block, in order to improve cache hit ratio of
                         random read.
inlinecrypt              When possible, encrypt/decrypt the contents of encrypted
                         files using the blk-crypto framework rather than
                         filesystem-layer encryption. This allows the use of
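Usage illustration (not from the commit; the device node and mount point below are placeholders): passing the new compress_cache option at mount time, equivalent to mount -o compress_mode=fs,compress_cache.

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            if (mount("/dev/vdb", "/mnt/f2fs", "f2fs", 0,
                      "compress_mode=fs,compress_cache") != 0) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }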
@@ -717,10 +720,10 @@ users.

===================== ======================== ===================
User                  F2FS                     Block
===================== ======================== ===================
                      META                     WRITE_LIFE_NOT_SET
                      HOT_NODE                 "
                      WARM_NODE                "
                      COLD_NODE                "
N/A                   META                     WRITE_LIFE_NOT_SET
N/A                   HOT_NODE                 "
N/A                   WARM_NODE                "
N/A                   COLD_NODE                "
ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
extension list        "                        "

@@ -746,10 +749,10 @@ WRITE_LIFE_LONG " WRITE_LIFE_LONG

===================== ======================== ===================
User                  F2FS                     Block
===================== ======================== ===================
                      META                     WRITE_LIFE_MEDIUM;
                      HOT_NODE                 WRITE_LIFE_NOT_SET
                      WARM_NODE                "
                      COLD_NODE                WRITE_LIFE_NONE
N/A                   META                     WRITE_LIFE_MEDIUM;
N/A                   HOT_NODE                 WRITE_LIFE_NOT_SET
N/A                   WARM_NODE                "
N/A                   COLD_NODE                WRITE_LIFE_NONE
ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
extension list        "                        "
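For illustration (not from the commit): the "User" column in the tables above corresponds to the per-file write-hint interface. A minimal sketch tagging a file as extreme-lifetime; the file name is a placeholder, and the fallback defines cover libcs that predate F_SET_RW_HINT.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>

    #ifndef F_SET_RW_HINT
    #define F_LINUX_SPECIFIC_BASE   1024
    #define F_SET_RW_HINT           (F_LINUX_SPECIFIC_BASE + 12)
    #define RWH_WRITE_LIFE_EXTREME  5
    #endif

    int main(void)
    {
            int fd = open("/mnt/f2fs/cold.dat", O_RDWR | O_CREAT, 0644);
            uint64_t hint = RWH_WRITE_LIFE_EXTREME;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* The kernel maps this to COLD_DATA / WRITE_LIFE_EXTREME. */
            if (fcntl(fd, F_SET_RW_HINT, &hint) != 0)
                    perror("F_SET_RW_HINT");
            return 0;
    }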
android/abi_gki_aarch64_exynosauto: new file, 1375 lines (diff suppressed because it is too large)
@@ -1733,6 +1733,7 @@
  sscanf
  __stack_chk_fail
  __stack_chk_guard
  static_key_disable
  static_key_slow_dec
  static_key_slow_inc
  stop_machine
@@ -1773,6 +1774,7 @@
  synchronize_net
  synchronize_rcu
  syscon_regmap_lookup_by_phandle
  sysctl_sched_features
  sysctl_sched_latency
  sysfs_add_file_to_group
  sysfs_create_file_ns
@@ -1891,6 +1893,8 @@
  __traceiter_dwc3_readl
  __traceiter_dwc3_writel
  __traceiter_gpu_mem_total
  __traceiter_mm_vmscan_direct_reclaim_begin
  __traceiter_mm_vmscan_direct_reclaim_end
  __traceiter_pelt_cfs_tp
  __traceiter_pelt_dl_tp
  __traceiter_pelt_irq_tp
@@ -1958,6 +1962,8 @@
  __tracepoint_dwc3_readl
  __tracepoint_dwc3_writel
  __tracepoint_gpu_mem_total
  __tracepoint_mm_vmscan_direct_reclaim_begin
  __tracepoint_mm_vmscan_direct_reclaim_end
  __tracepoint_pelt_cfs_tp
  __tracepoint_pelt_dl_tp
  __tracepoint_pelt_irq_tp
@@ -363,8 +363,10 @@
  devm_extcon_register_notifier
  devm_free_irq
  devm_fwnode_pwm_get
  devm_gpiochip_add_data_with_key
  devm_gpiod_get
  devm_gpiod_get_index
  devm_gpiod_get_optional
  devm_gpiod_put
  devm_gpio_free
  devm_gpio_request
@@ -770,6 +772,7 @@
  gpiod_set_debounce
  gpiod_set_raw_value
  gpiod_set_value
  gpiod_set_value_cansleep
  gpiod_to_irq
  gpio_free
  gpio_request
@@ -1197,6 +1200,7 @@
  of_get_next_parent
  of_get_parent
  of_get_property
  of_get_regulator_init_data
  of_graph_get_next_endpoint
  of_graph_get_remote_node
  of_graph_get_remote_port_parent
@@ -1297,7 +1301,6 @@
  pinctrl_utils_free_map
  pinctrl_utils_reserve_map
  pin_user_pages_fast
  pin_user_pages_remote
  platform_bus_type
  platform_device_add
  platform_device_add_data
@@ -1379,9 +1382,11 @@
  raw_notifier_chain_unregister
  _raw_read_lock
  _raw_read_lock_bh
  _raw_read_lock_irqsave
  _raw_read_trylock
  _raw_read_unlock
  _raw_read_unlock_bh
  _raw_read_unlock_irqrestore
  _raw_spin_lock
  _raw_spin_lock_bh
  _raw_spin_lock_irq
@@ -1410,6 +1415,7 @@
  rdev_get_dev
  rdev_get_drvdata
  rdev_get_id
  rdev_get_regmap
  refcount_warn_saturate
  __refrigerator
  regcache_cache_only
@@ -1462,9 +1468,11 @@
  regulator_list_voltage_linear_range
  regulator_list_voltage_table
  regulator_map_voltage_iterate
  regulator_map_voltage_linear
  regulator_map_voltage_linear_range
  regulator_notifier_call_chain
  regulator_put
  regulator_set_active_discharge_regmap
  regulator_set_current_limit
  regulator_set_current_limit_regmap
  regulator_set_mode
@@ -1805,6 +1813,7 @@
  __traceiter_android_vh_finish_update_load_avg_se
  __traceiter_android_vh_iommu_alloc_iova
  __traceiter_android_vh_iommu_free_iova
  __traceiter_android_vh_logbuf
  __traceiter_android_vh_override_creds
  __traceiter_android_vh_prepare_update_load_avg_se
  __traceiter_android_vh_revert_creds
@@ -1861,6 +1870,7 @@
  __tracepoint_android_vh_finish_update_load_avg_se
  __tracepoint_android_vh_iommu_alloc_iova
  __tracepoint_android_vh_iommu_free_iova
  __tracepoint_android_vh_logbuf
  __tracepoint_android_vh_override_creds
  __tracepoint_android_vh_prepare_update_load_avg_se
  __tracepoint_android_vh_revert_creds
@@ -1938,6 +1948,7 @@
  ufshcd_fixup_dev_quirks
  ufshcd_get_pwr_dev_param
  ufshcd_hba_enable
  ufshcd_hba_stop
  ufshcd_link_recovery
  ufshcd_make_hba_operational
  ufshcd_pltfrm_init
@@ -1951,7 +1962,6 @@
  ufshcd_uic_hibern8_exit
  unlock_page
  unmap_mapping_range
  unpin_user_page
  unpin_user_pages
  unregister_blkdev
  __unregister_chrdev
@@ -1995,6 +2005,8 @@
  usb_ep_queue
  usb_ep_set_halt
  usb_ep_set_maxpacket_limit
  usb_function_register
  usb_function_unregister
  usb_gadget_giveback_request
  usb_gadget_map_request
  usb_gadget_set_state
@@ -2175,6 +2187,8 @@
  woken_wake_function
  work_busy
  work_on_cpu
  ww_mutex_lock
  ww_mutex_unlock
  xhci_add_endpoint
  xhci_check_bandwidth
  xhci_drop_endpoint
@@ -2193,7 +2207,9 @@
  debugfs_create_u8
  devm_of_pwm_get
  led_classdev_unregister
  pin_user_pages_remote
  send_sig_info
  syscore_resume
  syscore_suspend
  unpin_user_page
  v4l2_m2m_buf_remove_by_buf
@@ -1261,6 +1261,7 @@
  kfree_const
  kfree_sensitive
  kfree_skb
  kfree_skb_list
  kick_all_cpus_sync
  kill_anon_super
  kill_fasync
@@ -2131,7 +2132,9 @@
  sg_pcopy_from_buffer
  sg_pcopy_to_buffer
  sg_scsi_ioctl
  shmem_file_setup
  shmem_mark_page_lazyfree
  shmem_read_mapping_page_gfp
  shmem_truncate_range
  show_rcu_gp_kthreads
  show_regs
@@ -2429,6 +2432,8 @@
  __traceiter_android_rvh_find_busiest_queue
  __traceiter_android_rvh_find_lowest_rq
  __traceiter_android_rvh_flush_task
  __traceiter_android_rvh_force_compatible_post
  __traceiter_android_rvh_force_compatible_pre
  __traceiter_android_rvh_gic_v3_set_affinity
  __traceiter_android_rvh_irqs_disable
  __traceiter_android_rvh_irqs_enable
@@ -2523,6 +2528,8 @@
  __tracepoint_android_rvh_find_busiest_queue
  __tracepoint_android_rvh_find_lowest_rq
  __tracepoint_android_rvh_flush_task
  __tracepoint_android_rvh_force_compatible_post
  __tracepoint_android_rvh_force_compatible_pre
  __tracepoint_android_rvh_gic_v3_set_affinity
  __tracepoint_android_rvh_irqs_disable
  __tracepoint_android_rvh_irqs_enable
@@ -2560,6 +2567,7 @@
  __tracepoint_android_rvh_update_misfit_status
  __tracepoint_android_rvh_wake_up_new_task
  __tracepoint_android_vh_allow_domain_state
  __tracepoint_android_vh_alter_rwsem_list_add
  __tracepoint_android_vh_binder_restore_priority
  __tracepoint_android_vh_binder_set_priority
  __tracepoint_android_vh_binder_transaction_init
@@ -2910,6 +2918,9 @@
  xa_find_after
  xa_load
  xa_store
  __xfrm_decode_session
  xfrm_lookup
  xfrm_output
  xhci_alloc_command
  xhci_alloc_erst
  xhci_free_command
android/abi_gki_aarch64_vivo: new file, 1969 lines (diff suppressed because it is too large)
@@ -51,6 +51,7 @@ config ARM
        select GENERIC_ATOMIC64 if CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI
        select GENERIC_CLOCKEVENTS_BROADCAST if SMP
        select GENERIC_IRQ_IPI if SMP
        select ARCH_WANTS_IRQ_RAW if GENERIC_IRQ_IPI
        select GENERIC_CPU_AUTOPROBE
        select GENERIC_EARLY_IOREMAP
        select GENERIC_IDLE_POLL_SETUP
@@ -742,6 +742,10 @@ void __init set_smp_ipi_range(int ipi_base, int n)

                ipi_desc[i] = irq_to_desc(ipi_base + i);
                irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);

                /* The rescheduling IPI is special... */
                if (i == IPI_RESCHEDULE)
                        __irq_modify_status(ipi_base + i, 0, IRQ_RAW, ~0);
        }

        ipi_irq_base = ipi_base;
@@ -111,6 +111,7 @@ config ARM64
        select GENERIC_EARLY_IOREMAP
        select GENERIC_IDLE_POLL_SETUP
        select GENERIC_IRQ_IPI
        select ARCH_WANTS_IRQ_RAW
        select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
@@ -48,6 +48,7 @@ CONFIG_PROFILING=y
CONFIG_ARCH_SUNXI=y
CONFIG_ARCH_HISI=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SPRD=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=32
CONFIG_PARAVIRT=y
@@ -59,7 +60,7 @@ CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
CONFIG_RANDOMIZE_BASE=y
# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
CONFIG_CMDLINE="stack_depot_disable=on kasan.stacktrace=off kvm-arm.mode=protected"
CONFIG_CMDLINE="stack_depot_disable=on kasan.stacktrace=off kvm-arm.mode=protected cgroup_disable=pressure"
CONFIG_CMDLINE_EXTEND=y
# CONFIG_DMI is not set
CONFIG_PM_WAKELOCKS=y
@@ -518,6 +519,7 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_DEBUG_KINFO=y
CONFIG_COMMON_CLK_SCPI=y
# CONFIG_SPRD_COMMON_CLK is not set
# CONFIG_CLK_SUNXI is not set
# CONFIG_SUNXI_CCU is not set
CONFIG_HWSPINLOCK=y
@@ -405,6 +405,8 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_GUEST_HAS_PTRAUTH     (1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION     (1 << 8) /* Exception pending */
#define KVM_ARM64_EXCEPT_MASK           (7 << 9) /* Target EL/MODE */
#define KVM_ARM64_DEBUG_STATE_SAVE_SPE  (1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */

/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
@@ -733,6 +735,10 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
        return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
@@ -1005,6 +1005,10 @@ void __init set_smp_ipi_range(int ipi_base, int n)

                ipi_desc[i] = irq_to_desc(ipi_base + i);
                irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);

                /* The rescheduling IPI is special... */
                if (i == IPI_RESCHEDULE)
                        __irq_modify_status(ipi_base + i, 0, IRQ_RAW, ~0);
        }

        ipi_irq_base = ipi_base;
@@ -416,10 +416,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

        if (vcpu_has_ptrauth(vcpu))
                vcpu_ptrauth_disable(vcpu);
        kvm_arch_vcpu_load_debug_state_flags(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_put_debug_state_flags(vcpu);
        kvm_arch_vcpu_put_fp(vcpu);
        if (has_vhe())
                kvm_vcpu_put_sysregs_vhe(vcpu);
@@ -141,7 +141,13 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
 * @vcpu:       the vcpu pointer
 *
 * This is called before each entry into the hypervisor to setup any
 * debug related registers.
 * debug related registers. Currently this just ensures we will trap
 * access to:
 *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
 *  - Debug ROM Address (MDCR_EL2_TDRA)
 *  - OS related registers (MDCR_EL2_TDOSA)
 *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
 *  - Self-hosted Trace (MDCR_EL2_TTRF/MDCR_EL2_E2TB)
 *
 * Additionally, KVM only traps guest accesses to the debug registers if
 * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@@ -263,3 +269,32 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
                }
        }
}

void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
{
        u64 dfr0;

        /* For VHE, there is nothing to do */
        if (has_vhe())
                return;

        dfr0 = read_sysreg(id_aa64dfr0_el1);
        /*
         * If SPE is present on this CPU and is available at current EL,
         * we may need to check if the host state needs to be saved.
         */
        if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
            !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
                vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_SPE;

        /* Check if we have TRBE implemented and available at the host */
        if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
            !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
                vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_TRBE;
}

void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
{
        vcpu->arch.flags &= ~(KVM_ARM64_DEBUG_STATE_SAVE_SPE |
                              KVM_ARM64_DEBUG_STATE_SAVE_TRBE);
}
@@ -21,17 +21,11 @@ static void __debug_save_spe(u64 *pmscr_el1)
        /* Clear pmscr in case of early return */
        *pmscr_el1 = 0;

        /* SPE present on this CPU? */
        if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
                                                  ID_AA64DFR0_PMSVER_SHIFT))
                return;

        /* Yes; is it owned by EL3? */
        reg = read_sysreg_s(SYS_PMBIDR_EL1);
        if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT))
                return;

        /* No; is the host actually using the thing? */
        /*
         * At this point, we know that this CPU implements
         * SPE and is available to the host.
         * Check if the host is actually using it ?
         */
        reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
        if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)))
                return;
@@ -60,18 +54,8 @@ static void __debug_restore_spe(u64 pmscr_el1)

static void __debug_save_trace(u64 *trfcr_el1)
{
        *trfcr_el1 = 0;

        /* Check if we have TRBE */
        if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
                                                  ID_AA64DFR0_TRBE_SHIFT))
                return;

        /* Check we can access the TRBE */
        if ((read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
                return;

        /* Check if the TRBE is enabled */
        if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_ENABLE))
                return;
@@ -100,9 +84,11 @@ static void __debug_restore_trace(u64 trfcr_el1)
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
        /* Disable and flush SPE data generation */
        __debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
        if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE)
                __debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
        /* Disable and flush Self-Hosted Trace generation */
        __debug_save_trace(&vcpu->arch.host_debug_state.trfcr_el1);
        if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE)
                __debug_save_trace(&vcpu->arch.host_debug_state.trfcr_el1);
}

void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
@@ -112,8 +98,10 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu)

void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
        __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
        __debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1);
        if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE)
                __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
        if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE)
                __debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1);
}

void __debug_switch_to_host(struct kvm_vcpu *vcpu)
@@ -193,12 +193,11 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)

        __sysreg_save_state_nvhe(host_ctxt);
        /*
         * For nVHE, we must save and disable any SPE
         * buffers, as the translation regime is going
         * to be loaded with that of the guest. And we must
         * save host context for SPE, before we change the
         * ownership to EL2 (via MDCR_EL2_E2PB == 0) and before
         * we load guest Stage1.
         * We must flush and disable the SPE buffer for nVHE, as
         * the translation regime(EL1&0) is going to be loaded with
         * that of the guest. And we must do this before we change the
         * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
         * before we load guest Stage1.
         */
        __debug_save_host_buffers_nvhe(vcpu);
@@ -1472,6 +1472,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_GCR_EL1), undef_access },

        { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
        { SYS_DESC(SYS_TRFCR_EL1), undef_access },
        { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
        { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
        { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
@@ -199,8 +199,9 @@ endif
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)

ifdef CONFIG_LTO_CLANG
KBUILD_LDFLAGS	+= -plugin-opt=-code-model=kernel \
		   -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
KBUILD_LDFLAGS	+= -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
endif
endif

ifdef CONFIG_X86_NEED_RELOCS
@@ -53,7 +53,7 @@ CONFIG_PARAVIRT=y
CONFIG_NR_CPUS=32
CONFIG_EFI=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="stack_depot_disable=on"
CONFIG_CMDLINE="stack_depot_disable=on cgroup_disable=pressure"
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
@@ -16,9 +16,11 @@ android/abi_gki_aarch64_virtual_device
android/abi_gki_aarch64_hikey960
android/abi_gki_aarch64_generic
android/abi_gki_aarch64_exynos
android/abi_gki_aarch64_exynosauto
android/abi_gki_aarch64_mtk
android/abi_gki_aarch64_xiaomi
android/abi_gki_aarch64_fips140
android/abi_gki_aarch64_vivo
"

FILES="${FILES}
@@ -5481,6 +5481,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
        struct binder_buffer *buffer = t->buffer;

        spin_lock(&t->lock);
        trace_android_vh_binder_print_transaction_info(m, proc, prefix, t);
        to_proc = t->to_proc;
        seq_printf(m,
                   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
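For illustration (not from the commit): a vendor module would attach to the new hook with the usual vendor-hook registration helper; the handler name below is a placeholder, and only the prefix is printed to avoid assuming anything about binder internals.

    /* Sketch of a vendor module consuming the new binder hook. */
    #include <trace/hooks/binder.h>

    static void show_vendor_info(void *data, struct seq_file *m,
                                 struct binder_proc *proc, const char *prefix,
                                 struct binder_transaction *t)
    {
            seq_printf(m, "%svendor info\n", prefix);
    }

    static int __init vendor_hook_init(void)
    {
            return register_trace_android_vh_binder_print_transaction_info(
                            show_vendor_info, NULL);
    }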
@@ -271,6 +271,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_post_init_entity_util_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpm_get_timer);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpm_adj_current_limit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_logbuf);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_logbuf_pr_cont);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_scan_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_swappiness);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_slab_bypass);
@@ -321,6 +322,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_force_compatible_pre);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_force_compatible_post);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_uid);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_user);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_qos_add_request);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_qos_update_request);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_qos_remove_request);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_balance_anon_file_reclaim);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpuidle_psci_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpuidle_psci_exit);
@@ -328,3 +332,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sha256);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_aes_expandkey);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_aes_encrypt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_aes_decrypt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_force_compatible_pre);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_force_compatible_post);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_print_transaction_info);
@@ -1234,8 +1234,8 @@ Run:
        error = dpm_run_callback(callback, dev, state, info);
        if (error) {
                async_error = error;
                log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
                                         dev_name(dev), callback, error);
                log_suspend_abort_reason("Device %s failed to %s noirq: error %d",
                                         dev_name(dev), pm_verb(state.event), error);
                goto Complete;
        }

@@ -1428,8 +1428,8 @@ Run:
        error = dpm_run_callback(callback, dev, state, info);
        if (error) {
                async_error = error;
                log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
                                         dev_name(dev), callback, error);
                log_suspend_abort_reason("Device %s failed to %s late: error %d",
                                         dev_name(dev), pm_verb(state.event), error);
                goto Complete;
        }
        dpm_propagate_wakeup_to_parent(dev);
@@ -1701,8 +1701,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
                dpm_propagate_wakeup_to_parent(dev);
                dpm_clear_superiors_direct_complete(dev);
        } else {
                log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
                                         dev_name(dev), callback, error);
                log_suspend_abort_reason("Device %s failed to %s: error %d",
                                         dev_name(dev), pm_verb(state.event), error);
        }

        device_unlock(dev);
@@ -23,7 +23,7 @@
#include "coresight-priv.h"

static DEFINE_MUTEX(coresight_mutex);
DEFINE_PER_CPU(struct coresight_device *, csdev_sink);
static DEFINE_PER_CPU(struct coresight_device *, csdev_sink);

/**
 * struct coresight_node - elements of a path, from source to sink
@@ -115,7 +115,7 @@ void etm4x_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
        }
}

u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
static u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
{
        u64 res = 0;

@@ -132,7 +132,7 @@ u64 ete_sysreg_read(u32 offset, bool _relaxed, bool _64bit)
        return res;
}

void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
static void ete_sysreg_write(u64 val, u32 offset, bool _relaxed, bool _64bit)
{
        if (!_relaxed)
                __iowmb(); /* Imitate the !relaxed I/O helpers */
@@ -5,6 +5,8 @@
 * device (ETE) thus generating required trace data. Trace can be enabled
 * via the perf framework.
 *
 * The AUX buffer handling is inspired from Arm SPE PMU driver.
 *
 * Copyright (C) 2020 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
@@ -515,7 +517,7 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
        if (!buf->trbe_base) {
                kfree(pglist);
                kfree(buf);
                return ERR_PTR(buf->trbe_base);
                return ERR_PTR(-ENOMEM);
        }
        buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
        buf->trbe_write = buf->trbe_base;
@@ -614,9 +616,10 @@ static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
                /*
                 * Otherwise, the buffer is full and the write pointer
                 * has reached base. Adjust this back to the Limit pointer
                 * for correct size.
                 * for correct size. Also, mark the buffer truncated.
                 */
                write = get_trbe_limit_pointer();
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
        }

        offset = write - base;
@@ -703,7 +706,12 @@ static void trbe_handle_overflow(struct perf_output_handle *handle)
        if (buf->snapshot)
                handle->head += size;

        perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
        /*
         * Mark the buffer as truncated, as we have stopped the trace
         * collection upon the WRAP event, without stopping the source.
         */
        perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW |
                                     PERF_AUX_FLAG_TRUNCATED);
        perf_aux_output_end(handle, size);
        event_data = perf_aux_output_begin(handle, event);
        if (!event_data) {
@@ -863,7 +871,7 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp

        dev = &cpudata->drvdata->pdev->dev;
        desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
        if (IS_ERR(desc.name))
        if (!desc.name)
                goto cpu_clear;

        desc.type = CORESIGHT_DEV_TYPE_SINK;
@@ -1038,7 +1046,7 @@ static int arm_trbe_probe_irq(struct platform_device *pdev,
        if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
                return -EINVAL;

        drvdata->handle = alloc_percpu(typeof(*drvdata->handle));
        drvdata->handle = alloc_percpu(struct perf_output_handle *);
        if (!drvdata->handle)
                return -ENOMEM;
@@ -128,8 +128,7 @@ static inline void set_trbe_write_pointer(unsigned long addr)
static inline unsigned long get_trbe_limit_pointer(void)
{
        u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
        unsigned long limit = (trblimitr >> TRBLIMITR_LIMIT_SHIFT) & TRBLIMITR_LIMIT_MASK;
        unsigned long addr = limit << TRBLIMITR_LIMIT_SHIFT;
        unsigned long addr = trblimitr & (TRBLIMITR_LIMIT_MASK << TRBLIMITR_LIMIT_SHIFT);

        WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));
        return addr;
@@ -434,9 +434,9 @@ static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;
        if (attrs & DMA_ATTR_SYS_CACHE_ONLY)
                prot |= IOMMU_SYS_CACHE_ONLY;
                prot |= IOMMU_SYS_CACHE;
        if (attrs & DMA_ATTR_SYS_CACHE_ONLY_NWA)
                prot |= IOMMU_SYS_CACHE_ONLY_NWA;
                prot |= IOMMU_SYS_CACHE_NWA;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
@@ -487,7 +487,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
                iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
                                       true);

        trace_android_vh_iommu_alloc_iova(dev, iova, size);
        trace_android_vh_iommu_alloc_iova(dev, (dma_addr_t)iova << shift, size);

        return (dma_addr_t)iova << shift;
}
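For illustration (not from the commit): these prot bits are reached from the DMA attrs above. A minimal kernel-side sketch, assuming a driver in this tree where DMA_ATTR_SYS_CACHE_ONLY is defined; the device pointer and size are placeholders.

    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    static void *alloc_syscache_buf(struct device *dev, dma_addr_t *handle)
    {
            /* dma_info_to_prot() above turns this attr into IOMMU_SYS_CACHE. */
            return dma_alloc_attrs(dev, SZ_4K, handle, GFP_KERNEL,
                                   DMA_ATTR_SYS_CACHE_ONLY);
    }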
@@ -112,18 +112,20 @@
#define ARM_LPAE_VTCR_PS_SHIFT          16
#define ARM_LPAE_VTCR_PS_MASK           0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)     ((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK         0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE       0x04ULL
#define ARM_LPAE_MAIR_ATTR_NC           0x44ULL
#define ARM_LPAE_MAIR_ATTR_INC_OWBRANWA 0xe4ULL
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA   0xf4ULL
#define ARM_LPAE_MAIR_ATTR_WBRWA        0xffULL
#define ARM_LPAE_MAIR_ATTR_IDX_NC       0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE    1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV      2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE       3
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA   4
#define ARM_LPAE_MAIR_ATTR_SHIFT(n)     ((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK         0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE       0x04ULL
#define ARM_LPAE_MAIR_ATTR_NC           0x44ULL
#define ARM_LPAE_MAIR_ATTR_INC_OWBRANWA 0xe4ULL
#define ARM_LPAE_MAIR_ATTR_IWBRWA_OWBRANWA      0xefULL
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA   0xf4ULL
#define ARM_LPAE_MAIR_ATTR_WBRWA        0xffULL
#define ARM_LPAE_MAIR_ATTR_IDX_NC       0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE    1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV      2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE       3
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA   4
#define ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA        5

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER   BIT(2)
@@ -435,13 +437,17 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
                if (prot & IOMMU_MMIO)
                        pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
                else if ((prot & IOMMU_CACHE) && (prot & IOMMU_SYS_CACHE_NWA))
                        pte |= (ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
                /* IOMMU_CACHE + IOMMU_SYS_CACHE equivalent to IOMMU_CACHE */
                else if (prot & IOMMU_CACHE)
                        pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
                else if (prot & IOMMU_SYS_CACHE_ONLY)
                else if (prot & IOMMU_SYS_CACHE)
                        pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
                else if (prot & IOMMU_SYS_CACHE_ONLY_NWA)
                else if (prot & IOMMU_SYS_CACHE_NWA)
                        pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA
                                << ARM_LPAE_PTE_ATTRINDX_SHIFT);
        }
@@ -904,7 +910,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
              (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
               << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE)) |
              (ARM_LPAE_MAIR_ATTR_INC_OWBRANWA
               << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA));
               << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA)) |
              (ARM_LPAE_MAIR_ATTR_IWBRWA_OWBRANWA
               << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA));

        cfg->arm_lpae_s1_cfg.mair = reg;
@@ -561,6 +561,7 @@ struct mac80211_hwsim_data {
        u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];

        struct mac_address addresses[2];
        struct ieee80211_chanctx_conf *chanctx;
        int channels, idx;
        bool use_chanctx;
        bool destroy_on_close;
@@ -1191,7 +1192,8 @@ static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate)

static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                                       struct sk_buff *my_skb,
                                       int dst_portid)
                                       int dst_portid,
                                       struct ieee80211_channel *channel)
{
        struct sk_buff *skb;
        struct mac80211_hwsim_data *data = hw->priv;
@@ -1246,7 +1248,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
        if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
                goto nla_put_failure;

        if (nla_put_u32(skb, HWSIM_ATTR_FREQ, data->channel->center_freq))
        if (nla_put_u32(skb, HWSIM_ATTR_FREQ, channel->center_freq))
                goto nla_put_failure;

        /* We get the tx control (rate and retries) info*/
@@ -1593,7 +1595,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
        _portid = READ_ONCE(data->wmediumd);

        if (_portid || hwsim_virtio_enabled)
                return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
                return mac80211_hwsim_tx_frame_nl(hw, skb, _portid, channel);

        /* NO wmediumd detected, perfect medium simulation */
        data->tx_pkts++;
@@ -1704,7 +1706,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
                mac80211_hwsim_monitor_rx(hw, skb, chan);

        if (_pid || hwsim_virtio_enabled)
                return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
                return mac80211_hwsim_tx_frame_nl(hw, skb, _pid, chan);

        mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
        dev_kfree_skb(skb);
@@ -2443,6 +2445,11 @@ static int mac80211_hwsim_croc(struct ieee80211_hw *hw,
static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
                                      struct ieee80211_chanctx_conf *ctx)
{
        struct mac80211_hwsim_data *hwsim = hw->priv;

        mutex_lock(&hwsim->mutex);
        hwsim->chanctx = ctx;
        mutex_unlock(&hwsim->mutex);
        hwsim_set_chanctx_magic(ctx);
        wiphy_dbg(hw->wiphy,
                  "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -2454,6 +2461,11 @@ static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
                                          struct ieee80211_chanctx_conf *ctx)
{
        struct mac80211_hwsim_data *hwsim = hw->priv;

        mutex_lock(&hwsim->mutex);
        hwsim->chanctx = NULL;
        mutex_unlock(&hwsim->mutex);
        wiphy_dbg(hw->wiphy,
                  "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
                  ctx->def.chan->center_freq, ctx->def.width,
@@ -2466,6 +2478,11 @@ static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
                                          struct ieee80211_chanctx_conf *ctx,
                                          u32 changed)
{
        struct mac80211_hwsim_data *hwsim = hw->priv;

        mutex_lock(&hwsim->mutex);
        hwsim->chanctx = ctx;
        mutex_unlock(&hwsim->mutex);
        hwsim_check_chanctx_magic(ctx);
        wiphy_dbg(hw->wiphy,
                  "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
@@ -3060,6 +3077,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                hw->wiphy->max_remain_on_channel_duration = 1000;
                data->if_combination.radar_detect_widths = 0;
                data->if_combination.num_different_channels = data->channels;
                data->chanctx = NULL;
        } else {
                data->if_combination.num_different_channels = 1;
                data->if_combination.radar_detect_widths =
@@ -3567,6 +3585,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
        int frame_data_len;
        void *frame_data;
        struct sk_buff *skb = NULL;
        struct ieee80211_channel *channel = NULL;

        if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
            !info->attrs[HWSIM_ATTR_FRAME] ||
@@ -3593,6 +3612,17 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
        if (!data2)
                goto out;

        if (data2->use_chanctx) {
                if (data2->tmp_chan)
                        channel = data2->tmp_chan;
                else if (data2->chanctx)
                        channel = data2->chanctx->def.chan;
        } else {
                channel = data2->channel;
        }
        if (!channel)
                goto out;

        if (!hwsim_virtio_enabled) {
                if (hwsim_net_get_netgroup(genl_info_net(info)) !=
                    data2->netgroup)
@@ -3604,7 +3634,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,

        /* check if radio is configured properly */

        if (data2->idle || !data2->started)
        if ((data2->idle && !data2->tmp_chan) || !data2->started)
                goto out;

        /* A frame is received from user space */
@@ -3617,18 +3647,16 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
                mutex_lock(&data2->mutex);
                rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]);

                if (rx_status.freq != data2->channel->center_freq &&
                    (!data2->tmp_chan ||
                     rx_status.freq != data2->tmp_chan->center_freq)) {
                if (rx_status.freq != channel->center_freq) {
                        mutex_unlock(&data2->mutex);
                        goto out;
                }
                mutex_unlock(&data2->mutex);
        } else {
                rx_status.freq = data2->channel->center_freq;
                rx_status.freq = channel->center_freq;
        }

        rx_status.band = data2->channel->band;
        rx_status.band = channel->band;
        rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]);
        rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);
@@ -32,6 +32,7 @@ void rproc_coredump_cleanup(struct rproc *rproc)
                kfree(entry);
        }
}
EXPORT_SYMBOL(rproc_coredump_cleanup);

/**
 * rproc_coredump_add_segment() - add segment of device memory to coredump
@@ -327,6 +328,7 @@ void rproc_coredump(struct rproc *rproc)
         */
        wait_for_completion(&dump_state.dump_done);
}
EXPORT_SYMBOL(rproc_coredump);

/**
 * rproc_coredump_using_sections() - perform coredump using section headers
@@ -49,10 +49,6 @@ extern struct class rproc_class;
int rproc_init_sysfs(void);
void rproc_exit_sysfs(void);

/* from remoteproc_coredump.c */
void rproc_coredump_cleanup(struct rproc *rproc);
void rproc_coredump(struct rproc *rproc);

#ifdef CONFIG_REMOTEPROC_CDEV
void rproc_init_cdev(void);
void rproc_exit_cdev(void);
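For illustration (not from the commit): with the coredump APIs exported and their declarations moved out of the internal header, a vendor remoteproc driver can trigger a dump directly. A minimal sketch; the rproc pointer is a placeholder obtained elsewhere (e.g. rproc_get_by_phandle()).

    #include <linux/remoteproc.h>

    static void vendor_crash_handler(struct rproc *my_rproc)
    {
            /* Dumps the segments registered via rproc_coredump_add_segment(). */
            rproc_coredump(my_rproc);
    }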
@@ -730,7 +730,7 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
|
||||
*/
|
||||
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
|
||||
{
|
||||
clear_bit(tag, &hba->outstanding_reqs);
|
||||
__clear_bit(tag, &hba->outstanding_reqs);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1956,19 +1956,15 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
|
||||
{
|
||||
bool queue_resume_work = false;
|
||||
ktime_t curr_t = ktime_get();
|
||||
unsigned long flags;
|
||||
|
||||
if (!ufshcd_is_clkscaling_supported(hba))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||
if (!hba->clk_scaling.active_reqs++)
|
||||
queue_resume_work = true;
|
||||
|
||||
if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
|
||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||
if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
|
||||
return;
|
||||
}
|
||||
|
||||
if (queue_resume_work)
|
||||
queue_work(hba->clk_scaling.workq,
|
||||
@@ -1984,26 +1980,21 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
|
||||
hba->clk_scaling.busy_start_t = curr_t;
|
||||
hba->clk_scaling.is_busy_started = true;
|
||||
}
|
||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||
}
|
||||
|
||||
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
|
||||
{
|
||||
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
|
||||
unsigned long flags;
|
||||
|
||||
if (!ufshcd_is_clkscaling_supported(hba))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||
hba->clk_scaling.active_reqs--;
|
||||
if (!hba->outstanding_reqs && scaling->is_busy_started) {
|
||||
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
|
||||
scaling->busy_start_t));
|
||||
scaling->busy_start_t = 0;
|
||||
scaling->is_busy_started = false;
|
||||
}
|
||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||
}
|
||||
|
||||
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
|
||||
@@ -2029,20 +2020,15 @@ static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
|
||||
static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
|
||||
{
|
||||
int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||
if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
|
||||
hba->monitor.busy_start_ts[dir] = ktime_get();
|
||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||
}
|
||||
|
||||
static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
|
||||
{
|
||||
int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||
if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
|
||||
struct request *req = lrbp->cmd->request;
|
||||
struct ufs_hba_monitor *m = &hba->monitor;
|
||||
@@ -2066,7 +2052,6 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
|
||||
/* Push forward the busy start of monitor */
|
||||
m->busy_start_ts[dir] = now;
|
||||
}
|
||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -2085,21 +2070,10 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
|
||||
trace_android_vh_ufs_send_command(hba, lrbp);
|
||||
ufshcd_add_command_trace(hba, task_tag, "send");
|
||||
ufshcd_clk_scaling_start_busy(hba);
|
||||
__set_bit(task_tag, &hba->outstanding_reqs);
|
||||
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
|
||||
ufshcd_start_monitor(hba, lrbp);
|
||||
if (ufshcd_has_utrlcnr(hba)) {
|
||||
set_bit(task_tag, &hba->outstanding_reqs);
|
||||
ufshcd_writel(hba, 1 << task_tag,
|
||||
REG_UTP_TRANSFER_REQ_DOOR_BELL);
|
||||
} else {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||
set_bit(task_tag, &hba->outstanding_reqs);
|
||||
ufshcd_writel(hba, 1 << task_tag,
|
||||
REG_UTP_TRANSFER_REQ_DOOR_BELL);
|
||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||
}
|
||||
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
|
||||
/* Make sure that doorbell is committed immediately */
|
||||
wmb();
|
||||
}
|
||||
@@ -2663,6 +2637,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||
{
|
||||
struct ufshcd_lrb *lrbp;
|
||||
struct ufs_hba *hba;
|
||||
unsigned long flags;
|
||||
int tag;
|
||||
int err = 0;
|
||||
|
||||
@@ -2679,43 +2654,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||
if (!down_read_trylock(&hba->clk_scaling_lock))
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
|
||||
switch (hba->ufshcd_state) {
|
||||
case UFSHCD_STATE_OPERATIONAL:
|
||||
case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
|
||||
break;
|
||||
case UFSHCD_STATE_EH_SCHEDULED_FATAL:
|
||||
/*
|
||||
* pm_runtime_get_sync() is used at error handling preparation
|
||||
* stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
|
||||
* PM ops, it can never be finished if we let SCSI layer keep
|
||||
* retrying it, which gets err handler stuck forever. Neither
|
||||
* can we let the scsi cmd pass through, because UFS is in bad
|
||||
* state, the scsi cmd may eventually time out, which will get
|
||||
* err handler blocked for too long. So, just fail the scsi cmd
|
||||
* sent from PM ops, err handler can recover PM error anyways.
|
||||
*/
|
||||
if (hba->pm_op_in_progress) {
|
||||
hba->force_reset = true;
|
||||
set_host_byte(cmd, DID_BAD_TARGET);
|
||||
cmd->scsi_done(cmd);
|
||||
goto out;
|
||||
}
|
||||
fallthrough;
|
||||
case UFSHCD_STATE_RESET:
|
||||
err = SCSI_MLQUEUE_HOST_BUSY;
|
||||
goto out;
|
||||
case UFSHCD_STATE_ERROR:
|
||||
set_host_byte(cmd, DID_ERROR);
|
||||
cmd->scsi_done(cmd);
|
||||
goto out;
|
||||
default:
|
||||
dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
|
||||
__func__, hba->ufshcd_state);
|
||||
set_host_byte(cmd, DID_BAD_TARGET);
|
||||
cmd->scsi_done(cmd);
|
||||
goto out;
|
||||
}
|
||||
|
||||
hba->req_abort_count = 0;
|
||||
|
||||
err = ufshcd_hold(hba, true);
|
||||
@@ -2726,6 +2664,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||
WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
|
||||
(hba->clk_gating.state != CLKS_ON));
|
||||
|
||||
lrbp = &hba->lrb[tag];
|
||||
if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
|
||||
if (hba->pm_op_in_progress)
|
||||
set_host_byte(cmd, DID_BAD_TARGET);
|
||||
@@ -2735,7 +2674,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||
goto out;
|
||||
}
|
||||
|
||||
lrbp = &hba->lrb[tag];
|
||||
WARN_ON(lrbp->cmd);
|
||||
lrbp->cmd = cmd;
|
||||
lrbp->sense_bufflen = UFS_SENSE_SIZE;
|
||||
@@ -2766,7 +2704,51 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
|
||||
/* Make sure descriptors are ready before ringing the doorbell */
|
||||
wmb();
|
||||
|
||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||
switch (hba->ufshcd_state) {
|
||||
case UFSHCD_STATE_OPERATIONAL:
|
||||
case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
|
||||
break;
|
||||
case UFSHCD_STATE_EH_SCHEDULED_FATAL:
|
||||
/*
|
||||
* pm_runtime_get_sync() is used at error handling preparation
|
||||
* stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
|
||||
* PM ops, it can never be finished if we let SCSI layer keep
|
||||
* retrying it, which gets err handler stuck forever. Neither
|
||||
* can we let the scsi cmd pass through, because UFS is in bad
|
||||
* state, the scsi cmd may eventually time out, which will get
|
||||
* err handler blocked for too long. So, just fail the scsi cmd
|
||||
* sent from PM ops, err handler can recover PM error anyways.
|
||||
*/
|
||||
if (hba->pm_op_in_progress) {
|
||||
hba->force_reset = true;
|
||||
set_host_byte(cmd, DID_BAD_TARGET);
|
||||
goto out_compl_cmd;
|
||||
}
|
||||
fallthrough;
|
||||
case UFSHCD_STATE_RESET:
|
||||
err = SCSI_MLQUEUE_HOST_BUSY;
|
||||
goto out_compl_cmd;
|
||||
case UFSHCD_STATE_ERROR:
|
||||
set_host_byte(cmd, DID_ERROR);
|
||||
goto out_compl_cmd;
|
||||
default:
|
||||
dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
|
||||
__func__, hba->ufshcd_state);
|
||||
set_host_byte(cmd, DID_BAD_TARGET);
|
||||
goto out_compl_cmd;
|
||||
}
|
||||
ufshcd_send_command(hba, tag);
|
||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||
goto out;
|
||||
|
||||
out_compl_cmd:
|
||||
scsi_dma_unmap(lrbp->cmd);
|
||||
lrbp->cmd = NULL;
|
||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||
ufshcd_release(hba);
|
||||
if (!err)
|
||||
cmd->scsi_done(cmd);
|
||||
out:
|
||||
up_read(&hba->clk_scaling_lock);
|
||||
return err;
|
||||
@@ -2921,6 +2903,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
|
||||
int err;
|
||||
int tag;
|
||||
struct completion wait;
|
||||
unsigned long flags;
|
||||
|
||||
down_read(&hba->clk_scaling_lock);
|
||||
|
||||
@@ -2940,30 +2923,34 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
|
||||
req->timeout = msecs_to_jiffies(2 * timeout);
|
||||
blk_mq_start_request(req);
|
||||
|
||||
init_completion(&wait);
|
||||
lrbp = &hba->lrb[tag];
|
||||
if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
|
||||
err = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
init_completion(&wait);
|
||||
lrbp = &hba->lrb[tag];
|
||||
WARN_ON(lrbp->cmd);
|
||||
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
|
||||
if (unlikely(err))
|
||||
goto out;
|
||||
goto out_put_tag;
|
||||
|
||||
hba->dev_cmd.complete = &wait;
|
||||
|
||||
ufshcd_add_query_upiu_trace(hba, tag, "query_send");
|
||||
/* Make sure descriptors are ready before ringing the doorbell */
|
||||
wmb();
|
||||
|
||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||
ufshcd_send_command(hba, tag);
|
||||
spin_unlock_irqrestore(hba->host->host_lock, flags);
|
||||
|
||||
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
|
||||
|
||||
out:
|
||||
ufshcd_add_query_upiu_trace(hba, tag,
|
||||
err ? "query_complete_err" : "query_complete");
|
||||
|
||||
out:
|
||||
out_put_tag:
|
||||
blk_put_request(req);
|
||||
out_unlock:
|
||||
up_read(&hba->clk_scaling_lock);
|
||||
@@ -5096,24 +5083,6 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
    return result;
}

static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
                    u32 intr_mask)
{
    if (!ufshcd_is_auto_hibern8_supported(hba) ||
        !ufshcd_is_auto_hibern8_enabled(hba))
        return false;

    if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
        return false;

    if (hba->active_uic_cmd &&
        (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
        hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
        return false;

    return true;
}

/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
@@ -5127,10 +5096,6 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
    irqreturn_t retval = IRQ_NONE;

    spin_lock(hba->host->host_lock);
    if (ufshcd_is_auto_hibern8_error(hba, intr_status))
        hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);

    if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
        hba->active_uic_cmd->argument2 |=
                ufshcd_get_uic_cmd_result(hba);
@@ -5151,7 +5116,6 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
    if (retval == IRQ_HANDLED)
        ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
                         "complete");
    spin_unlock(hba->host->host_lock);
    return retval;
}

@@ -5188,7 +5152,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
            lrbp->cmd = NULL;
            /* Do not touch lrbp after scsi done */
            cmd->scsi_done(cmd);
            ufshcd_release(hba);
            __ufshcd_release(hba);
            update_scaling = true;
        } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
            lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
@@ -5200,23 +5164,25 @@
                update_scaling = true;
        }
    }
    if (update_scaling)
        ufshcd_clk_scaling_update_busy(hba);
    if (ufshcd_is_clkscaling_supported(hba) && update_scaling)
        hba->clk_scaling.active_reqs--;
    }

    ufshcd_clk_scaling_update_busy(hba);
}

/**
 * ufshcd_trc_handler - handle transfer requests completion
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 * @use_utrlcnr: get completed requests from UTRLCNR
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
    unsigned long completed_reqs = 0;
    unsigned long completed_reqs;
    u32 tr_doorbell;

    /* Resetting interrupt aggregation counters first and reading the
     * DOOR_BELL afterward allows us to handle all the completed requests.
@@ -5229,24 +5195,8 @@ static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr)
        !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
        ufshcd_reset_intr_aggr(hba);

    if (use_utrlcnr) {
        u32 utrlcnr;

        utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
        if (utrlcnr) {
            ufshcd_writel(hba, utrlcnr,
                      REG_UTP_TRANSFER_REQ_LIST_COMPL);
            completed_reqs = utrlcnr;
        }
    } else {
        unsigned long flags;
        u32 tr_doorbell;

        spin_lock_irqsave(hba->host->host_lock, flags);
        tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
        completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
        spin_unlock_irqrestore(hba->host->host_lock, flags);
    }
    tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
    completed_reqs = tr_doorbell ^ hba->outstanding_reqs;

    if (completed_reqs) {
        __ufshcd_transfer_req_compl(hba, completed_reqs);
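It is worth spelling out why the restored completion path can use a plain XOR: outstanding_reqs has a bit set for every slot the driver has issued, and the controller clears a slot's doorbell bit once that request finishes, so the two words differ exactly at the completed slots. A small self-contained demonstration:

    #include <stdio.h>

    int main(void)
    {
        /* Slots the driver has issued and not yet completed: 0, 2, 3, 5. */
        unsigned long outstanding = 0x2d;
        /* Doorbell as read back: hardware cleared bits for the finished
         * slots 0 and 3, leaving 2 and 5 still pending. */
        unsigned long doorbell = 0x24;

        /* A bit differs between the two views exactly when that slot
         * completed - the XOR used in ufshcd_transfer_req_compl(). */
        unsigned long completed = doorbell ^ outstanding;

        printf("completed slots mask = 0x%lx\n", completed); /* 0x9 */
        return 0;
    }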
@@ -5755,7 +5705,7 @@ out:
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba)
{
    ufshcd_trc_handler(hba, false);
    ufshcd_transfer_req_compl(hba);
    ufshcd_tmc_handler(hba);
}

@@ -6004,11 +5954,13 @@ static void ufshcd_err_handler(struct work_struct *work)
    ufshcd_set_eh_in_progress(hba);
    spin_unlock_irqrestore(hba->host->host_lock, flags);
    ufshcd_err_handling_prepare(hba);
    /* Complete requests that have door-bell cleared by h/w */
    ufshcd_complete_requests(hba);
    spin_lock_irqsave(hba->host->host_lock, flags);
    if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
        hba->ufshcd_state = UFSHCD_STATE_RESET;

    /* Complete requests that have door-bell cleared by h/w */
    ufshcd_complete_requests(hba);

    /*
     * A full reset and restore might have happened after preparation
     * is finished, double check whether we should stop.
@@ -6091,11 +6043,12 @@ static void ufshcd_err_handler(struct work_struct *work)
    }

lock_skip_pending_xfer_clear:
    spin_lock_irqsave(hba->host->host_lock, flags);

    /* Complete the requests that are cleared by s/w */
    ufshcd_complete_requests(hba);

    spin_lock_irqsave(hba->host->host_lock, flags);
    hba->silence_err_logs = false;

    if (err_xfer || err_tm) {
        needs_reset = true;
        goto do_reset;
@@ -6245,23 +6198,37 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
    return retval;
}

static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
                    u32 intr_mask)
{
    if (!ufshcd_is_auto_hibern8_supported(hba) ||
        !ufshcd_is_auto_hibern8_enabled(hba))
        return false;

    if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
        return false;

    if (hba->active_uic_cmd &&
        (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
        hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
        return false;

    return true;
}

/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Returns
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
{
    bool queue_eh_work = false;
    irqreturn_t retval = IRQ_NONE;

    spin_lock(hba->host->host_lock);
    hba->errors |= UFSHCD_ERROR_MASK & intr_status;

    if (hba->errors & INT_FATAL_ERRORS) {
        ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
                       hba->errors);
@@ -6318,9 +6285,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
     * itself without s/w intervention or errors that will be
     * handled by the SCSI core layer.
     */
    hba->errors = 0;
    hba->uic_error = 0;
    spin_unlock(hba->host->host_lock);
    return retval;
}

@@ -6355,17 +6319,13 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
 */
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
    unsigned long flags;
    struct request_queue *q = hba->tmf_queue;
    struct ctm_info ci = {
        .hba     = hba,
        .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
    };

    spin_lock_irqsave(hba->host->host_lock, flags);
    ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
    blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
    spin_unlock_irqrestore(hba->host->host_lock, flags);

    return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
}

@@ -6382,17 +6342,22 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
    irqreturn_t retval = IRQ_NONE;

    hba->errors = UFSHCD_ERROR_MASK & intr_status;

    if (ufshcd_is_auto_hibern8_error(hba, intr_status))
        hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);

    if (hba->errors)
        retval |= ufshcd_check_errors(hba);

    if (intr_status & UFSHCD_UIC_MASK)
        retval |= ufshcd_uic_cmd_compl(hba, intr_status);

    if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
        retval |= ufshcd_check_errors(hba, intr_status);

    if (intr_status & UTP_TASK_REQ_COMPL)
        retval |= ufshcd_tmc_handler(hba);

    if (intr_status & UTP_TRANSFER_REQ_COMPL)
        retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba));
        retval |= ufshcd_transfer_req_compl(hba);

    return retval;
}
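The dispatch above depends on IRQ_NONE being 0 and IRQ_HANDLED being 1, so sub-handler results can simply be OR-ed together: the interrupt counts as handled if any source fired. A tiny sketch of that convention, with the two values mirrored from <linux/irqreturn.h> (the fake_* handlers are made up):

    #include <stdio.h>

    /* Mirrored from <linux/irqreturn.h>: OR-ing sub-handler results
     * yields IRQ_HANDLED if at least one source was serviced. */
    enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 };

    static enum irqreturn fake_errors(void) { return IRQ_NONE; }
    static enum irqreturn fake_tmc(void)    { return IRQ_HANDLED; }

    int main(void)
    {
        enum irqreturn retval = IRQ_NONE;

        retval |= fake_errors();
        retval |= fake_tmc();
        printf("%s\n", retval == IRQ_HANDLED ? "IRQ_HANDLED" : "IRQ_NONE");
        return 0;
    }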
@@ -6413,6 +6378,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
    struct ufs_hba *hba = __hba;
    int retries = hba->nutrs;

    spin_lock(hba->host->host_lock);
    intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
    hba->ufs_stats.last_intr_status = intr_status;
    hba->ufs_stats.last_intr_ts = ktime_get();
@@ -6444,6 +6410,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
        ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
    }

    spin_unlock(hba->host->host_lock);
    return retval;
}

@@ -6620,6 +6587,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
    int err = 0;
    int tag;
    struct completion wait;
    unsigned long flags;
    u8 upiu_flags;

    down_read(&hba->clk_scaling_lock);
@@ -6632,13 +6600,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
    tag = req->tag;
    WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));

    init_completion(&wait);
    if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
        err = -EBUSY;
        goto out;
    }

    init_completion(&wait);
    lrbp = &hba->lrb[tag];

    WARN_ON(lrbp->cmd);
    lrbp->cmd = NULL;
    lrbp->sense_bufflen = 0;
@@ -6676,8 +6644,10 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,

    /* Make sure descriptors are ready before ringing the doorbell */
    wmb();

    spin_lock_irqsave(hba->host->host_lock, flags);
    ufshcd_send_command(hba, tag);
    spin_unlock_irqrestore(hba->host->host_lock, flags);

    /*
     * ignore the returning value here - ufshcd_check_query_response is
     * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
@@ -6796,6 +6766,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
    u32 pos;
    int err;
    u8 resp = 0xF, lun;
    unsigned long flags;

    host = cmd->device->host;
    hba = shost_priv(host);
@@ -6814,9 +6785,11 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
            err = ufshcd_clear_cmd(hba, pos);
            if (err)
                break;
            __ufshcd_transfer_req_compl(hba, pos);
        }
    }
    spin_lock_irqsave(host->host_lock, flags);
    ufshcd_transfer_req_compl(hba);
    spin_unlock_irqrestore(host->host_lock, flags);

out:
    hba->req_abort_count = 0;
@@ -6992,16 +6965,19 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
     * will fail, due to spec violation, scsi err handling next step
     * will be to send LU reset which, again, is a spec violation.
     * To avoid these unnecessary/illegal steps, first we clean up
     * the lrb taken by this cmd and re-set it in outstanding_reqs,
     * then queue the eh_work and bail.
     * the lrb taken by this cmd and mark the lrb as in_use, then
     * queue the eh_work and bail.
     */
    if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
        ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
        __ufshcd_transfer_req_compl(hba, (1UL << tag));
        set_bit(tag, &hba->outstanding_reqs);
        spin_lock_irqsave(host->host_lock, flags);
        hba->force_reset = true;
        ufshcd_schedule_eh_work(hba);
        if (lrbp->cmd) {
            __ufshcd_transfer_req_compl(hba, (1UL << tag));
            __set_bit(tag, &hba->outstanding_reqs);
            hba->force_reset = true;
            ufshcd_schedule_eh_work(hba);
        }

        spin_unlock_irqrestore(host->host_lock, flags);
        goto out;
    }
@@ -7014,7 +6990,9 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)

    if (!err) {
cleanup:
        spin_lock_irqsave(host->host_lock, flags);
        __ufshcd_transfer_req_compl(hba, (1UL << tag));
        spin_unlock_irqrestore(host->host_lock, flags);
out:
        err = SUCCESS;
    } else {
@@ -7044,15 +7022,19 @@ out:
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
    int err;
    unsigned long flags;

    /*
     * Stop the host controller and complete the requests
     * cleared by h/w
     */
    ufshcd_hba_stop(hba);

    spin_lock_irqsave(hba->host->host_lock, flags);
    hba->silence_err_logs = true;
    ufshcd_complete_requests(hba);
    hba->silence_err_logs = false;
    spin_unlock_irqrestore(hba->host->host_lock, flags);

    /* scale up clocks to max frequency before full reinitialization */
    ufshcd_set_clk_freq(hba, true);
@@ -1166,11 +1166,6 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
    return ufshcd_readl(hba, REG_UFS_VERSION);
}

static inline bool ufshcd_has_utrlcnr(struct ufs_hba *hba)
{
    return (hba->ufs_version >= ufshci_version(3, 0));
}

static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
                    bool up, enum ufs_notify_change_status status)
{
@@ -39,7 +39,6 @@ enum {
    REG_UTP_TRANSFER_REQ_DOOR_BELL      = 0x58,
    REG_UTP_TRANSFER_REQ_LIST_CLEAR     = 0x5C,
    REG_UTP_TRANSFER_REQ_LIST_RUN_STOP  = 0x60,
    REG_UTP_TRANSFER_REQ_LIST_COMPL     = 0x64,
    REG_UTP_TASK_REQ_LIST_BASE_L        = 0x70,
    REG_UTP_TASK_REQ_LIST_BASE_H        = 0x74,
    REG_UTP_TASK_REQ_DOOR_BELL          = 0x78,
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/pgtable.h>
#include "debug_kinfo.h"

/*
@@ -162,7 +163,13 @@ static int debug_kinfo_probe(struct platform_device *pdev)
    info->mod_core_layout_offset = offsetof(struct module, core_layout);
    info->mod_init_layout_offset = offsetof(struct module, init_layout);
    info->mod_kallsyms_offset = offsetof(struct module, kallsyms);

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
    info->module_start_va = MODULES_VADDR;
    info->module_end_va = MODULES_END;
#else
    info->module_start_va = VMALLOC_START;
    info->module_end_va = VMALLOC_END;
#endif
    update_kernel_all_info(all_info);

    return 0;
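debug_kinfo's job is to publish struct-member offsets so an out-of-kernel post-mortem tool can walk raw memory dumps without debug info. A standalone sketch of the offsetof() technique follows; struct module_like and its members are invented for illustration and do not match the real struct module layout.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for a kernel struct whose layout an external parser needs;
     * the real code records offsetof(struct module, core_layout) etc. */
    struct module_like {
        long     state;
        char     name[56];
        uint64_t core_layout;
    };

    struct layout_info {
        uint32_t name_offset;
        uint32_t core_layout_offset;
    };

    int main(void)
    {
        /* Publishing offsets lets a tool parse raw dumps even when the
         * struct layout changes between kernel builds. */
        struct layout_info info = {
            .name_offset        = (uint32_t)offsetof(struct module_like, name),
            .core_layout_offset = (uint32_t)offsetof(struct module_like, core_layout),
        };

        printf("name at +%u, core_layout at +%u\n",
               info.name_offset, info.core_layout_offset);
        return 0;
    }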
@@ -58,6 +58,8 @@ struct kernel_info {
    __u32 mod_core_layout_offset;
    __u32 mod_init_layout_offset;
    __u32 mod_kallsyms_offset;
    __u64 module_start_va;
    __u64 module_end_va;
} __packed;

struct kernel_all_info {
@@ -691,6 +691,9 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
    }

#ifdef CONFIG_QUOTA
    /* Needed for iput() to work correctly and not trash data */
    sbi->sb->s_flags |= SB_ACTIVE;

    /*
     * Turn on quotas which were not enabled for read-only mounts if
     * filesystem has quota feature, so that they are updated correctly.
@@ -12,9 +12,11 @@
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
@@ -736,7 +738,7 @@ out:
    return ret;
}

static void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
    struct f2fs_inode_info *fi = F2FS_I(dic->inode);
@@ -835,7 +837,8 @@ out_end_io:
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed)
void f2fs_end_read_compressed_page(struct page *page, bool failed,
        block_t blkaddr)
{
    struct decompress_io_ctx *dic =
            (struct decompress_io_ctx *)page_private(page);
@@ -845,6 +848,9 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed)

    if (failed)
        WRITE_ONCE(dic->failed, true);
    else if (blkaddr)
        f2fs_cache_compressed_page(sbi, page,
                dic->inode->i_ino, blkaddr);

    if (atomic_dec_and_test(&dic->remaining_pages))
        f2fs_decompress_cluster(dic);
@@ -1660,6 +1666,164 @@ void f2fs_put_page_dic(struct page *page)
    f2fs_put_dic(dic);
}

const struct address_space_operations f2fs_compress_aops = {
    .releasepage = f2fs_release_page,
    .invalidatepage = f2fs_invalidate_page,
};

struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
{
    return sbi->compress_inode->i_mapping;
}

void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
{
    if (!sbi->compress_inode)
        return;
    invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
}

void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
        nid_t ino, block_t blkaddr)
{
    struct page *cpage;
    int ret;

    if (!test_opt(sbi, COMPRESS_CACHE))
        return;

    if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
        return;

    if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
        return;

    cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
    if (cpage) {
        f2fs_put_page(cpage, 0);
        return;
    }

    cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
    if (!cpage)
        return;

    ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
                    blkaddr, GFP_NOFS);
    if (ret) {
        f2fs_put_page(cpage, 0);
        return;
    }

    set_page_private_data(cpage, ino);

    if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
        goto out;

    memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
    SetPageUptodate(cpage);
out:
    f2fs_put_page(cpage, 1);
}

bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
        block_t blkaddr)
{
    struct page *cpage;
    bool hitted = false;

    if (!test_opt(sbi, COMPRESS_CACHE))
        return false;

    cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
                blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
    if (cpage) {
        if (PageUptodate(cpage)) {
            atomic_inc(&sbi->compress_page_hit);
            memcpy(page_address(page),
                page_address(cpage), PAGE_SIZE);
            hitted = true;
        }
        f2fs_put_page(cpage, 1);
    }

    return hitted;
}

void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
{
    struct address_space *mapping = sbi->compress_inode->i_mapping;
    struct pagevec pvec;
    pgoff_t index = 0;
    pgoff_t end = MAX_BLKADDR(sbi);

    if (!mapping->nrpages)
        return;

    pagevec_init(&pvec);

    do {
        unsigned int nr_pages;
        int i;

        nr_pages = pagevec_lookup_range(&pvec, mapping,
                        &index, end - 1);
        if (!nr_pages)
            break;

        for (i = 0; i < nr_pages; i++) {
            struct page *page = pvec.pages[i];

            if (page->index > end)
                break;

            lock_page(page);
            if (page->mapping != mapping) {
                unlock_page(page);
                continue;
            }

            if (ino != get_page_private_data(page)) {
                unlock_page(page);
                continue;
            }

            generic_error_remove_page(mapping, page);
            unlock_page(page);
        }
        pagevec_release(&pvec);
        cond_resched();
    } while (index < end);
}

int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
{
    struct inode *inode;

    if (!test_opt(sbi, COMPRESS_CACHE))
        return 0;

    inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
    if (IS_ERR(inode))
        return PTR_ERR(inode);
    sbi->compress_inode = inode;

    sbi->compress_percent = COMPRESS_PERCENT;
    sbi->compress_watermark = COMPRESS_WATERMARK;

    atomic_set(&sbi->compress_page_hit, 0);

    return 0;
}

void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
{
    if (!sbi->compress_inode)
        return;
    iput(sbi->compress_inode);
    sbi->compress_inode = NULL;
}

int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
    dev_t dev = sbi->sb->s_bdev->bd_dev;

fs/f2fs/data.c (287 changes)
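f2fs_cache_compressed_page()/f2fs_load_compressed_page() above behave as a best-effort lookaside cache keyed by block address: a miss is never an error, the caller just falls back to reading the device. A toy userspace model of those semantics, with a direct-mapped table standing in for the compress inode's page cache (all names and sizes here are made up):

    #include <stdio.h>
    #include <string.h>
    #include <stdbool.h>

    #define SLOTS 64
    #define BLKSZ 16   /* toy "page" payload */

    struct slot { bool valid; unsigned long blkaddr; char data[BLKSZ]; };
    static struct slot cache[SLOTS];
    static unsigned long hits;

    /* Store is best-effort: a collision silently evicts the old entry. */
    static void cache_store(unsigned long blkaddr, const char *data)
    {
        struct slot *s = &cache[blkaddr % SLOTS];
        s->valid = true;
        s->blkaddr = blkaddr;
        memcpy(s->data, data, BLKSZ);
    }

    /* Load returns false on a miss; the caller reads the device instead. */
    static bool cache_load(unsigned long blkaddr, char *out)
    {
        struct slot *s = &cache[blkaddr % SLOTS];
        if (!s->valid || s->blkaddr != blkaddr)
            return false;
        memcpy(out, s->data, BLKSZ);
        hits++;
        return true;
    }

    int main(void)
    {
        char buf[BLKSZ];

        cache_store(4242, "compressed blob");
        printf("load 4242: %s\n", cache_load(4242, buf) ? buf : "(miss)");
        printf("load 4243: %s\n", cache_load(4243, buf) ? buf : "(miss)");
        printf("hits = %lu\n", hits);
        return 0;
    }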
@@ -133,7 +133,7 @@ static void f2fs_finish_read_bio(struct bio *bio)

        if (f2fs_is_compressed_page(page)) {
            if (bio->bi_status)
                f2fs_end_read_compressed_page(page, true);
                f2fs_end_read_compressed_page(page, true, 0);
            f2fs_put_page_dic(page);
            continue;
        }
@@ -229,15 +229,19 @@ static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
    struct bio_vec *bv;
    struct bvec_iter_all iter_all;
    bool all_compressed = true;
    block_t blkaddr = SECTOR_TO_BLOCK(ctx->bio->bi_iter.bi_sector);

    bio_for_each_segment_all(bv, ctx->bio, iter_all) {
        struct page *page = bv->bv_page;

        /* PG_error was set if decryption failed. */
        if (f2fs_is_compressed_page(page))
            f2fs_end_read_compressed_page(page, PageError(page));
            f2fs_end_read_compressed_page(page, PageError(page),
                        blkaddr);
        else
            all_compressed = false;

        blkaddr++;
    }

    /*
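The blkaddr seeding above converts the bio's starting sector into an f2fs block number. With 512-byte sectors and 4 KiB f2fs blocks, SECTOR_TO_BLOCK() is a right shift by log2(4096/512) = 3; a quick standalone check:

    #include <stdio.h>

    /* Mirrors f2fs's SECTOR_TO_BLOCK(): 4 KiB blocks over 512 B sectors. */
    #define F2FS_LOG_SECTORS_PER_BLOCK 3
    #define SECTOR_TO_BLOCK(s) ((s) >> F2FS_LOG_SECTORS_PER_BLOCK)

    int main(void)
    {
        unsigned long long bi_sector = 262144; /* 128 MiB / 512 B */

        /* 262144 sectors = 128 MiB -> block 32768 (128 MiB / 4 KiB) */
        printf("sector %llu -> block %llu\n",
               bi_sector, SECTOR_TO_BLOCK(bi_sector));
        return 0;
    }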
@@ -1368,9 +1372,11 @@ alloc:
    old_blkaddr = dn->data_blkaddr;
    f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
                &sum, seg_type, NULL);
    if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
    if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
        invalidate_mapping_pages(META_MAPPING(sbi),
                old_blkaddr, old_blkaddr);
        f2fs_invalidate_compress_page(sbi, old_blkaddr);
    }
    f2fs_update_data_blkaddr(dn, dn->data_blkaddr);

    /*
@@ -2190,7 +2196,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
        goto out_put_dnode;
    }

    for (i = 0; i < dic->nr_cpages; i++) {
    for (i = 0; i < cc->nr_cpages; i++) {
        struct page *page = dic->cpages[i];
        block_t blkaddr;
        struct bio_post_read_ctx *ctx;
@@ -2198,6 +2204,14 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
        blkaddr = data_blkaddr(dn.inode, dn.node_page,
                        dn.ofs_in_node + i + 1);

        f2fs_wait_on_block_writeback(inode, blkaddr);

        if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
            if (atomic_dec_and_test(&dic->remaining_pages))
                f2fs_decompress_cluster(dic);
            continue;
        }

        if (bio && (!page_is_mergeable(sbi, bio,
                    *last_block_in_bio, blkaddr) ||
            !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
@@ -2219,8 +2233,6 @@ submit_and_realloc:
            }
        }

        f2fs_wait_on_block_writeback(inode, blkaddr);

        if (bio_add_page(bio, page, blocksize, 0) < blocksize)
            goto submit_and_realloc;

@@ -2476,6 +2488,10 @@ static inline bool check_inplace_update_policy(struct inode *inode,

bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
{
    /* swap file is migrating in aligned write mode */
    if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
        return false;

    if (f2fs_is_pinned_file(inode))
        return true;

@@ -2498,6 +2514,11 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
        return true;
    if (f2fs_is_atomic_file(inode))
        return true;

    /* swap file is migrating in aligned write mode */
    if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
        return true;

    if (fio) {
        if (page_private_gcing(fio->page))
            return true;
@@ -3675,6 +3696,13 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,

    clear_page_private_gcing(page);

    if (test_opt(sbi, COMPRESS_CACHE)) {
        if (f2fs_compressed_file(inode))
            f2fs_invalidate_compress_pages(sbi, inode->i_ino);
        if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
            clear_page_private_data(page);
    }

    if (page_private_atomic(page))
        return f2fs_drop_inmem_page(inode, page);

@@ -3692,6 +3720,16 @@ int f2fs_release_page(struct page *page, gfp_t wait)
    if (page_private_atomic(page))
        return 0;

    if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
        struct f2fs_sb_info *sbi = F2FS_P_SB(page);
        struct inode *inode = page->mapping->host;

        if (f2fs_compressed_file(inode))
            f2fs_invalidate_compress_pages(sbi, inode->i_ino);
        if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
            clear_page_private_data(page);
    }

    clear_page_private_gcing(page);

    detach_page_private(page);
@@ -3858,67 +3896,66 @@ int f2fs_migrate_page(struct address_space *mapping,
#endif

#ifdef CONFIG_SWAP
static int f2fs_is_file_aligned(struct inode *inode)
static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
                            unsigned int blkcnt)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    block_t main_blkaddr = SM_I(sbi)->main_blkaddr;
    block_t cur_lblock;
    block_t last_lblock;
    block_t pblock;
    unsigned long nr_pblocks;
    unsigned int blocks_per_sec = BLKS_PER_SEC(sbi);
    unsigned int not_aligned = 0;
    unsigned int blkofs;
    unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
    unsigned int secidx = start_blk / blk_per_sec;
    unsigned int end_sec = secidx + blkcnt / blk_per_sec;
    int ret = 0;

    cur_lblock = 0;
    last_lblock = bytes_to_blks(inode, i_size_read(inode));
    down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
    down_write(&F2FS_I(inode)->i_mmap_sem);

    while (cur_lblock < last_lblock) {
        struct f2fs_map_blocks map;
    set_inode_flag(inode, FI_ALIGNED_WRITE);

        memset(&map, 0, sizeof(map));
        map.m_lblk = cur_lblock;
        map.m_len = last_lblock - cur_lblock;
        map.m_next_pgofs = NULL;
        map.m_next_extent = NULL;
        map.m_seg_type = NO_CHECK_TYPE;
        map.m_may_create = false;
    for (; secidx < end_sec; secidx++) {
        down_write(&sbi->pin_sem);

        ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
        if (ret)
            goto out;
        f2fs_lock_op(sbi);
        f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
        f2fs_unlock_op(sbi);

        /* hole */
        if (!(map.m_flags & F2FS_MAP_FLAGS)) {
            f2fs_err(sbi, "Swapfile has holes\n");
            ret = -ENOENT;
            goto out;
        }
        set_inode_flag(inode, FI_DO_DEFRAG);

        pblock = map.m_pblk;
        nr_pblocks = map.m_len;
        for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
            struct page *page;
            unsigned int blkidx = secidx * blk_per_sec + blkofs;

        if ((pblock - main_blkaddr) & (blocks_per_sec - 1) ||
            nr_pblocks & (blocks_per_sec - 1)) {
            if (f2fs_is_pinned_file(inode)) {
                f2fs_err(sbi, "Swapfile does not align to section");
                ret = -EINVAL;
                goto out;
            page = f2fs_get_lock_data_page(inode, blkidx, true);
            if (IS_ERR(page)) {
                up_write(&sbi->pin_sem);
                ret = PTR_ERR(page);
                goto done;
            }
            not_aligned++;

            set_page_dirty(page);
            f2fs_put_page(page, 1);
        }

        cur_lblock += nr_pblocks;
        clear_inode_flag(inode, FI_DO_DEFRAG);

        ret = filemap_fdatawrite(inode->i_mapping);

        up_write(&sbi->pin_sem);

        if (ret)
            break;
    }
    if (not_aligned)
        f2fs_warn(sbi, "Swapfile (%u) is not align to section: \n"
            "\t1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate()",
            not_aligned);
out:

done:
    clear_inode_flag(inode, FI_DO_DEFRAG);
    clear_inode_flag(inode, FI_ALIGNED_WRITE);

    up_write(&F2FS_I(inode)->i_mmap_sem);
    up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

    return ret;
}

static int check_swap_activate_fast(struct swap_info_struct *sis,
static int check_swap_activate(struct swap_info_struct *sis,
                struct file *swap_file, sector_t *span)
{
    struct address_space *mapping = swap_file->f_mapping;
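The alignment tests in this swapfile code use the usual power-of-two trick: because BLKS_PER_SEC() is a power of two, x & (blks_per_sec - 1) equals x % blks_per_sec. A standalone check with made-up values:

    #include <stdio.h>
    #include <assert.h>

    int main(void)
    {
        /* Mirrors "(pblock - main_blkaddr) & (blocks_per_sec - 1)" above;
         * 512 is an illustrative section size, not a fixed f2fs value. */
        unsigned int blks_per_sec = 512; /* must be a power of two */
        unsigned int offsets[] = { 0, 512, 1000, 1024, 1536, 2000 };

        for (unsigned int i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
            unsigned int off = offsets[i];

            assert((off & (blks_per_sec - 1)) == (off % blks_per_sec));
            printf("offset %4u -> %s\n", off,
                   (off & (blks_per_sec - 1)) ? "not section-aligned"
                                              : "section-aligned");
        }
        return 0;
    }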
@@ -3931,7 +3968,8 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
    sector_t highest_pblock = 0;
    int nr_extents = 0;
    unsigned long nr_pblocks;
    unsigned int blocks_per_sec = BLKS_PER_SEC(sbi);
    unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
    unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
    unsigned int not_aligned = 0;
    int ret = 0;

@@ -3944,7 +3982,7 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,

    while (cur_lblock < last_lblock && cur_lblock < sis->max) {
        struct f2fs_map_blocks map;

retry:
        cond_resched();

        memset(&map, 0, sizeof(map));
@@ -3961,7 +3999,7 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,

        /* hole */
        if (!(map.m_flags & F2FS_MAP_FLAGS)) {
            f2fs_err(sbi, "Swapfile has holes\n");
            f2fs_err(sbi, "Swapfile has holes");
            ret = -EINVAL;
            goto out;
        }
@@ -3969,16 +4007,28 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
        pblock = map.m_pblk;
        nr_pblocks = map.m_len;

        if ((pblock - SM_I(sbi)->main_blkaddr) & (blocks_per_sec - 1) ||
            nr_pblocks & (blocks_per_sec - 1)) {
            if (f2fs_is_pinned_file(inode)) {
                f2fs_err(sbi, "Swapfile does not align to section");
                ret = -EINVAL;
                goto out;
            }
        if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
            nr_pblocks & sec_blks_mask) {
            not_aligned++;
        }

            nr_pblocks = roundup(nr_pblocks, blks_per_sec);
            if (cur_lblock + nr_pblocks > sis->max)
                nr_pblocks -= blks_per_sec;

            if (!nr_pblocks) {
                /* this extent is last one */
                nr_pblocks = map.m_len;
                f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
                goto next;
            }

            ret = f2fs_migrate_blocks(inode, cur_lblock,
                            nr_pblocks);
            if (ret)
                goto out;
            goto retry;
        }
next:
        if (cur_lblock + nr_pblocks >= sis->max)
            nr_pblocks = sis->max - cur_lblock;

@@ -4005,122 +4055,13 @@ static int check_swap_activate_fast(struct swap_info_struct *sis,
    sis->max = cur_lblock;
    sis->pages = cur_lblock - 1;
    sis->highest_bit = cur_lblock - 1;

out:
    if (not_aligned)
        f2fs_warn(sbi, "Swapfile (%u) is not align to section: \n"
            "\t1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate()",
            not_aligned);
out:
    f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
          not_aligned, blks_per_sec * F2FS_BLKSIZE);
    return ret;
}

/* Copied from generic_swapfile_activate() to check any holes */
static int check_swap_activate(struct swap_info_struct *sis,
                struct file *swap_file, sector_t *span)
{
    struct address_space *mapping = swap_file->f_mapping;
    struct inode *inode = mapping->host;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    unsigned blocks_per_page;
    unsigned long page_no;
    sector_t probe_block;
    sector_t last_block;
    sector_t lowest_block = -1;
    sector_t highest_block = 0;
    int nr_extents = 0;
    int ret = 0;

    if (PAGE_SIZE == F2FS_BLKSIZE)
        return check_swap_activate_fast(sis, swap_file, span);

    ret = f2fs_is_file_aligned(inode);
    if (ret)
        goto out;

    blocks_per_page = bytes_to_blks(inode, PAGE_SIZE);

    /*
     * Map all the blocks into the extent list. This code doesn't try
     * to be very smart.
     */
    probe_block = 0;
    page_no = 0;
    last_block = bytes_to_blks(inode, i_size_read(inode));
    while ((probe_block + blocks_per_page) <= last_block &&
            page_no < sis->max) {
        unsigned block_in_page;
        sector_t first_block;
        sector_t block = 0;

        cond_resched();

        block = probe_block;
        ret = bmap(inode, &block);
        if (ret)
            goto out;
        if (!block)
            goto bad_bmap;
        first_block = block;

        /*
         * It must be PAGE_SIZE aligned on-disk
         */
        if (first_block & (blocks_per_page - 1)) {
            probe_block++;
            goto reprobe;
        }

        for (block_in_page = 1; block_in_page < blocks_per_page;
                    block_in_page++) {

            block = probe_block + block_in_page;
            ret = bmap(inode, &block);
            if (ret)
                goto out;
            if (!block)
                goto bad_bmap;

            if (block != first_block + block_in_page) {
                /* Discontiguity */
                probe_block++;
                goto reprobe;
            }
        }

        first_block >>= (PAGE_SHIFT - inode->i_blkbits);
        if (page_no) {  /* exclude the header page */
            if (first_block < lowest_block)
                lowest_block = first_block;
            if (first_block > highest_block)
                highest_block = first_block;
        }

        /*
         * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
         */
        ret = add_swap_extent(sis, page_no, 1, first_block);
        if (ret < 0)
            goto out;
        nr_extents += ret;
        page_no++;
        probe_block += blocks_per_page;
reprobe:
        continue;
    }
    ret = nr_extents;
    *span = 1 + highest_block - lowest_block;
    if (page_no == 0)
        page_no = 1;    /* force Empty message */
    sis->max = page_no;
    sis->pages = page_no - 1;
    sis->highest_bit = page_no - 1;
out:
    return ret;
bad_bmap:
    f2fs_err(sbi, "Swapfile has holes\n");
    return -EINVAL;
}

static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
                sector_t *span)
{
@@ -152,6 +152,12 @@ static void update_general_status(struct f2fs_sb_info *sbi)
    si->node_pages = NODE_MAPPING(sbi)->nrpages;
    if (sbi->meta_inode)
        si->meta_pages = META_MAPPING(sbi)->nrpages;
#ifdef CONFIG_F2FS_FS_COMPRESSION
    if (sbi->compress_inode) {
        si->compress_pages = COMPRESS_MAPPING(sbi)->nrpages;
        si->compress_page_hit = atomic_read(&sbi->compress_page_hit);
    }
#endif
    si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT];
    si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT];
    si->sits = MAIN_SEGS(sbi);
@@ -309,6 +315,12 @@ get_cache:

        si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
    }
#ifdef CONFIG_F2FS_FS_COMPRESSION
    if (sbi->compress_inode) {
        unsigned npages = COMPRESS_MAPPING(sbi)->nrpages;
        si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
    }
#endif
}

static int stat_show(struct seq_file *s, void *v)
@@ -476,6 +488,7 @@ static int stat_show(struct seq_file *s, void *v)
               "volatile IO: %4d (Max. %4d)\n",
               si->inmem_pages, si->aw_cnt, si->max_aw_cnt,
               si->vw_cnt, si->max_vw_cnt);
        seq_printf(s, " - compress: %4d, hit:%8d\n", si->compress_pages, si->compress_page_hit);
        seq_printf(s, " - nodes: %4d in %4d\n",
               si->ndirty_node, si->node_pages);
        seq_printf(s, " - dents: %4d in dirs:%4d (%4d)\n",
@@ -16,6 +16,10 @@
#include "xattr.h"
#include <trace/events/f2fs.h>

#ifdef CONFIG_UNICODE
extern struct kmem_cache *f2fs_cf_name_slab;
#endif

static unsigned long dir_blocks(struct inode *inode)
{
    return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1))
@@ -77,11 +81,10 @@ int f2fs_init_casefolded_name(const struct inode *dir,
{
#ifdef CONFIG_UNICODE
    struct super_block *sb = dir->i_sb;
    struct f2fs_sb_info *sbi = F2FS_SB(sb);

    if (IS_CASEFOLDED(dir)) {
        fname->cf_name.name = f2fs_kmalloc(sbi, F2FS_NAME_LEN,
                        GFP_NOFS);
        fname->cf_name.name = kmem_cache_alloc(f2fs_cf_name_slab,
                        GFP_NOFS);
        if (!fname->cf_name.name)
            return -ENOMEM;
        fname->cf_name.len = utf8_casefold(sb->s_encoding,
@@ -89,7 +92,7 @@ int f2fs_init_casefolded_name(const struct inode *dir,
                        fname->cf_name.name,
                        F2FS_NAME_LEN);
        if ((int)fname->cf_name.len <= 0) {
            kfree(fname->cf_name.name);
            kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
            fname->cf_name.name = NULL;
            if (sb_has_strict_encoding(sb))
                return -EINVAL;
@@ -172,8 +175,10 @@ void f2fs_free_filename(struct f2fs_filename *fname)
    fname->crypto_buf.name = NULL;
#endif
#ifdef CONFIG_UNICODE
    kfree(fname->cf_name.name);
    fname->cf_name.name = NULL;
    if (fname->cf_name.name) {
        kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
        fname->cf_name.name = NULL;
    }
#endif
}

fs/f2fs/f2fs.h (117 changes)
@@ -98,6 +98,7 @@ extern const char *f2fs_fault_name[FAULT_MAX];
#define F2FS_MOUNT_ATGC                 0x08000000
#define F2FS_MOUNT_MERGE_CHECKPOINT     0x10000000
#define F2FS_MOUNT_GC_MERGE             0x20000000
#define F2FS_MOUNT_COMPRESS_CACHE       0x40000000

#define F2FS_OPTION(sbi)    ((sbi)->mount_opt)
#define clear_opt(sbi, option)  (F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
@@ -168,6 +169,7 @@ struct f2fs_mount_info {
#define F2FS_FEATURE_SB_CHKSUM      0x0800
#define F2FS_FEATURE_CASEFOLD       0x1000
#define F2FS_FEATURE_COMPRESSION    0x2000
#define F2FS_FEATURE_RO             0x4000

#define __F2FS_HAS_FEATURE(raw_super, mask) \
    ((raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -707,6 +709,7 @@ enum {
    FI_MMAP_FILE,       /* indicate file was mmapped */
    FI_ENABLE_COMPRESS, /* enable compression in "user" compression mode */
    FI_COMPRESS_RELEASED,   /* compressed blocks were released */
    FI_ALIGNED_WRITE,   /* enable aligned write */
    FI_MAX,         /* max flag, never be used */
};

@@ -940,6 +943,7 @@ static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
#define NR_CURSEG_DATA_TYPE (3)
#define NR_CURSEG_NODE_TYPE (3)
#define NR_CURSEG_INMEM_TYPE    (2)
#define NR_CURSEG_RO_TYPE   (2)
#define NR_CURSEG_PERSIST_TYPE  (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
#define NR_CURSEG_TYPE      (NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)

@@ -1372,6 +1376,37 @@ PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION);
PAGE_PRIVATE_CLEAR_FUNC(atomic, ATOMIC_WRITE);
PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE);

static inline unsigned long get_page_private_data(struct page *page)
{
    unsigned long data = page_private(page);

    if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data))
        return 0;
    return data >> PAGE_PRIVATE_MAX;
}

static inline void set_page_private_data(struct page *page, unsigned long data)
{
    if (!PagePrivate(page)) {
        get_page(page);
        SetPagePrivate(page);
    }
    set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page));
    page_private(page) |= data << PAGE_PRIVATE_MAX;
}

static inline void clear_page_private_data(struct page *page)
{
    page_private(page) &= (1 << PAGE_PRIVATE_MAX) - 1;
    if (page_private(page) == 1 << PAGE_PRIVATE_NOT_POINTER) {
        set_page_private(page, 0);
        if (PagePrivate(page)) {
            ClearPagePrivate(page);
            put_page(page);
        }
    }
}

/* For compression */
enum compress_algorithm_type {
    COMPRESS_LZO,
@@ -1386,6 +1421,9 @@ enum compress_flag {
    COMPRESS_MAX_FLAG,
};

#define COMPRESS_WATERMARK          20
#define COMPRESS_PERCENT            20

#define COMPRESS_DATA_RESERVED_SIZE     4
struct compress_data {
    __le32 clen;            /* compressed data size */
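The page_private() helpers above pack a payload and a type flag into one word: PAGE_PRIVATE_NOT_POINTER marks the word as inline data rather than a pointer, and the payload lives above the flag bits, shifted by PAGE_PRIVATE_MAX. A standalone model of that packing; the shift value 8 below is illustrative, not the kernel's:

    #include <stdio.h>
    #include <assert.h>

    #define PAGE_PRIVATE_NOT_POINTER 0   /* bit index of the type flag */
    #define PAGE_PRIVATE_MAX         8   /* illustrative shift only */

    static unsigned long set_data(unsigned long priv, unsigned long data)
    {
        priv |= 1UL << PAGE_PRIVATE_NOT_POINTER;  /* tag as inline data */
        priv |= data << PAGE_PRIVATE_MAX;         /* payload above flags */
        return priv;
    }

    static unsigned long get_data(unsigned long priv)
    {
        if (!(priv & (1UL << PAGE_PRIVATE_NOT_POINTER)))
            return 0;   /* word holds a pointer, not data */
        return priv >> PAGE_PRIVATE_MAX;
    }

    int main(void)
    {
        unsigned long priv = 0;

        priv = set_data(priv, 4242);    /* e.g. an inode number */
        assert(get_data(priv) == 4242);
        printf("packed=0x%lx data=%lu\n", priv, get_data(priv));
        return 0;
    }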
@@ -1663,6 +1701,9 @@ struct f2fs_sb_info {
    struct kobject s_stat_kobj;     /* /sys/fs/f2fs/<devname>/stat */
    struct completion s_stat_kobj_unregister;

    struct kobject s_feature_list_kobj;     /* /sys/fs/f2fs/<devname>/feature_list */
    struct completion s_feature_list_kobj_unregister;

    /* For shrinker support */
    struct list_head s_list;
    int s_ndevs;            /* number of devices */
@@ -1695,6 +1736,12 @@ struct f2fs_sb_info {
    u64 compr_written_block;
    u64 compr_saved_block;
    u32 compr_new_inode;

    /* For compressed block cache */
    struct inode *compress_inode;       /* cache compressed blocks */
    unsigned int compress_percent;      /* cache page percentage */
    unsigned int compress_watermark;    /* cache page watermark */
    atomic_t compress_page_hit;     /* cache hit count */
#endif
};

@@ -3101,25 +3148,6 @@ static inline bool is_dot_dotdot(const u8 *name, size_t len)
    return false;
}

static inline bool f2fs_may_extent_tree(struct inode *inode)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

    if (!test_opt(sbi, EXTENT_CACHE) ||
        is_inode_flag_set(inode, FI_NO_EXTENT) ||
        is_inode_flag_set(inode, FI_COMPRESSED_FILE))
        return false;

    /*
     * for recovered files during mount do not create extents
     * if shrinker is not registered.
     */
    if (list_empty(&sbi->s_list))
        return false;

    return S_ISREG(inode->i_mode);
}

static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
                size_t size, gfp_t flags)
{
@@ -3662,7 +3690,8 @@ struct f2fs_stat_info {
    unsigned int bimodal, avg_vblocks;
    int util_free, util_valid, util_invalid;
    int rsvd_segs, overp_segs;
    int dirty_count, node_pages, meta_pages;
    int dirty_count, node_pages, meta_pages, compress_pages;
    int compress_page_hit;
    int prefree_count, call_count, cp_count, bg_cp_count;
    int tot_segs, node_segs, data_segs, free_segs, free_secs;
    int bg_node_segs, bg_data_segs;
@@ -3998,7 +4027,9 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void);
void f2fs_end_read_compressed_page(struct page *page, bool failed);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
void f2fs_end_read_compressed_page(struct page *page, bool failed,
                block_t blkaddr);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
@@ -4016,10 +4047,19 @@ void f2fs_put_page_dic(struct page *page);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi);
void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
int __init f2fs_init_compress_cache(void);
void f2fs_destroy_compress_cache(void);
struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi);
void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr);
void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
                        nid_t ino, block_t blkaddr);
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
                        block_t blkaddr);
void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino);
#define inc_compr_inode_stat(inode)                 \
    do {                                \
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);        \
@@ -4048,7 +4088,9 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_end_read_compressed_page(struct page *page, bool failed)
static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
static inline void f2fs_end_read_compressed_page(struct page *page,
                bool failed, block_t blkaddr)
{
    WARN_ON_ONCE(1);
}
@@ -4056,10 +4098,20 @@ static inline void f2fs_put_page_dic(struct page *page)
{
    WARN_ON_ONCE(1);
}
static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; }
static inline void f2fs_destroy_compress_cache(void) { }
static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi,
                block_t blkaddr) { }
static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi,
                struct page *page, nid_t ino, block_t blkaddr) { }
static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
                struct page *page, block_t blkaddr) { return false; }
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
                nid_t ino) { }
#define inc_compr_inode_stat(inode)     do { } while (0)
#endif

@@ -4124,6 +4176,27 @@ F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);

static inline bool f2fs_may_extent_tree(struct inode *inode)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

    if (!test_opt(sbi, EXTENT_CACHE) ||
        is_inode_flag_set(inode, FI_NO_EXTENT) ||
        (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
         !f2fs_sb_has_readonly(sbi)))
        return false;

    /*
     * for recovered files during mount do not create extents
     * if shrinker is not registered.
     */
    if (list_empty(&sbi->s_list))
        return false;

    return S_ISREG(inode->i_mode);
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
@@ -3360,7 +3360,7 @@ static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)

    if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
        f2fs_warn(F2FS_I_SB(inode),
              "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
              "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
              inode->i_ino);
        return -EOPNOTSUPP;
    }
@@ -4144,9 +4144,8 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
                    LLONG_MAX);

    if (ret)
        f2fs_warn(sbi, "%s: The file might be partially decompressed "
                "(errno=%d). Please delete the file.\n",
                __func__, ret);
        f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
              __func__, ret);
out:
    inode_unlock(inode);
    file_end_write(filp);
@@ -4218,9 +4217,8 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
    clear_inode_flag(inode, FI_ENABLE_COMPRESS);

    if (ret)
        f2fs_warn(sbi, "%s: The file might be partially compressed "
                "(errno=%d). Please delete the file.\n",
                __func__, ret);
        f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
              __func__, ret);
out:
    inode_unlock(inode);
    file_end_write(filp);
@@ -1031,8 +1031,8 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,

    if (unlikely(check_valid_map(sbi, segno, offset))) {
        if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
            f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n",
                 blkaddr, source_blkaddr, segno);
            f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
                 blkaddr, source_blkaddr, segno);
            f2fs_bug_on(sbi, 1);
        }
    }
@@ -1261,6 +1261,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
        f2fs_put_page(mpage, 1);
        invalidate_mapping_pages(META_MAPPING(fio.sbi),
                    fio.old_blkaddr, fio.old_blkaddr);
        f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);

        set_page_dirty(fio.encrypted_page);
        if (clear_page_dirty_for_io(fio.encrypted_page))
@@ -1450,10 +1451,8 @@ next_step:

        if (phase == 3) {
            inode = f2fs_iget(sb, dni.ino);
            if (IS_ERR(inode) || is_bad_inode(inode)) {
                set_sbi_flag(sbi, SBI_NEED_FSCK);
            if (IS_ERR(inode) || is_bad_inode(inode))
                continue;
            }

            if (!down_write_trylock(
                &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
@@ -18,6 +18,10 @@

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif

void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
    if (is_inode_flag_set(inode, FI_NEW_INODE))
@@ -494,6 +498,11 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
    if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
        goto make_now;

#ifdef CONFIG_F2FS_FS_COMPRESSION
    if (ino == F2FS_COMPRESS_INO(sbi))
        goto make_now;
#endif

    ret = do_read_inode(inode);
    if (ret)
        goto bad_inode;
@@ -504,6 +513,12 @@ make_now:
    } else if (ino == F2FS_META_INO(sbi)) {
        inode->i_mapping->a_ops = &f2fs_meta_aops;
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
    } else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
        inode->i_mapping->a_ops = &f2fs_compress_aops;
#endif
        mapping_set_gfp_mask(inode->i_mapping,
            GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
    } else if (S_ISREG(inode->i_mode)) {
        inode->i_op = &f2fs_file_inode_operations;
        inode->i_fop = &f2fs_file_operations;
@@ -723,8 +738,12 @@ void f2fs_evict_inode(struct inode *inode)
    trace_f2fs_evict_inode(inode);
    truncate_inode_pages_final(&inode->i_data);

    if (test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
        f2fs_invalidate_compress_pages(sbi, inode->i_ino);

    if (inode->i_ino == F2FS_NODE_INO(sbi) ||
            inode->i_ino == F2FS_META_INO(sbi))
            inode->i_ino == F2FS_META_INO(sbi) ||
            inode->i_ino == F2FS_COMPRESS_INO(sbi))
        goto out_clear;

    f2fs_bug_on(sbi, get_dirty_pages(inode));
@@ -97,6 +97,20 @@ bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
        mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
                sizeof(struct discard_cmd)) >> PAGE_SHIFT;
        res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
    } else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
        unsigned long free_ram = val.freeram;

        /*
         * free memory is lower than watermark or cached page count
         * exceed threshold, deny caching compress page.
         */
        res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
            (COMPRESS_MAPPING(sbi)->nrpages <
                free_ram * sbi->compress_percent / 100);
#else
        res = false;
#endif
    } else {
        if (!sbi->sb->s_bdi->wb.dirty_exceeded)
            return true;
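The COMPRESS_PAGE branch above gates caching on two percentage thresholds: free memory must stay above the watermark, and the cache must stay below its share of free RAM. A standalone rehearsal of that arithmetic; all the numbers are made up, though 20/20 matches COMPRESS_WATERMARK/COMPRESS_PERCENT:

    #include <stdio.h>
    #include <stdbool.h>

    int main(void)
    {
        unsigned long avail_ram = 1000000; /* pages */
        unsigned long free_ram  = 300000;  /* pages */
        unsigned long cached    = 40000;   /* pages already in the cache  */
        unsigned int  watermark = 20;      /* COMPRESS_WATERMARK, percent */
        unsigned int  percent   = 20;      /* COMPRESS_PERCENT, percent   */

        /* Same shape as the kernel test: cache only while memory is
         * plentiful and the cache has not outgrown its allowance. */
        bool res = (free_ram > avail_ram * watermark / 100) &&
                   (cached < free_ram * percent / 100);

        printf("free=%lu watermark=%lu cached=%lu limit=%lu -> %s\n",
               free_ram, avail_ram * watermark / 100,
               cached, free_ram * percent / 100,
               res ? "may cache" : "deny caching");
        return 0;
    }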
@@ -38,6 +38,9 @@
/* return value for read_node_page */
#define LOCKED_PAGE 1

/* check pinned file's alignment status of physical blocks */
#define FILE_NOT_ALIGNED    1

/* For flag in struct node_info */
enum {
    IS_CHECKPOINTED,    /* is it checkpointed before? */
@@ -148,6 +151,7 @@ enum mem_type {
    EXTENT_CACHE,   /* indicates extent cache */
    INMEM_PAGES,    /* indicates inmemory pages */
    DISCARD_CACHE,  /* indicates memory of cached discard cmds */
    COMPRESS_PAGE,  /* indicates memory of cached compressed pages */
    BASE_CHECK, /* check kernel status */
};

@@ -45,6 +45,10 @@

static struct kmem_cache *fsync_entry_slab;

#ifdef CONFIG_UNICODE
extern struct kmem_cache *f2fs_cf_name_slab;
#endif

bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
    s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);
@@ -145,7 +149,7 @@ static int init_recovered_filename(const struct inode *dir,
        f2fs_hash_filename(dir, fname);
#ifdef CONFIG_UNICODE
        /* Case-sensitive match is fine for recovery */
        kfree(fname->cf_name.name);
        kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
        fname->cf_name.name = NULL;
#endif
    } else {
@@ -782,6 +786,8 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
    }

#ifdef CONFIG_QUOTA
    /* Needed for iput() to work correctly and not trash data */
    sbi->sb->s_flags |= SB_ACTIVE;
    /* Turn on quotas so that they are updated correctly */
    quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif
@@ -809,8 +815,10 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
    err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
    if (!err)
        f2fs_bug_on(sbi, !list_empty(&inode_list));
    else
        f2fs_bug_on(sbi, sbi->sb->s_flags & SB_ACTIVE);
    else {
        /* restore s_flags to let iput() trash data */
        sbi->sb->s_flags = s_flags;
    }
skip:
    fix_curseg_write_pointer = !check_only || list_empty(&inode_list);

@@ -2322,6 +2322,7 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
|
||||
return;
|
||||
|
||||
invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
|
||||
f2fs_invalidate_compress_page(sbi, addr);
|
||||
|
||||
/* add it into sit main buffer */
|
||||
down_write(&sit_i->sentry_lock);
|
||||
@@ -3290,6 +3291,9 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
|
||||
if (fio->type == DATA) {
|
||||
struct inode *inode = fio->page->mapping->host;
|
||||
|
||||
if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
|
||||
return CURSEG_COLD_DATA_PINNED;
|
||||
|
||||
if (page_private_gcing(fio->page)) {
|
||||
if (fio->sbi->am.atgc_enabled &&
|
||||
(fio->io_type == FS_DATA_IO) &&
|
||||
@@ -3469,9 +3473,11 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
|
||||
reallocate:
|
||||
f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
|
||||
&fio->new_blkaddr, sum, type, fio);
|
||||
if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
|
||||
if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
|
||||
invalidate_mapping_pages(META_MAPPING(fio->sbi),
|
||||
fio->old_blkaddr, fio->old_blkaddr);
|
||||
f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
|
||||
}
|
||||
|
||||
/* writeout dirty page into bdev */
|
||||
f2fs_submit_page_write(fio);
|
||||
@@ -3661,6 +3667,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
|
||||
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
|
||||
invalidate_mapping_pages(META_MAPPING(sbi),
|
||||
old_blkaddr, old_blkaddr);
|
||||
f2fs_invalidate_compress_page(sbi, old_blkaddr);
|
||||
if (!from_gc)
|
||||
update_segment_mtime(sbi, old_blkaddr, 0);
|
||||
update_sit_entry(sbi, old_blkaddr, -1);
|
||||
@@ -3920,7 +3927,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
|
||||
/* sanity check for summary blocks */
|
||||
if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
|
||||
sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
|
||||
f2fs_err(sbi, "invalid journal entries nats %u sits %u\n",
|
||||
f2fs_err(sbi, "invalid journal entries nats %u sits %u",
|
||||
nats_in_cursum(nat_j), sits_in_cursum(sit_j));
|
||||
return -EINVAL;
|
||||
}
|
||||
@@ -4683,6 +4690,10 @@ static int sanity_check_curseg(struct f2fs_sb_info *sbi)
|
||||
struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
|
||||
unsigned int blkofs = curseg->next_blkoff;
|
||||
|
||||
if (f2fs_sb_has_readonly(sbi) &&
|
||||
i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
|
||||
continue;
|
||||
|
||||
sanity_check_seg_type(sbi, curseg->seg_type);
|
||||
|
||||
if (f2fs_test_bit(blkofs, se->cur_valid_map))
|
||||
|
fs/f2fs/super.c
@@ -150,6 +150,7 @@ enum {
 	Opt_compress_extension,
 	Opt_compress_chksum,
 	Opt_compress_mode,
+	Opt_compress_cache,
 	Opt_atgc,
 	Opt_gc_merge,
 	Opt_nogc_merge,
@@ -224,6 +225,7 @@ static match_table_t f2fs_tokens = {
 	{Opt_compress_extension, "compress_extension=%s"},
 	{Opt_compress_chksum, "compress_chksum"},
 	{Opt_compress_mode, "compress_mode=%s"},
+	{Opt_compress_cache, "compress_cache"},
 	{Opt_atgc, "atgc"},
 	{Opt_gc_merge, "gc_merge"},
 	{Opt_nogc_merge, "nogc_merge"},
@@ -275,6 +277,24 @@ static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,

 	return 0;
 }
+
+struct kmem_cache *f2fs_cf_name_slab;
+static int __init f2fs_create_casefold_cache(void)
+{
+	f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
+							F2FS_NAME_LEN);
+	if (!f2fs_cf_name_slab)
+		return -ENOMEM;
+	return 0;
+}
+
+static void f2fs_destroy_casefold_cache(void)
+{
+	kmem_cache_destroy(f2fs_cf_name_slab);
+}
+#else
+static int __init f2fs_create_casefold_cache(void) { return 0; }
+static void f2fs_destroy_casefold_cache(void) { }
 #endif

 static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
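Every buffer freed through f2fs_cf_name_slab (see the recovery.c hunk earlier) now has to come from this same cache. A hedged sketch of the allocation side that pairs with it, modelled on the dir.c half of the same upstream change (function name approximate):

/* Sketch: allocation side pairing with kmem_cache_free() above. */
static int init_casefolded_name_sketch(struct f2fs_filename *fname)
{
	fname->cf_name.name = kmem_cache_alloc(f2fs_cf_name_slab, GFP_NOFS);
	if (!fname->cf_name.name)
		return -ENOMEM;
	/* ... utf8_casefold() into the buffer, set cf_name.len ... */
	return 0;
}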
@@ -555,7 +575,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 	int ret;

 	if (!options)
-		return 0;
+		goto default_check;

 	while ((p = strsep(&options, ",")) != NULL) {
 		int token;
@@ -1066,12 +1086,16 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 			}
 			kfree(name);
 			break;
+		case Opt_compress_cache:
+			set_opt(sbi, COMPRESS_CACHE);
+			break;
 #else
 		case Opt_compress_algorithm:
 		case Opt_compress_log_size:
 		case Opt_compress_extension:
 		case Opt_compress_chksum:
 		case Opt_compress_mode:
+		case Opt_compress_cache:
 			f2fs_info(sbi, "compression options not supported");
 			break;
 #endif
@@ -1090,6 +1114,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 			return -EINVAL;
 		}
 	}
+default_check:
 #ifdef CONFIG_QUOTA
 	if (f2fs_check_quota_options(sbi))
 		return -EINVAL;
@@ -1153,7 +1178,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 	}

 	if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
-		f2fs_err(sbi, "LFS not compatible with checkpoint=disable\n");
+		f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
 		return -EINVAL;
 	}

@@ -1162,6 +1187,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 	 */
 	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
 		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
+
+	if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
+		f2fs_err(sbi, "Allow to mount readonly mode only");
+		return -EROFS;
+	}
 	return 0;
 }

@@ -1406,6 +1436,8 @@ static void f2fs_put_super(struct super_block *sb)

 	f2fs_bug_on(sbi, sbi->fsync_node_num);

+	f2fs_destroy_compress_inode(sbi);
+
 	iput(sbi->node_inode);
 	sbi->node_inode = NULL;

@@ -1675,6 +1707,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
 		seq_printf(seq, ",compress_mode=%s", "fs");
 	else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
 		seq_printf(seq, ",compress_mode=%s", "user");
+
+	if (test_opt(sbi, COMPRESS_CACHE))
+		seq_puts(seq, ",compress_cache");
 }
 #endif

@@ -1822,7 +1857,11 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 static void default_options(struct f2fs_sb_info *sbi)
 {
 	/* init some FS parameters */
-	F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
+	if (f2fs_sb_has_readonly(sbi))
+		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
+	else
+		F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
+
 	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
 	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
 	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
@@ -1869,15 +1908,17 @@ static int f2fs_enable_quotas(struct super_block *sb);

 static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 {
+	unsigned int s_flags = sbi->sb->s_flags;
 	struct cp_control cpc;
 	int err = 0;
 	int ret;
 	block_t unusable;

-	if (sbi->sb->s_flags & SB_RDONLY) {
+	if (s_flags & SB_RDONLY) {
 		f2fs_err(sbi, "checkpoint=disable on readonly fs");
 		return -EINVAL;
 	}
+	sbi->sb->s_flags |= SB_ACTIVE;

 	f2fs_update_time(sbi, DISABLE_TIME);

@@ -1895,13 +1936,13 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 	ret = sync_filesystem(sbi->sb);
 	if (ret || err) {
 		err = ret ? ret : err;
-		goto out;
+		goto restore_flag;
 	}

 	unusable = f2fs_get_unusable_blocks(sbi);
 	if (f2fs_disable_cp_again(sbi, unusable)) {
 		err = -EAGAIN;
-		goto out;
+		goto restore_flag;
 	}

 	down_write(&sbi->gc_lock);
@@ -1917,7 +1958,8 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)

 out_unlock:
 	up_write(&sbi->gc_lock);
-out:
+restore_flag:
+	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
 	return err;
 }

@@ -1949,6 +1991,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 	bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
 	bool no_io_align = !F2FS_IO_ALIGNED(sbi);
 	bool no_atgc = !test_opt(sbi, ATGC);
+	bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
 	bool checkpoint_changed;
 #ifdef CONFIG_QUOTA
 	int i, j;
@@ -2004,6 +2047,11 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
 		goto skip;

+	if (f2fs_sb_has_readonly(sbi) && !(*flags & SB_RDONLY)) {
+		err = -EROFS;
+		goto restore_opts;
+	}
+
 #ifdef CONFIG_QUOTA
 	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
 		err = dquot_suspend(sb, -1);
@@ -2041,6 +2089,12 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
 		goto restore_opts;
 	}

+	if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
+		err = -EINVAL;
+		f2fs_warn(sbi, "switch compress_cache option is not allowed");
+		goto restore_opts;
+	}
+
 	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
 		err = -EINVAL;
 		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
@@ -3137,14 +3191,15 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
 	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

-	if (unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
+	if (!f2fs_sb_has_readonly(sbi) &&
+			unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
 			ovp_segments == 0 || reserved_segments == 0)) {
 		f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
 		return 1;
 	}

 	user_block_count = le64_to_cpu(ckpt->user_block_count);
-	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+	segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
+			(f2fs_sb_has_readonly(sbi) ? 1 : 0);
 	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
 	if (!user_block_count || user_block_count >=
 			segment_count_main << log_blocks_per_seg) {
@@ -3175,6 +3230,10 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
 			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
 			return 1;
+
+		if (f2fs_sb_has_readonly(sbi))
+			goto check_data;
+
 		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
 			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
 				le32_to_cpu(ckpt->cur_node_segno[j])) {
@@ -3185,10 +3244,15 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 			}
 		}
 	}
+check_data:
 	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
 		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
 			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
 			return 1;
+
+		if (f2fs_sb_has_readonly(sbi))
+			goto skip_cross;
+
 		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
 			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
 				le32_to_cpu(ckpt->cur_data_segno[j])) {
@@ -3210,7 +3274,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
 			}
 		}
 	}
-
+skip_cross:
 	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
 	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

@@ -3555,7 +3619,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
 #ifdef CONFIG_BLK_DEV_ZONED
 		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
 				!f2fs_sb_has_blkzoned(sbi)) {
-			f2fs_err(sbi, "Zoned block device feature not enabled\n");
+			f2fs_err(sbi, "Zoned block device feature not enabled");
 			return -EINVAL;
 		}
 		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
@@ -3940,10 +4004,14 @@ try_onemore:
 		goto free_node_inode;
 	}

-	err = f2fs_register_sysfs(sbi);
+	err = f2fs_init_compress_inode(sbi);
 	if (err)
 		goto free_root_inode;

+	err = f2fs_register_sysfs(sbi);
+	if (err)
+		goto free_compress_inode;
+
 #ifdef CONFIG_QUOTA
 	/* Enable quota usage during mount */
 	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
@@ -4084,6 +4152,8 @@ free_meta:
 	/* evict some inodes being cached by GC */
 	evict_inodes(sb);
 	f2fs_unregister_sysfs(sbi);
+free_compress_inode:
+	f2fs_destroy_compress_inode(sbi);
 free_root_inode:
 	dput(sb->s_root);
 	sb->s_root = NULL;
@@ -4162,6 +4232,15 @@ static void kill_f2fs_super(struct super_block *sb)
 		f2fs_stop_gc_thread(sbi);
 		f2fs_stop_discard_thread(sbi);

+#ifdef CONFIG_F2FS_FS_COMPRESSION
+		/*
+		 * latter evict_inode() can bypass checking and invalidating
+		 * compress inode cache.
+		 */
+		if (test_opt(sbi, COMPRESS_CACHE))
+			truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
+#endif
+
 		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
 			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
 			struct cp_control cpc = {
@@ -4261,7 +4340,12 @@ static int __init init_f2fs_fs(void)
 	err = f2fs_init_compress_cache();
 	if (err)
 		goto free_compress_mempool;
+	err = f2fs_create_casefold_cache();
+	if (err)
+		goto free_compress_cache;
 	return 0;
+free_compress_cache:
+	f2fs_destroy_compress_cache();
 free_compress_mempool:
 	f2fs_destroy_compress_mempool();
 free_bioset:
@@ -4297,6 +4381,7 @@ fail:

 static void __exit exit_f2fs_fs(void)
 {
+	f2fs_destroy_casefold_cache();
 	f2fs_destroy_compress_cache();
 	f2fs_destroy_compress_mempool();
 	f2fs_destroy_bioset();
fs/f2fs/sysfs.c
@@ -158,6 +158,9 @@ static ssize_t features_show(struct f2fs_attr *a,
 	if (f2fs_sb_has_casefold(sbi))
 		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "casefold");
+	if (f2fs_sb_has_readonly(sbi))
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+				len ? ", " : "", "readonly");
 	if (f2fs_sb_has_compression(sbi))
 		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
 				len ? ", " : "", "compression");
@@ -563,46 +566,49 @@ static void f2fs_sb_release(struct kobject *kobj)
 	complete(&sbi->s_kobj_unregister);
 }

-enum feat_id {
-	FEAT_CRYPTO = 0,
-	FEAT_BLKZONED,
-	FEAT_ATOMIC_WRITE,
-	FEAT_EXTRA_ATTR,
-	FEAT_PROJECT_QUOTA,
-	FEAT_INODE_CHECKSUM,
-	FEAT_FLEXIBLE_INLINE_XATTR,
-	FEAT_QUOTA_INO,
-	FEAT_INODE_CRTIME,
-	FEAT_LOST_FOUND,
-	FEAT_VERITY,
-	FEAT_SB_CHECKSUM,
-	FEAT_CASEFOLD,
-	FEAT_COMPRESSION,
-	FEAT_TEST_DUMMY_ENCRYPTION_V2,
-};
-
+/*
+ * Note that there are three feature list entries:
+ * 1) /sys/fs/f2fs/features
+ *   : shows runtime features supported by in-kernel f2fs along with Kconfig.
+ *     - ref. F2FS_FEATURE_RO_ATTR()
+ *
+ * 2) /sys/fs/f2fs/$s_id/features <deprecated>
+ *   : shows on-disk features enabled by mkfs.f2fs, used for old kernels. This
+ *     won't add new feature anymore, and thus, users should check entries in 3)
+ *     instead of this 2).
+ *
+ * 3) /sys/fs/f2fs/$s_id/feature_list
+ *   : shows on-disk features enabled by mkfs.f2fs per instance, which follows
+ *     sysfs entry rule where each entry should expose single value.
+ *     This list covers old feature list provided by 2) and beyond. Therefore,
+ *     please add new on-disk feature in this list only.
+ *     - ref. F2FS_SB_FEATURE_RO_ATTR()
+ */
 static ssize_t f2fs_feature_show(struct f2fs_attr *a,
 		struct f2fs_sb_info *sbi, char *buf)
 {
-	switch (a->id) {
-	case FEAT_CRYPTO:
-	case FEAT_BLKZONED:
-	case FEAT_ATOMIC_WRITE:
-	case FEAT_EXTRA_ATTR:
-	case FEAT_PROJECT_QUOTA:
-	case FEAT_INODE_CHECKSUM:
-	case FEAT_FLEXIBLE_INLINE_XATTR:
-	case FEAT_QUOTA_INO:
-	case FEAT_INODE_CRTIME:
-	case FEAT_LOST_FOUND:
-	case FEAT_VERITY:
-	case FEAT_SB_CHECKSUM:
-	case FEAT_CASEFOLD:
-	case FEAT_COMPRESSION:
-	case FEAT_TEST_DUMMY_ENCRYPTION_V2:
-		return sprintf(buf, "supported\n");
-	}
-	return 0;
+	return sprintf(buf, "supported\n");
+}
+
+#define F2FS_FEATURE_RO_ATTR(_name)				\
+static struct f2fs_attr f2fs_attr_##_name = {			\
+	.attr = {.name = __stringify(_name), .mode = 0444 },	\
+	.show	= f2fs_feature_show,				\
+}
+
+static ssize_t f2fs_sb_feature_show(struct f2fs_attr *a,
+		struct f2fs_sb_info *sbi, char *buf)
+{
+	if (F2FS_HAS_FEATURE(sbi, a->id))
+		return sprintf(buf, "supported\n");
+	return sprintf(buf, "unsupported\n");
 }

+#define F2FS_SB_FEATURE_RO_ATTR(_name, _feat)			\
+static struct f2fs_attr f2fs_attr_sb_##_name = {		\
+	.attr = {.name = __stringify(_name), .mode = 0444 },	\
+	.show	= f2fs_sb_feature_show,				\
+	.id	= F2FS_FEATURE_##_feat,				\
+}
+
 #define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
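To make the new macro concrete, F2FS_SB_FEATURE_RO_ATTR(readonly, RO) expands to roughly the following attribute (illustrative expansion, not an extra definition in the patch):

/* Illustrative expansion of F2FS_SB_FEATURE_RO_ATTR(readonly, RO). */
static struct f2fs_attr f2fs_attr_sb_readonly = {
	.attr = {.name = "readonly", .mode = 0444 },
	.show	= f2fs_sb_feature_show,
	.id	= F2FS_FEATURE_RO,
};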
@@ -622,13 +628,6 @@ static struct f2fs_attr f2fs_attr_##_name = { \
 #define F2FS_GENERAL_RO_ATTR(name) \
 static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)

-#define F2FS_FEATURE_RO_ATTR(_name, _id)			\
-static struct f2fs_attr f2fs_attr_##_name = {			\
-	.attr = {.name = __stringify(_name), .mode = 0444 },	\
-	.show	= f2fs_feature_show,				\
-	.id	= _id,						\
-}
-
 #define F2FS_STAT_ATTR(_struct_type, _struct_name, _name, _elname)	\
 static struct f2fs_attr f2fs_attr_##_name = {			\
 	.attr = {.name = __stringify(_name), .mode = 0444 },	\
@@ -702,31 +701,39 @@ F2FS_GENERAL_RO_ATTR(avg_vblocks);
 #endif

 #ifdef CONFIG_FS_ENCRYPTION
-F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
-F2FS_FEATURE_RO_ATTR(test_dummy_encryption_v2, FEAT_TEST_DUMMY_ENCRYPTION_V2);
+F2FS_FEATURE_RO_ATTR(encryption);
+F2FS_FEATURE_RO_ATTR(test_dummy_encryption_v2);
+#ifdef CONFIG_UNICODE
+F2FS_FEATURE_RO_ATTR(encrypted_casefold);
+#endif
 #endif /* CONFIG_FS_ENCRYPTION */
 #ifdef CONFIG_BLK_DEV_ZONED
-F2FS_FEATURE_RO_ATTR(block_zoned, FEAT_BLKZONED);
+F2FS_FEATURE_RO_ATTR(block_zoned);
 #endif
-F2FS_FEATURE_RO_ATTR(atomic_write, FEAT_ATOMIC_WRITE);
-F2FS_FEATURE_RO_ATTR(extra_attr, FEAT_EXTRA_ATTR);
-F2FS_FEATURE_RO_ATTR(project_quota, FEAT_PROJECT_QUOTA);
-F2FS_FEATURE_RO_ATTR(inode_checksum, FEAT_INODE_CHECKSUM);
-F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR);
-F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO);
-F2FS_FEATURE_RO_ATTR(inode_crtime, FEAT_INODE_CRTIME);
-F2FS_FEATURE_RO_ATTR(lost_found, FEAT_LOST_FOUND);
+F2FS_FEATURE_RO_ATTR(atomic_write);
+F2FS_FEATURE_RO_ATTR(extra_attr);
+F2FS_FEATURE_RO_ATTR(project_quota);
+F2FS_FEATURE_RO_ATTR(inode_checksum);
+F2FS_FEATURE_RO_ATTR(flexible_inline_xattr);
+F2FS_FEATURE_RO_ATTR(quota_ino);
+F2FS_FEATURE_RO_ATTR(inode_crtime);
+F2FS_FEATURE_RO_ATTR(lost_found);
 #ifdef CONFIG_FS_VERITY
-F2FS_FEATURE_RO_ATTR(verity, FEAT_VERITY);
+F2FS_FEATURE_RO_ATTR(verity);
 #endif
-F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM);
-F2FS_FEATURE_RO_ATTR(casefold, FEAT_CASEFOLD);
+F2FS_FEATURE_RO_ATTR(sb_checksum);
+#ifdef CONFIG_UNICODE
+F2FS_FEATURE_RO_ATTR(casefold);
+#endif
+F2FS_FEATURE_RO_ATTR(readonly);
 #ifdef CONFIG_F2FS_FS_COMPRESSION
-F2FS_FEATURE_RO_ATTR(compression, FEAT_COMPRESSION);
+F2FS_FEATURE_RO_ATTR(compression);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_written_block, compr_written_block);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_saved_block, compr_saved_block);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, compr_new_inode, compr_new_inode);
 #endif
+F2FS_FEATURE_RO_ATTR(pin_file);

 /* For ATGC */
 F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_candidate_ratio, candidate_ratio);
 F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_candidate_count, max_candidate_count);
@@ -813,7 +820,10 @@ static struct attribute *f2fs_feat_attrs[] = {
 #ifdef CONFIG_FS_ENCRYPTION
 	ATTR_LIST(encryption),
 	ATTR_LIST(test_dummy_encryption_v2),
+#ifdef CONFIG_UNICODE
+	ATTR_LIST(encrypted_casefold),
+#endif
 #endif /* CONFIG_FS_ENCRYPTION */
 #ifdef CONFIG_BLK_DEV_ZONED
 	ATTR_LIST(block_zoned),
 #endif
@@ -829,10 +839,14 @@ static struct attribute *f2fs_feat_attrs[] = {
 	ATTR_LIST(verity),
 #endif
 	ATTR_LIST(sb_checksum),
+#ifdef CONFIG_UNICODE
 	ATTR_LIST(casefold),
+#endif
+	ATTR_LIST(readonly),
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 	ATTR_LIST(compression),
 #endif
+	ATTR_LIST(pin_file),
 	NULL,
 };
 ATTRIBUTE_GROUPS(f2fs_feat);
@@ -844,6 +858,40 @@ static struct attribute *f2fs_stat_attrs[] = {
 };
 ATTRIBUTE_GROUPS(f2fs_stat);

+F2FS_SB_FEATURE_RO_ATTR(encryption, ENCRYPT);
+F2FS_SB_FEATURE_RO_ATTR(block_zoned, BLKZONED);
+F2FS_SB_FEATURE_RO_ATTR(extra_attr, EXTRA_ATTR);
+F2FS_SB_FEATURE_RO_ATTR(project_quota, PRJQUOTA);
+F2FS_SB_FEATURE_RO_ATTR(inode_checksum, INODE_CHKSUM);
+F2FS_SB_FEATURE_RO_ATTR(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
+F2FS_SB_FEATURE_RO_ATTR(quota_ino, QUOTA_INO);
+F2FS_SB_FEATURE_RO_ATTR(inode_crtime, INODE_CRTIME);
+F2FS_SB_FEATURE_RO_ATTR(lost_found, LOST_FOUND);
+F2FS_SB_FEATURE_RO_ATTR(verity, VERITY);
+F2FS_SB_FEATURE_RO_ATTR(sb_checksum, SB_CHKSUM);
+F2FS_SB_FEATURE_RO_ATTR(casefold, CASEFOLD);
+F2FS_SB_FEATURE_RO_ATTR(compression, COMPRESSION);
+F2FS_SB_FEATURE_RO_ATTR(readonly, RO);
+
+static struct attribute *f2fs_sb_feat_attrs[] = {
+	ATTR_LIST(sb_encryption),
+	ATTR_LIST(sb_block_zoned),
+	ATTR_LIST(sb_extra_attr),
+	ATTR_LIST(sb_project_quota),
+	ATTR_LIST(sb_inode_checksum),
+	ATTR_LIST(sb_flexible_inline_xattr),
+	ATTR_LIST(sb_quota_ino),
+	ATTR_LIST(sb_inode_crtime),
+	ATTR_LIST(sb_lost_found),
+	ATTR_LIST(sb_verity),
+	ATTR_LIST(sb_sb_checksum),
+	ATTR_LIST(sb_casefold),
+	ATTR_LIST(sb_compression),
+	ATTR_LIST(sb_readonly),
+	NULL,
+};
+ATTRIBUTE_GROUPS(f2fs_sb_feat);
+
 static const struct sysfs_ops f2fs_attr_ops = {
 	.show	= f2fs_attr_show,
 	.store	= f2fs_attr_store,
@@ -910,6 +958,33 @@ static struct kobj_type f2fs_stat_ktype = {
 	.release	= f2fs_stat_kobj_release,
 };

+static ssize_t f2fs_sb_feat_attr_show(struct kobject *kobj,
+				struct attribute *attr, char *buf)
+{
+	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+							s_feature_list_kobj);
+	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+	return a->show ? a->show(a, sbi, buf) : 0;
+}
+
+static void f2fs_feature_list_kobj_release(struct kobject *kobj)
+{
+	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+							s_feature_list_kobj);
+	complete(&sbi->s_feature_list_kobj_unregister);
+}
+
+static const struct sysfs_ops f2fs_feature_list_attr_ops = {
+	.show	= f2fs_sb_feat_attr_show,
+};
+
+static struct kobj_type f2fs_feature_list_ktype = {
+	.default_groups = f2fs_sb_feat_groups,
+	.sysfs_ops	= &f2fs_feature_list_attr_ops,
+	.release	= f2fs_feature_list_kobj_release,
+};
+
 static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
 						void *offset)
 {
@@ -1126,6 +1201,14 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
 	if (err)
 		goto put_stat_kobj;

+	sbi->s_feature_list_kobj.kset = &f2fs_kset;
+	init_completion(&sbi->s_feature_list_kobj_unregister);
+	err = kobject_init_and_add(&sbi->s_feature_list_kobj,
+					&f2fs_feature_list_ktype,
+					&sbi->s_kobj, "feature_list");
+	if (err)
+		goto put_feature_list_kobj;
+
 	if (f2fs_proc_root)
 		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

@@ -1140,6 +1223,9 @@ int f2fs_register_sysfs(struct f2fs_sb_info *sbi)
 				victim_bits_seq_show, sb);
 	}
 	return 0;
+put_feature_list_kobj:
+	kobject_put(&sbi->s_feature_list_kobj);
+	wait_for_completion(&sbi->s_feature_list_kobj_unregister);
 put_stat_kobj:
 	kobject_put(&sbi->s_stat_kobj);
 	wait_for_completion(&sbi->s_stat_kobj_unregister);
@@ -1162,6 +1248,9 @@ void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi)
 	kobject_del(&sbi->s_stat_kobj);
 	kobject_put(&sbi->s_stat_kobj);
 	wait_for_completion(&sbi->s_stat_kobj_unregister);
+	kobject_del(&sbi->s_feature_list_kobj);
+	kobject_put(&sbi->s_feature_list_kobj);
+	wait_for_completion(&sbi->s_feature_list_kobj_unregister);

 	kobject_del(&sbi->s_kobj);
 	kobject_put(&sbi->s_kobj);
@@ -110,6 +110,7 @@ enum {
 	CFTYPE_NO_PREFIX	= (1 << 3),	/* (DON'T USE FOR NEW FILES) no subsys prefix */
 	CFTYPE_WORLD_WRITABLE	= (1 << 4),	/* (DON'T USE FOR NEW FILES) S_IWUGO */
 	CFTYPE_DEBUG		= (1 << 5),	/* create when cgroup_debug */
+	CFTYPE_PRESSURE		= (1 << 6),	/* only if pressure feature is enabled */

 	/* internal flags, do not use outside cgroup core proper */
 	__CFTYPE_ONLY_ON_DFL	= (1 << 16),	/* only on default hierarchy */

@@ -676,6 +676,8 @@ static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
 	return &cgrp->psi;
 }

+bool cgroup_psi_enabled(void);
+
 static inline void cgroup_init_kthreadd(void)
 {
 	/*
@@ -735,6 +737,11 @@ static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
 	return NULL;
 }

+static inline bool cgroup_psi_enabled(void)
+{
+	return false;
+}
+
 static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
 					       struct cgroup *ancestor)
 {
@@ -34,6 +34,7 @@
 #define F2FS_ROOT_INO(sbi)	((sbi)->root_ino_num)
 #define F2FS_NODE_INO(sbi)	((sbi)->node_ino_num)
 #define F2FS_META_INO(sbi)	((sbi)->meta_ino_num)
+#define F2FS_COMPRESS_INO(sbi)	(NM_I(sbi)->max_nid)

 #define F2FS_MAX_QUOTAS		3
@@ -32,18 +32,17 @@
 */
 #define IOMMU_PRIV	(1 << 5)
 /*
- * Non-coherent masters can use this page protection flag to set cacheable
- * memory attributes for only a transparent outer level of cache, also known as
- * the last-level or system cache.
+ * Allow caching in a transparent outer level of cache, also known as
+ * the last-level or system cache, with a read/write allocation policy.
+ * Does not depend on IOMMU_CACHE. Incompatible with IOMMU_SYS_CACHE_NWA.
 */
-#define IOMMU_SYS_CACHE_ONLY	(1 << 6)
+#define IOMMU_SYS_CACHE	(1 << 6)
 /*
- * Non-coherent masters can use this page protection flag to set cacheable
- * memory attributes with a no write allocation cache policy for only a
- * transparent outer level of cache, also known as the last-level or system
- * cache.
+ * Allow caching in a transparent outer level of cache, also known as
+ * the last-level or system cache, with a read allocation policy.
+ * Does not depend on IOMMU_CACHE. Incompatible with IOMMU_SYS_CACHE.
 */
-#define IOMMU_SYS_CACHE_ONLY_NWA	(1 << 7)
+#define IOMMU_SYS_CACHE_NWA	(1 << 7)

 struct iommu_ops;
 struct iommu_group;
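A non-coherent master would request the outer-cache attribute at map time; a minimal sketch against the 5.10 iommu_map() signature (domain and addresses are placeholders):

#include <linux/iommu.h>

/* Sketch: map one page cacheable only in the last-level/system cache. */
static int map_with_syscache(struct iommu_domain *domain,
			     unsigned long iova, phys_addr_t paddr)
{
	/* IOMMU_SYS_CACHE does not require IOMMU_CACHE, per the comment above. */
	return iommu_map(domain, iova, paddr, PAGE_SIZE,
			 IOMMU_READ | IOMMU_WRITE | IOMMU_SYS_CACHE);
}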
@@ -72,6 +72,7 @@ enum irqchip_irq_state;
 *			  mechanism and from core side polling.
 * IRQ_DISABLE_UNLAZY	- Disable lazy irq disable
 * IRQ_HIDDEN		- Don't show up in /proc/interrupts
+ * IRQ_RAW		- Skip tick management and irqtime accounting
 */
 enum {
 	IRQ_TYPE_NONE		= 0x00000000,
@@ -99,6 +100,7 @@ enum {
 	IRQ_IS_POLLED		= (1 << 18),
 	IRQ_DISABLE_UNLAZY	= (1 << 19),
 	IRQ_HIDDEN		= (1 << 20),
+	IRQ_RAW			= (1 << 21),
 };

 #define IRQF_MODIFY_MASK	\
@@ -751,6 +753,9 @@ void
 irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data);

+void __irq_modify_status(unsigned int irq, unsigned long clr,
+			 unsigned long set, unsigned long mask);
+
 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set);

 static inline void irq_set_status_flags(unsigned int irq, unsigned long set)
@@ -658,6 +658,10 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
 int rproc_boot(struct rproc *rproc);
 void rproc_shutdown(struct rproc *rproc);
 void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
+
+/* from remoteproc_coredump.c */
+void rproc_coredump_cleanup(struct rproc *rproc);
+void rproc_coredump(struct rproc *rproc);
 void rproc_coredump_using_sections(struct rproc *rproc);
 int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
 int rproc_coredump_add_custom_segment(struct rproc *rproc,
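With the coredump helpers exported (per the FROMLIST backport in this merge), a vendor remoteproc driver can pre-register the regions it wants captured; a hedged sketch (da and size would come from the driver's own resource table):

/* Sketch: register a memory region for the next rproc_coredump(). */
static int vendor_add_dump_region(struct rproc *rproc,
				  dma_addr_t da, size_t size)
{
	return rproc_coredump_add_segment(rproc, da, size);
}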
@@ -17,6 +17,7 @@ struct binder_alloc;
 struct binder_proc;
 struct binder_thread;
 struct binder_transaction_data;
+struct seq_file;
 DECLARE_HOOK(android_vh_binder_transaction_init,
 	TP_PROTO(struct binder_transaction *t),
 	TP_ARGS(t));
@@ -65,6 +66,10 @@ DECLARE_HOOK(android_vh_binder_new_ref,
 DECLARE_HOOK(android_vh_binder_del_ref,
 	TP_PROTO(struct task_struct *proc, uint32_t ref_desc),
 	TP_ARGS(proc, ref_desc));
+DECLARE_HOOK(android_vh_binder_print_transaction_info,
+	TP_PROTO(struct seq_file *m, struct binder_proc *proc,
+		 const char *prefix, struct binder_transaction *t),
+	TP_ARGS(m, proc, prefix, t));

 /* macro versions of hooks are no longer required */
@@ -10,16 +10,16 @@
 #include <linux/tracepoint.h>
 #include <trace/hooks/vendor_hooks.h>

-#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
 struct printk_ringbuffer;
 struct printk_record;

 DECLARE_HOOK(android_vh_logbuf,
 	TP_PROTO(struct printk_ringbuffer *rb, struct printk_record *r),
 	TP_ARGS(rb, r))
-#else
-#define trace_android_vh_logbuf(rb, r)
-#endif
+
+DECLARE_HOOK(android_vh_logbuf_pr_cont,
+	TP_PROTO(struct printk_record *r, size_t text_len),
+	TP_ARGS(r, text_len))

 #endif /* _TRACE_HOOK_LOGBUF_H */
 /* This part must be outside protection */
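A vendor logger attaches to the new hook through the register helper that DECLARE_HOOK() generates; a sketch of a probe (the first parameter is the tracepoint's private data, and the body is illustrative):

/* Sketch: probe matching the android_vh_logbuf_pr_cont prototype. */
static void probe_logbuf_pr_cont(void *data,
				 struct printk_record *r, size_t text_len)
{
	/* text_len is the portion just appended by this pr_cont() call. */
}

static int __init vendor_logbuf_init(void)
{
	return register_trace_android_vh_logbuf_pr_cont(
			probe_logbuf_pr_cont, NULL);
}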
@@ -20,6 +20,23 @@ DECLARE_HOOK(android_vh_try_to_freeze_todo_unfrozen,
 	TP_PROTO(struct task_struct *p),
 	TP_ARGS(p));

+enum freq_qos_req_type;
+struct freq_qos_request;
+struct freq_constraints;
+
+DECLARE_HOOK(android_vh_freq_qos_add_request,
+	TP_PROTO(struct freq_constraints *qos, struct freq_qos_request *req,
+		 enum freq_qos_req_type type, int value, int ret),
+	TP_ARGS(qos, req, type, value, ret));
+
+DECLARE_HOOK(android_vh_freq_qos_update_request,
+	TP_PROTO(struct freq_qos_request *req, int value),
+	TP_ARGS(req, value));
+
+DECLARE_HOOK(android_vh_freq_qos_remove_request,
+	TP_PROTO(struct freq_qos_request *req),
+	TP_ARGS(req));
+
 /* macro versions of hooks are no longer required */

 #endif /* _TRACE_HOOK_POWER_H */
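These three hooks fire inside freq_qos_add/update/remove_request() (see the kernel/power/qos.c hunks later in this diff). A vendor module attaches in the usual way, e.g. (sketch; the handler body is illustrative):

/* Sketch: observe QoS updates from a vendor module. */
static void probe_freq_qos_update(void *data,
				  struct freq_qos_request *req, int value)
{
	pr_debug("freq_qos: request updated to %d\n", value);
}

static int __init vendor_qos_init(void)
{
	return register_trace_android_vh_freq_qos_update_request(
			probe_freq_qos_update, NULL);
}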
@@ -378,6 +378,13 @@ DECLARE_HOOK(android_vh_force_compatible_post,
 	TP_PROTO(void *unused),
 	TP_ARGS(unused));

+DECLARE_RESTRICTED_HOOK(android_rvh_force_compatible_pre,
+	TP_PROTO(void *unused),
+	TP_ARGS(unused), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_force_compatible_post,
+	TP_PROTO(void *unused),
+	TP_ARGS(unused), 1);
 /* macro versions of hooks are no longer required */

 #endif /* _TRACE_HOOK_SCHED_H */
init/Kconfig
@@ -635,21 +635,6 @@ config PSI_DEFAULT_DISABLED

	  Say N if unsure.

-config PSI_PER_CGROUP_ACCT
-	bool "Enable per-cgroup pressure stall information tracking"
-	default n
-	depends on PSI
-	depends on CGROUPS
-	help
-	  If set, pressure stall information will be tracked for each
-	  individual cgroup. Otherwise, pressure stall information will
-	  be tracked only at the system level under /proc/pressure/.
-
-	  This feature generates overhead that depends on the number of
-	  cgroups in the cgroup v2 hierarchy.
-
-	  Say N if unsure.
-
 endmenu # "CPU/Task time and stats accounting"

 config CPU_ISOLATION
@@ -212,6 +212,22 @@ struct cgroup_namespace init_cgroup_ns = {
 static struct file_system_type cgroup2_fs_type;
 static struct cftype cgroup_base_files[];

+/* cgroup optional features */
+enum cgroup_opt_features {
+#ifdef CONFIG_PSI
+	OPT_FEATURE_PRESSURE,
+#endif
+	OPT_FEATURE_COUNT
+};
+
+static const char *cgroup_opt_feature_names[OPT_FEATURE_COUNT] = {
+#ifdef CONFIG_PSI
+	"pressure",
+#endif
+};
+
+static u16 cgroup_feature_disable_mask __read_mostly;
+
 static int cgroup_apply_control(struct cgroup *cgrp);
 static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
 static void css_task_iter_skip(struct css_task_iter *it,
@@ -3632,6 +3648,18 @@ static void cgroup_pressure_release(struct kernfs_open_file *of)
 {
	psi_trigger_replace(&of->priv, NULL);
 }
+
+bool cgroup_psi_enabled(void)
+{
+	return (cgroup_feature_disable_mask & (1 << OPT_FEATURE_PRESSURE)) == 0;
+}
+
+#else /* CONFIG_PSI */
+bool cgroup_psi_enabled(void)
+{
+	return false;
+}
+
 #endif /* CONFIG_PSI */

 static int cgroup_freeze_show(struct seq_file *seq, void *v)
@@ -3882,6 +3910,8 @@ static int cgroup_addrm_files(struct cgroup_subsys_state *css,
 restart:
	for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
		/* does cft->flags tell us to skip this file on @cgrp? */
+		if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled())
+			continue;
		if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
			continue;
		if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
@@ -3959,6 +3989,9 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)

		WARN_ON(cft->ss || cft->kf_ops);

+		if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled())
+			continue;
+
		if (cft->seq_start)
			kf_ops = &cgroup_kf_ops;
		else
@@ -4895,6 +4928,7 @@ static struct cftype cgroup_base_files[] = {
 #ifdef CONFIG_PSI
	{
		.name = "io.pressure",
+		.flags = CFTYPE_PRESSURE,
		.seq_show = cgroup_io_pressure_show,
		.write = cgroup_io_pressure_write,
		.poll = cgroup_pressure_poll,
@@ -4902,6 +4936,7 @@ static struct cftype cgroup_base_files[] = {
	},
	{
		.name = "memory.pressure",
+		.flags = CFTYPE_PRESSURE,
		.seq_show = cgroup_memory_pressure_show,
		.write = cgroup_memory_pressure_write,
		.poll = cgroup_pressure_poll,
@@ -4909,6 +4944,7 @@ static struct cftype cgroup_base_files[] = {
	},
	{
		.name = "cpu.pressure",
+		.flags = CFTYPE_PRESSURE,
		.seq_show = cgroup_cpu_pressure_show,
		.write = cgroup_cpu_pressure_write,
		.poll = cgroup_pressure_poll,
@@ -6256,6 +6292,15 @@ static int __init cgroup_disable(char *str)
			pr_info("Disabling %s control group subsystem\n",
				ss->name);
		}
+
+		for (i = 0; i < OPT_FEATURE_COUNT; i++) {
+			if (strcmp(token, cgroup_opt_feature_names[i]))
+				continue;
+			cgroup_feature_disable_mask |= 1 << i;
+			pr_info("Disabling %s control group feature\n",
+				cgroup_opt_feature_names[i]);
+			break;
+		}
	}
	return 1;
 }
@@ -6554,6 +6599,9 @@ static ssize_t show_delegatable_files(struct cftype *files, char *buf,
		if (!(cft->flags & CFTYPE_NS_DELEGATABLE))
			continue;

+		if ((cft->flags & CFTYPE_PRESSURE) && !cgroup_psi_enabled())
+			continue;
+
		if (prefix)
			ret += snprintf(buf + ret, size - ret, "%s.", prefix);
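Taken together: booting with cgroup_disable=pressure sets the feature bit in cgroup_feature_disable_mask, cgroup_psi_enabled() turns false, and every cftype tagged CFTYPE_PRESSURE (the three base files above) is skipped at file-creation time. A new pressure-style file only needs the flag; sketch (handler reused for illustration):

/* Sketch: a cftype that vanishes under cgroup_disable=pressure. */
static struct cftype example_pressure_file = {
	.name	  = "example.pressure",
	.flags	  = CFTYPE_PRESSURE,
	.seq_show = cgroup_io_pressure_show,	/* illustrative handler */
};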
@@ -109,6 +109,9 @@ config GENERIC_IRQ_MATRIX_ALLOCATOR
 config GENERIC_IRQ_RESERVATION_MODE
	bool

+config ARCH_WANTS_IRQ_RAW
+	bool
+
 # Support forced irq threading
 config IRQ_FORCED_THREADING
	bool
@@ -1122,7 +1122,8 @@ irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
 }
 EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

-void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
+void __irq_modify_status(unsigned int irq, unsigned long clr,
+			 unsigned long set, unsigned long mask)
 {
	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
@@ -1136,7 +1137,9 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

-	irq_settings_clr_and_set(desc, clr, set);
+	/* Warn when trying to clear or set a bit disallowed by the mask */
+	WARN_ON((clr | set) & ~mask);
+	__irq_settings_clr_and_set(desc, clr, set, mask);

	trigger = irqd_get_trigger_type(&desc->irq_data);

@@ -1159,6 +1162,11 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)

	irq_put_desc_unlock(desc, flags);
 }
+
+void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
+{
+	__irq_modify_status(irq, clr, set, _IRQF_MODIFY_MASK);
+}
 EXPORT_SYMBOL_GPL(irq_modify_status);

 /**
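Because IRQ_RAW is deliberately kept out of IRQF_MODIFY_MASK, plain irq_modify_status() would trip the new WARN and mask the bit away; code that is allowed to set it goes through the wider-mask variant, roughly as the arm/arm64 IPI patches do (sketch; the all-ones mask is illustrative):

/* Sketch: flag a rescheduling IPI as raw, bypassing the modify mask. */
static void mark_ipi_raw(unsigned int virq)
{
	__irq_modify_status(virq, 0, IRQ_RAW, ~0UL);
}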
@@ -140,6 +140,7 @@ static const struct irq_bit_descr irqdesc_states[] = {
	BIT_MASK_DESCR(_IRQ_IS_POLLED),
	BIT_MASK_DESCR(_IRQ_DISABLE_UNLAZY),
	BIT_MASK_DESCR(_IRQ_HIDDEN),
+	BIT_MASK_DESCR(_IRQ_RAW),
 };

 static const struct irq_bit_descr irqdesc_istates[] = {
@@ -667,10 +667,9 @@ int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
 {
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
+	struct irq_desc *desc;
	int ret = 0;

-	irq_enter();
-
 #ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
@@ -680,14 +679,22 @@ int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
-	if (unlikely(!irq || irq >= nr_irqs)) {
+	if (unlikely(!irq || irq >= nr_irqs || !(desc = irq_to_desc(irq)))) {
		ack_bad_irq(irq);
		ret = -EINVAL;
-	} else {
-		generic_handle_irq(irq);
+		goto out;
	}

-	irq_exit();
+	if (IS_ENABLED(CONFIG_ARCH_WANTS_IRQ_RAW) &&
+	    unlikely(irq_settings_is_raw(desc))) {
+		generic_handle_irq_desc(desc);
+	} else {
+		irq_enter();
+		generic_handle_irq_desc(desc);
+		irq_exit();
+	}
+
+out:
	set_irq_regs(old_regs);
	return ret;
 }
@@ -18,6 +18,7 @@ enum {
	_IRQ_IS_POLLED		= IRQ_IS_POLLED,
	_IRQ_DISABLE_UNLAZY	= IRQ_DISABLE_UNLAZY,
	_IRQ_HIDDEN		= IRQ_HIDDEN,
+	_IRQ_RAW		= IRQ_RAW,
	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
 };

@@ -33,14 +34,21 @@ enum {
 #define IRQ_IS_POLLED		GOT_YOU_MORON
 #define IRQ_DISABLE_UNLAZY	GOT_YOU_MORON
 #define IRQ_HIDDEN		GOT_YOU_MORON
+#define IRQ_RAW			GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON

 static inline void
+__irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set, u32 mask)
+{
+	desc->status_use_accessors &= ~(clr & mask);
+	desc->status_use_accessors |= (set & mask);
+}
+
+static inline void
 irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
 {
-	desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK);
-	desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
+	__irq_settings_clr_and_set(desc, clr, set, _IRQF_MODIFY_MASK);
 }

 static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
@@ -174,3 +182,16 @@ static inline bool irq_settings_is_hidden(struct irq_desc *desc)
 {
	return desc->status_use_accessors & _IRQ_HIDDEN;
 }
+
+static inline bool irq_settings_is_raw(struct irq_desc *desc)
+{
+	if (IS_ENABLED(CONFIG_ARCH_WANTS_IRQ_RAW))
+		return desc->status_use_accessors & _IRQ_RAW;
+
+	/*
+	 * Using IRQ_RAW on architectures that don't expect it is
+	 * likely to be wrong.
+	 */
+	WARN_ON_ONCE(1);
+	return false;
+}
@@ -38,6 +38,9 @@
 #include <linux/uaccess.h>
 #include <linux/export.h>
 #include <trace/events/power.h>
+#undef CREATE_TRACE_POINT
+#include <trace/hooks/power.h>
+

 /*
 * locking rule: all changes to constraints or notifiers lists
@@ -546,6 +549,7 @@ int freq_qos_add_request(struct freq_constraints *qos,
		req->type = 0;
	}

+	trace_android_vh_freq_qos_add_request(qos, req, type, value, ret);
	return ret;
 }
 EXPORT_SYMBOL_GPL(freq_qos_add_request);
@@ -570,6 +574,7 @@ int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
			"%s() called for unknown object\n", __func__))
		return -EINVAL;

+	trace_android_vh_freq_qos_update_request(req, new_value);
	if (req->pnode.prio == new_value)
		return 0;

@@ -598,6 +603,7 @@ int freq_qos_remove_request(struct freq_qos_request *req)
			"%s() called for unknown object\n", __func__))
		return -EINVAL;

+	trace_android_vh_freq_qos_remove_request(req);
	ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	req->qos = NULL;
	req->type = 0;
@@ -1958,6 +1958,8 @@ static size_t log_output(int facility, int level, enum log_flags lflags,
		} else {
			prb_commit(&e);
		}
+
+		trace_android_vh_logbuf_pr_cont(&r, text_len);
		return text_len;
	}
 }
@@ -2106,6 +2106,8 @@ void force_compatible_cpus_allowed_ptr(struct task_struct *p)
	 * lock to ensure that the migration succeeds.
	 */
	trace_android_vh_force_compatible_pre(NULL);
+	trace_android_rvh_force_compatible_pre(NULL);
	cpus_read_lock();
	if (!cpumask_available(new_mask))
		goto out_set_mask;
@@ -2131,6 +2132,7 @@ out_set_mask:
 out_free_mask:
	cpus_read_unlock();
	trace_android_vh_force_compatible_post(NULL);
+	trace_android_rvh_force_compatible_post(NULL);
	free_cpumask_var(new_mask);
 }

@@ -2480,9 +2482,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
-			if (!cpu_active(dest_cpu))
-				continue;
-			if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
+			if (is_cpu_allowed(p, dest_cpu))
				return dest_cpu;
		}
	}
@@ -10,6 +10,7 @@
 #include "sched.h"

 DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
+EXPORT_PER_CPU_SYMBOL_GPL(cpufreq_update_util_data);

 /**
 * cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer.
@@ -147,6 +147,7 @@
 static int psi_bug __read_mostly;

 DEFINE_STATIC_KEY_FALSE(psi_disabled);
+DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);

 #ifdef CONFIG_PSI_DEFAULT_DISABLED
 static bool psi_enable;
@@ -210,6 +211,9 @@ void __init psi_init(void)
		return;
	}

+	if (!cgroup_psi_enabled())
+		static_branch_disable(&psi_cgroups_enabled);
+
	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
 }
@@ -750,23 +754,23 @@ static void psi_group_change(struct psi_group *group, int cpu,

 static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
 {
-#if defined CONFIG_CGROUPS && defined CONFIG_PSI_PER_CGROUP_ACCT
-	struct cgroup *cgroup = NULL;
-
-	if (!*iter)
-		cgroup = task->cgroups->dfl_cgrp;
-	else if (*iter == &psi_system)
+	if (*iter == &psi_system)
		return NULL;
-	else
-		cgroup = cgroup_parent(*iter);
-
-	if (cgroup && cgroup_parent(cgroup)) {
-		*iter = cgroup;
-		return cgroup_psi(cgroup);
+
+#ifdef CONFIG_CGROUPS
+	if (static_branch_likely(&psi_cgroups_enabled)) {
+		struct cgroup *cgroup = NULL;
+
+		if (!*iter)
+			cgroup = task->cgroups->dfl_cgrp;
+		else
+			cgroup = cgroup_parent(*iter);
+
+		if (cgroup && cgroup_parent(cgroup)) {
+			*iter = cgroup;
+			return cgroup_psi(cgroup);
+		}
	}
-#else
-	if (*iter)
-		return NULL;
 #endif
	*iter = &psi_system;
	return &psi_system;
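The static key keeps the per-cgroup walk free when pressure accounting is disabled at boot: static_branch_likely() compiles down to a patched jump, so the cgroup branch costs nothing once psi_init() disables it. The same pattern in isolation (sketch with made-up names):

/* Sketch: gate an optional path behind a boot-time static key. */
DEFINE_STATIC_KEY_TRUE(example_enabled);

static void __init example_init(bool enabled)
{
	if (!enabled)
		static_branch_disable(&example_enabled);
}

static int example_path(void)
{
	if (static_branch_likely(&example_enabled))
		return 1;	/* patched-in fast path */
	return 0;
}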
@@ -43,3 +43,4 @@ void show_mem(unsigned int filter, nodemask_t *nodemask)
 #endif
	trace_android_vh_show_mem(filter, nodemask);
 }
+EXPORT_SYMBOL_GPL(show_mem);
@@ -475,17 +475,17 @@ static int __init page_pinner_init(void)

	pp_debugfs_root = debugfs_create_dir("page_pinner", NULL);

-	debugfs_create_file("longterm_pinner", 0400, pp_debugfs_root, NULL,
+	debugfs_create_file("longterm_pinner", 0444, pp_debugfs_root, NULL,
			    &proc_longterm_pinner_operations);

-	debugfs_create_file("threshold", 0444, pp_debugfs_root, NULL,
+	debugfs_create_file("threshold", 0644, pp_debugfs_root, NULL,
			    &pp_threshold_fops);

-	debugfs_create_file("alloc_contig_failed", 0400,
+	debugfs_create_file("alloc_contig_failed", 0444,
			    pp_debugfs_root, NULL,
			    &proc_alloc_contig_failed_operations);

-	debugfs_create_file("failure_tracking", 0444,
+	debugfs_create_file("failure_tracking", 0644,
			    pp_debugfs_root, NULL,
			    &failure_tracking_fops);
	return 0;
mm/slub.c
@@ -5744,31 +5744,23 @@ static int slab_debugfs_show(struct seq_file *seq, void *v)

 static void slab_debugfs_stop(struct seq_file *seq, void *v)
 {
-	kfree(v);
 }

 static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
 {
-	loff_t *spos = v;
	struct loc_track *t = seq->private;

-	if (*ppos < t->count) {
-		*ppos = ++*spos;
-		return spos;
-	}
-	*ppos = ++*spos;
+	v = ppos;
+	++*ppos;
+	if (*ppos <= t->count)
+		return v;

	return NULL;
 }

 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
 {
-	loff_t *spos = kmalloc(sizeof(loff_t), GFP_KERNEL);
-
-	if (!spos)
-		return NULL;
-
-	*spos = *ppos;
-	return spos;
+	return ppos;
 }

 static const struct seq_operations slab_debugfs_sops = {
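The rewritten iterator threads the position through *ppos itself instead of a kmalloc'd shadow copy, so stop() owns nothing and the per-open allocation (which leaked whenever a read stopped early) disappears. The same seq_file idiom in generic form (sketch; the bound is illustrative):

/* Sketch: allocation-free seq_file iteration over N records. */
static void *pos_start(struct seq_file *seq, loff_t *ppos)
{
	return ppos;			/* the position doubles as the iterator */
}

static void *pos_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	size_t nr_records = 100;	/* illustrative bound */

	++*ppos;
	return *ppos <= nr_records ? ppos : NULL;
}

static void pos_stop(struct seq_file *seq, void *v)
{
	/* nothing to free */
}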
@@ -66,6 +66,9 @@
 #undef CREATE_TRACE_POINTS
 #include <trace/hooks/vmscan.h>

+EXPORT_TRACEPOINT_SYMBOL_GPL(mm_vmscan_direct_reclaim_begin);
+EXPORT_TRACEPOINT_SYMBOL_GPL(mm_vmscan_direct_reclaim_end);
+
 struct scan_control {
	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;
@@ -3266,8 +3266,6 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
			shinfo->gso_type |= SKB_GSO_TCPV6;
		}

-		/* Due to IPv6 header, MSS needs to be downgraded. */
-		skb_decrease_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
@@ -3307,8 +3305,6 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
			shinfo->gso_type |= SKB_GSO_TCPV4;
		}

-		/* Due to IPv4 header, MSS can be upgraded. */
-		skb_increase_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
@@ -117,7 +117,8 @@ struct security_class_mapping secclass_map[] = {
	  { COMMON_IPC_PERMS, NULL } },
	{ "netlink_route_socket",
	  { COMMON_SOCK_PERMS,
-	    "nlmsg_read", "nlmsg_write", "nlmsg_readpriv", NULL } },
+	    "nlmsg_read", "nlmsg_write", "nlmsg_readpriv", "nlmsg_getneigh",
+	    NULL } },
	{ "netlink_tcpdiag_socket",
	  { COMMON_SOCK_PERMS,
	    "nlmsg_read", "nlmsg_write", NULL } },
@@ -98,6 +98,7 @@ struct selinux_state {
	bool initialized;
	bool policycap[__POLICYDB_CAPABILITY_MAX];
	bool android_netlink_route;
+	bool android_netlink_getneigh;

	struct page *status_page;
	struct mutex status_lock;
@@ -227,6 +228,13 @@ static inline bool selinux_android_nlroute_getlink(void)
	return state->android_netlink_route;
 }

+static inline bool selinux_android_nlroute_getneigh(void)
+{
+	struct selinux_state *state = &selinux_state;
+
+	return state->android_netlink_getneigh;
+}
+
 struct selinux_policy_convert_data;

 struct selinux_load_state {
@@ -212,12 +212,12 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
	return err;
 }

-static void nlmsg_set_getlink_perm(u32 perm)
+static void nlmsg_set_perm_for_type(u32 perm, u16 type)
 {
	int i;

	for (i = 0; i < ARRAY_SIZE(nlmsg_route_perms); i++) {
-		if (nlmsg_route_perms[i].nlmsg_type == RTM_GETLINK) {
+		if (nlmsg_route_perms[i].nlmsg_type == type) {
			nlmsg_route_perms[i].perm = perm;
			break;
		}
@@ -227,11 +227,27 @@ static void nlmsg_set_getlink_perm(u32 perm)
 /**
 * Use nlmsg_readpriv as the permission for RTM_GETLINK messages if the
 * netlink_route_getlink policy capability is set. Otherwise use nlmsg_read.
+ * Similarly, use nlmsg_getneigh for RTM_GETNEIGH and RTM_GETNEIGHTBL if the
+ * netlink_route_getneigh policy capability is set. Otherwise use nlmsg_read.
 */
 void selinux_nlmsg_init(void)
 {
	if (selinux_android_nlroute_getlink())
-		nlmsg_set_getlink_perm(NETLINK_ROUTE_SOCKET__NLMSG_READPRIV);
+		nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_READPRIV,
+					RTM_GETLINK);
	else
-		nlmsg_set_getlink_perm(NETLINK_ROUTE_SOCKET__NLMSG_READ);
+		nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_READ,
+					RTM_GETLINK);
+
+	if (selinux_android_nlroute_getneigh()) {
+		nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_GETNEIGH,
+					RTM_GETNEIGH);
+		nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_GETNEIGH,
+					RTM_GETNEIGHTBL);
+	} else {
+		nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_READ,
+					RTM_GETNEIGH);
+		nlmsg_set_perm_for_type(NETLINK_ROUTE_SOCKET__NLMSG_READ,
+					RTM_GETNEIGHTBL);
+	}
 }
@@ -2497,6 +2497,10 @@ int policydb_read(struct policydb *p, void *fp)
		p->android_netlink_route = 1;
	}

+	if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_ANDROID_NETLINK_GETNEIGH)) {
+		p->android_netlink_getneigh = 1;
+	}
+
	if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
		rc = ebitmap_read(&p->policycaps, fp);
		if (rc)
@@ -239,6 +239,7 @@ struct genfs {
 struct policydb {
	int mls_enabled;
	int android_netlink_route;
+	int android_netlink_getneigh;

	/* symbol tables */
	struct symtab symtab[SYM_NUM];
@@ -336,6 +337,7 @@ extern struct role_trans_datum *policydb_roletr_search(

 #define POLICYDB_CONFIG_MLS		1
 #define POLICYDB_CONFIG_ANDROID_NETLINK_ROUTE	(1 << 31)
+#define POLICYDB_CONFIG_ANDROID_NETLINK_GETNEIGH	(1 << 30)

 /* the config flags related to unknown classes/perms are bits 2 and 3 */
 #define REJECT_UNKNOWN	0x00000002
@@ -2162,6 +2162,7 @@ static void security_load_policycaps(struct selinux_state *state,
	}

	state->android_netlink_route = p->android_netlink_route;
+	state->android_netlink_getneigh = p->android_netlink_getneigh;
	selinux_nlmsg_init();
 }