Merge branch 'android12-5.10' into android12-5.10-lts
Sync up with android12-5.10 for the following commits:

2a2327c4e8 FROMLIST: power_supply: Use of-thermal cdev registration API
b90fe5ef8f FROMLIST: power_supply: Register cooling device outside of probe
fc64efcd06 Revert "UPSTREAM: tracefs: Have tracefs directories not set OTH permission bits by default"
1eb3049da0 FROMGIT: usb: dwc3: gadget: Prevent core from processing stale TRBs
ee1e2de73c UPSTREAM: cgroup-v1: Require capabilities to set release_agent
7e6f112beb FROMGIT: f2fs: move f2fs to use reader-unfair rwsems
23686f5ee8 UPSTREAM: f2fs: do not bother checkpoint by f2fs_get_node_info
fa055ddfd5 BACKPORT: f2fs: avoid down_write on nat_tree_lock during checkpoint
c8701aa0a7 ANDROID: GKI: enable RCU_BOOST
c34fa06f4b FROMLIST: rcu: Don't deboost before reporting expedited quiescent state
3a49d3b677 FROMGIT: usb: f_fs: Fix use-after-free for epfile
250abe08bb UPSTREAM: usb: gadget: f_fs: Clear ffs_eventfd in ffs_data_clear.
d449d91bc9 ANDROID: update new gki symbol
b2fcb7b63b ANDROID: abi: qcom: Add dma_{alloc,free}_noncoherent
5d79e49205 UPSTREAM: binder: fix async_free_space accounting for empty parcels
6aa9e78d6e FROMGIT: rcu: Allow expedited RCU grace periods on incoming CPUs
2f61ec09b0 ANDROID: abi_gki_aarch64_qcom: Add iommu_setup_dma_ops restricted vh
6a9ff8fa26 ANDROID: iommu: Add restricted vendor hook
2aba795b31 FROMLIST: arm64: cpufeature: List early Cortex-A510 parts as having broken dbm
2861bbc5b5 FROMLIST: arm64: Add Cortex-A510 CPU part definition
b0d13db791 FROMGIT: printk: ringbuffer: Improve prb_next_seq() performance
4b1862e4fc ANDROID: incremental-fs: fix GPF in pending_reads_dispatch_ioctl
445019bbca UPSTREAM: bpf: Fix integer overflow in argument calculation for bpf_map_area_alloc
032a676295 UPSTREAM: tee: handle lookup of shm with reference count 0
d461f54be3 ANDROID: Incremental-fs: Doc: correct a sysfs path in incfs.rst
1bfc9c16ae ANDROID: selftests: fix incfs_test
fd4c6594f5 ANDROID: incremental-fs: fix mount_fs issue
a512242e66 BACKPORT: arm64: errata: Add workaround for TSB flush failures
e48051244a UPSTREAM: arm64: Add Neoverse-N2, Cortex-A710 CPU part definition
dd3256d439 UPSTREAM: coresight: trbe: Defer the probe on offline CPUs
71aebf8793 UPSTREAM: coresight: etm4x: Use Trace Filtering controls dynamically
2bb8b3c907 BACKPORT: coresight: etm4x: Save restore TRFCR_EL1
79b64fa780 UPSTREAM: coresight: tmc-etr: Speed up for bounce buffer in flat mode
aee6af7046 UPSTREAM: coresight: tmc-etr: Add barrier after updating AUX ring buffer
a0009ade38 Revert half of "ANDROID: cpu/hotplug: create vendor hook for cpu_up/cpu_down"
a863cef344 Revert half of "ANDROID: arm64: add vendor hooks for bti and pauth fault"
9f58bcd614 Revert half of "ANDROID: vendor_hooks: Add param for android_vh_cpu_up/down"
4b3396046c Revert "ANDROID: vendor_hooks: Add a hook for task tagging"
d8fe0b1fc2 Revert "ANDROID: GKI: net: add vendor hooks for 'struct nf_conn' lifecycle"
92ab2aeca5 Revert "ANDROID: GKI: net: add vendor hooks for 'struct sock' lifecycle"
b3e6d6eec6 Revert "ANDROID: vendor_hooks: add hook and OEM data for slab shrink"
e09000ee19 Revert half of "ANDROID: vendor_hooks: Add hooks for memory when debug"
3f305a9101 Revert half of "ANDROID: gic-v3: Add vendor hook to GIC v3"
3b4ca92614 Merge tag 'android12-5.10.81_r00' into android12-5.10
bdc732d112 UPSTREAM: tracefs: Set all files to the same group ownership as the mount option
8455746a45 UPSTREAM: tracefs: Have new files inherit the ownership of their parent
9c63be2ada UPSTREAM: tracefs: Have tracefs directories not set OTH permission bits by default
64095600fd Revert "ANDROID: vendor_hooks: Add hooks to recognize special worker thread."
7887091009 Revert "ANDROID: sysrq: add vendor hook for sysrq crash information"
63e7148b27 Revert "ANDROID: user: Add vendor hook to user for GKI purpose"
18975040b9 Revert portions of "ANDROID: sched: Add vendor hooks for sched."
96c08d9210 Revert portions of "ANDROID: vendor_hooks: Add hooks for scheduler"
a32e89883a UPSTREAM: vfs: fs_context: fix up param length parsing in legacy_parse_param

New functions/variables are now being tracked as well, that came from the
android12-5.10 branch:

Leaf changes summary: 5 artifacts changed
Changed leaf types summary: 0 leaf type changed
Removed/Changed/Added functions summary: 0 Removed, 0 Changed, 4 Added functions
Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 1 Added variable

4 Added functions:

  [A] 'function int __traceiter_android_rvh_iommu_setup_dma_ops(void*, device*, u64, u64)'
  [A] 'function void* dma_alloc_noncoherent(device*, size_t, dma_addr_t*, dma_data_direction, gfp_t)'
  [A] 'function void dma_free_noncoherent(device*, size_t, void*, dma_addr_t, dma_data_direction)'
  [A] 'function void static_key_enable_cpuslocked(static_key*)'

1 Added variable:

  [A] 'tracepoint __tracepoint_android_rvh_iommu_setup_dma_ops'

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I7a5a82681cc94f6b3dcd17e159da8976be0bcb78
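For reference, dma_alloc_noncoherent()/dma_free_noncoherent(), newly tracked in the ABI above, are the standard kernel DMA API pair used by the coresight change later in this merge. A minimal, illustrative driver sketch (not part of the diff; the function and variable names here are invented):

#include <linux/dma-mapping.h>

/* Illustrative only: allocate a device-visible, CPU-noncoherent buffer
 * whose cache ownership must later be managed with dma_sync_single_*().
 */
static void *example_buf;
static dma_addr_t example_buf_dma;

static int example_alloc(struct device *dev, size_t size)
{
	example_buf = dma_alloc_noncoherent(dev, size, &example_buf_dma,
					    DMA_FROM_DEVICE, GFP_KERNEL);
	if (!example_buf)
		return -ENOMEM;
	return 0;
}

static void example_free(struct device *dev, size_t size)
{
	dma_free_noncoherent(dev, size, example_buf, example_buf_dma,
			     DMA_FROM_DEVICE);
}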
Documentation/arm64/silicon-errata.rst:

@@ -92,12 +92,18 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A77      | #1508412        | ARM64_ERRATUM_1508412       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A510     | #2051678        | ARM64_ERRATUM_2051678       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Cortex-A710     | #2054223        | ARM64_ERRATUM_2054223       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Neoverse-N1     | #1349291        | N/A                         |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Neoverse-N1     | #1542419        | ARM64_ERRATUM_1542419       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | Neoverse-N2     | #2067961        | ARM64_ERRATUM_2067961       |
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | MMU-500         | #841119,826419  | N/A                         |
+----------------+-----------------+-----------------+-----------------------------+
Documentation/filesystems/incfs.rst:

@@ -7,7 +7,7 @@ incfs: A stacked incremental filesystem for Linux

 /sys/fs interface
 =================

-Please update Documentation/ABI/testing/sys-fs-incfs if you update this
+Please update Documentation/ABI/testing/sysfs-fs-incfs if you update this
 section.

 incfs creates the following files in /sys/fs.
[Two additional file diffs suppressed because they are too large.]
android/abi_gki_aarch64_qcom:

@@ -549,6 +549,7 @@
   divider_ro_round_rate_parent
   divider_round_rate_parent
   dma_alloc_attrs
+  dma_alloc_noncoherent
   dma_async_device_register
   dma_async_device_unregister
   dma_async_tx_descriptor_init
@@ -584,6 +585,7 @@
   dma_fence_signal_timestamp_locked
   dma_fence_wait_timeout
   dma_free_attrs
+  dma_free_noncoherent
   dma_get_sgtable_attrs
   dma_get_slave_channel
   dma_heap_add
@@ -2512,6 +2514,7 @@
   __traceiter_android_rvh_force_compatible_post
   __traceiter_android_rvh_force_compatible_pre
   __traceiter_android_rvh_gic_v3_set_affinity
+  __traceiter_android_rvh_iommu_setup_dma_ops
   __traceiter_android_rvh_irqs_disable
   __traceiter_android_rvh_irqs_enable
   __traceiter_android_rvh_migrate_queued_task
@@ -2625,6 +2628,7 @@
   __tracepoint_android_rvh_force_compatible_post
   __tracepoint_android_rvh_force_compatible_pre
   __tracepoint_android_rvh_gic_v3_set_affinity
+  __tracepoint_android_rvh_iommu_setup_dma_ops
   __tracepoint_android_rvh_irqs_disable
   __tracepoint_android_rvh_irqs_enable
   __tracepoint_android_rvh_migrate_queued_task
arch/arm64/Kconfig:

@@ -669,6 +669,49 @@ config ARM64_ERRATUM_1508412

	  If unsure, say Y.

+config ARM64_ERRATUM_2051678
+	bool "Cortex-A510: 2051678: disable Hardware Update of the page table's dirty bit"
+	help
+	  This options adds the workaround for ARM Cortex-A510 erratum ARM64_ERRATUM_2051678.
+	  Affected Coretex-A510 might not respect the ordering rules for
+	  hardware update of the page table's dirty bit. The workaround
+	  is to not enable the feature on affected CPUs.
+
+	  If unsure, say Y.
+
+config ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+	bool
+
+config ARM64_ERRATUM_2054223
+	bool "Cortex-A710: 2054223: workaround TSB instruction failing to flush trace"
+	default y
+	select ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+	help
+	  Enable workaround for ARM Cortex-A710 erratum 2054223
+
+	  Affected cores may fail to flush the trace data on a TSB instruction, when
+	  the PE is in trace prohibited state. This will cause losing a few bytes
+	  of the trace cached.
+
+	  Workaround is to issue two TSB consecutively on affected cores.
+
+	  If unsure, say Y.
+
+config ARM64_ERRATUM_2067961
+	bool "Neoverse-N2: 2067961: workaround TSB instruction failing to flush trace"
+	default y
+	select ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+	help
+	  Enable workaround for ARM Neoverse-N2 erratum 2067961
+
+	  Affected cores may fail to flush the trace data on a TSB instruction, when
+	  the PE is in trace prohibited state. This will cause losing a few bytes
+	  of the trace cached.
+
+	  Workaround is to issue two TSB consecutively on affected cores.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
	bool "Cavium erratum 22375, 24313"
	default y
arch/arm64/configs/gki_defconfig:

@@ -8,6 +8,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_PSI=y
 CONFIG_RCU_EXPERT=y
 CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_BOOST=y
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
arch/arm64/include/asm/barrier.h:

@@ -23,7 +23,7 @@
 #define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

 #define psb_csync()	asm volatile("hint #17" : : : "memory")
-#define tsb_csync()	asm volatile("hint #18" : : : "memory")
+#define __tsb_csync()	asm volatile("hint #18" : : : "memory")
 #define csdb()		asm volatile("hint #20" : : : "memory")

 #define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \
@@ -50,6 +50,20 @@
 #define dma_rmb()	dmb(oshld)
 #define dma_wmb()	dmb(oshst)

+#define tsb_csync()							\
+	do {								\
+		/*							\
+		 * CPUs affected by Arm Erratum 2054223 or 2067961 needs \
+		 * another TSB to ensure the trace is flushed. The barriers \
+		 * don't have to be strictly back to back, as long as the \
+		 * CPU is in trace prohibited state.			\
+		 */							\
+		if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE)) \
+			__tsb_csync();					\
+		__tsb_csync();						\
+	} while (0)
+
 /*
  * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
  * and 0 otherwise.
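To see where the doubled barrier matters, here is an illustrative (hypothetical, not from this diff) drain sequence of the kind the self-hosted trace drivers later in this merge perform; cpu_prohibit_trace() is introduced further down in this same series:

/* Sketch only: the CPU must already be in a trace prohibited state,
 * otherwise even two TSBs give no guarantee on affected cores.
 */
static void example_drain_trace(void)
{
	cpu_prohibit_trace();	/* stop trace generation first */
	isb();
	tsb_csync();		/* expands to two TSBs on affected CPUs */
	dsb(nsh);		/* ensure the flushed data is visible */
}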
arch/arm64/include/asm/cpucaps.h:

@@ -69,6 +69,7 @@
 #define ARM64_WORKAROUND_1508412		58
 #define ARM64_HAS_LDAPR				59
 #define ARM64_KVM_PROTECTED_MODE		60
+#define ARM64_WORKAROUND_TSB_FLUSH_FAILURE	61

 /* kabi: reserve 62 - 76 for future cpu capabilities */
 #define ARM64_NCAPS				76
arch/arm64/include/asm/cputype.h:

@@ -72,6 +72,9 @@
 #define ARM_CPU_PART_CORTEX_A76		0xD0B
 #define ARM_CPU_PART_NEOVERSE_N1	0xD0C
 #define ARM_CPU_PART_CORTEX_A77		0xD0D
+#define ARM_CPU_PART_CORTEX_A510	0xD46
+#define ARM_CPU_PART_CORTEX_A710	0xD47
+#define ARM_CPU_PART_NEOVERSE_N2	0xD49

 #define APM_CPU_PART_POTENZA		0x000

@@ -109,6 +112,9 @@
 #define MIDR_CORTEX_A76	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
 #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
 #define MIDR_CORTEX_A77	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
+#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
 #define MIDR_THUNDERX	MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
arch/arm64/kernel/cpu_errata.c:

@@ -342,6 +342,18 @@ static const struct midr_range erratum_1463225[] = {
 };
 #endif

+#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+static const struct midr_range tsb_flush_fail_cpus[] = {
+#ifdef CONFIG_ARM64_ERRATUM_2067961
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2054223
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+#endif
+	{},
+};
+#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
@@ -527,6 +539,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
			0, 0,
			1, 0),
	},
 #endif
+#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
+	{
+		.desc = "ARM erratum 2067961 or 2054223",
+		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
+		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
+	},
+#endif
	{
	}
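As a worked example of the MIDR plumbing above (a simplified sketch, not part of the diff; the list and function names are invented): MIDR_CPU_MODEL() packs the ARM implementer code and the 0xD47 part number into a MIDR template, and MIDR_ALL_VERSIONS() widens that into a midr_range covering every revision, so the range-list helper matches any Cortex-A710:

static const struct midr_range example_a710_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	{},
};

static bool example_is_a710(void)
{
	/* read_cpuid_id() returns this CPU's MIDR_EL1 value */
	return is_midr_in_range_list(read_cpuid_id(), example_a710_list);
}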
arch/arm64/kernel/cpufeature.c:

@@ -1599,6 +1599,9 @@ static bool cpu_has_broken_dbm(void)
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_2051678
+	MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
+#endif
	{},
 };
arch/arm64/kernel/traps.c:

@@ -414,7 +414,6 @@ NOKPROBE_SYMBOL(do_undefinstr);

 void do_bti(struct pt_regs *regs)
 {
-	trace_android_rvh_do_bti(regs, user_mode(regs));
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
 }
arch/arm64/mm/dma-mapping.c:

@@ -53,6 +53,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
	if (iommu) {
		iommu_setup_dma_ops(dev, dma_base, size);
		trace_android_vh_iommu_setup_dma_ops(dev, dma_base, size);
+		trace_android_rvh_iommu_setup_dma_ops(dev, dma_base, size);
	}

 #ifdef CONFIG_XEN
drivers/android/vendor_hooks.c:

@@ -20,7 +20,6 @@
 #include <trace/hooks/gic.h>
 #include <trace/hooks/wqlockup.h>
 #include <trace/hooks/debug.h>
-#include <trace/hooks/sysrqcrash.h>
 #include <trace/hooks/printk.h>
 #include <trace/hooks/gic_v3.h>
 #include <trace/hooks/epoch.h>
@@ -41,7 +40,6 @@
 #include <trace/hooks/ufshcd.h>
 #include <trace/hooks/block.h>
 #include <trace/hooks/cgroup.h>
 #include <trace/hooks/workqueue.h>
 #include <trace/hooks/sys.h>
 #include <trace/hooks/traps.h>
 #include <trace/hooks/avc.h>
@@ -60,7 +58,6 @@
 #include <trace/hooks/v4l2core.h>
 #include <trace/hooks/v4l2mc.h>
 #include <trace/hooks/scmi.h>
-#include <trace/hooks/user.h>
 #include <trace/hooks/cpuidle_psci.h>
 #include <trace/hooks/fips140.h>
 #include <trace/hooks/remoteproc.h>
@@ -92,10 +89,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_prepare_prio_fork);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_finish_prio_fork);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_user_nice);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_setscheduler);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sk_alloc);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sk_free);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_nf_conn_alloc);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_nf_conn_free);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_arch_set_freq_scale);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_is_fpsimd_save);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_init);
@@ -127,12 +120,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_group);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_resume);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_wq_lockup_pool);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ipi_stop);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sysrq_crash);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dump_throttled_rt_tasks);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_printk_hotplug);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_jiffies_update);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_gic_v3_set_affinity);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_v3_affinity_init);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_suspend_epoch_val);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_resume_epoch_val);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_max_freq);
@@ -191,7 +182,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_map_util_freq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_report_bug);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_em_cpu_energy);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_up);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_down);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_balance_rt);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_timer_calc_index);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_watchdog_timer_softlockup);
@@ -202,6 +192,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_die_kernel_fault);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sea);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_mem_abort);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sp_pc_abort);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_setup_dma_ops);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_setup_dma_ops);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_alloc_iova);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_iovad_alloc_iova);
@@ -236,9 +227,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpufreq_transition);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cgroup_set_task);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cgroup_force_kthread_migration);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_syscall_prctl_finished);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_create_worker);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_tick);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_wakeup_ignore);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_replace_next_task_fair);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sched_yield);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_wait_for_work);
@@ -248,7 +237,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_mutex_list_add);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_unlock_slowpath);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_wake_finish);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_undefinstr);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_bti);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_ptrauth_fault);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_bad_mode);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_arm64_serror_panic);
@@ -274,11 +262,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_mm);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_from_fragment_pool);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exclude_reserved_zone);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_include_reserved_zone);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_slowpath);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_mem);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_print_slabinfo_header);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_shrink_slab);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cache_show);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpci_override_toggling);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_typec_tcpci_chk_contaminant);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_typec_tcpci_get_vbus);
@@ -316,7 +300,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_after_enqueue_task);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_after_dequeue_task);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_entity);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_entity);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_entity_tick);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_task_fair);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_task_fair);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_stat_runtime_rt);
@@ -341,7 +324,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_vmalloc_stack);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_stack_hash);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_track_hash);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_vmpressure);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_task_comm);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_acct_update_power);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpm_log);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_media_device_setup_link);
@@ -357,8 +339,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_v4l2subdev_set_frame_interval);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_v4l2subdev_set_frame_interval);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_scmi_timeout_sync);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_new_ilb);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_uid);
-EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_user);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_qos_add_request);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_qos_update_request);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_qos_remove_request);
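For reference, a vendor module would attach to the newly exported restricted hook roughly as follows (a minimal sketch, assuming the usual Android GKI vendor-hook conventions; the module and handler names are invented). The handler prototype mirrors the __traceiter signature recorded in the ABI summary above:

#include <linux/module.h>
#include <trace/hooks/iommu.h>

/* Restricted vendor hooks run with preemption disabled and, once
 * registered, cannot be unregistered.
 */
static void example_iommu_setup_dma_ops(void *unused, struct device *dev,
					u64 dma_base, u64 size)
{
	/* vendor-specific DMA ops adjustments would go here */
}

static int __init example_hook_init(void)
{
	return register_trace_android_rvh_iommu_setup_dma_ops(
			example_iommu_setup_dma_ops, NULL);
}
module_init(example_hook_init);
MODULE_LICENSE("GPL");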
drivers/hwtracing/coresight/coresight-etm4x-core.c:

@@ -39,6 +39,7 @@

 #include "coresight-etm4x.h"
 #include "coresight-etm-perf.h"
+#include "coresight-self-hosted-trace.h"

 static int boot_enable;
 module_param(boot_enable, int, 0444);
@@ -236,6 +237,45 @@ struct etm4_enable_arg {
	int rc;
 };

+/*
+ * etm4x_prohibit_trace - Prohibit the CPU from tracing at all ELs.
+ * When the CPU supports FEAT_TRF, we could move the ETM to a trace
+ * prohibited state by filtering the Exception levels via TRFCR_EL1.
+ */
+static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata)
+{
+	/* If the CPU doesn't support FEAT_TRF, nothing to do */
+	if (!drvdata->trfcr)
+		return;
+	cpu_prohibit_trace();
+}
+
+/*
+ * etm4x_allow_trace - Allow CPU tracing in the respective ELs,
+ * as configured by the drvdata->config.mode for the current
+ * session. Even though we have TRCVICTLR bits to filter the
+ * trace in the ELs, it doesn't prevent the ETM from generating
+ * a packet (e.g, TraceInfo) that might contain the addresses from
+ * the excluded levels. Thus we use the additional controls provided
+ * via the Trace Filtering controls (FEAT_TRF) to make sure no trace
+ * is generated for the excluded ELs.
+ */
+static void etm4x_allow_trace(struct etmv4_drvdata *drvdata)
+{
+	u64 trfcr = drvdata->trfcr;
+
+	/* If the CPU doesn't support FEAT_TRF, nothing to do */
+	if (!trfcr)
+		return;
+
+	if (drvdata->config.mode & ETM_MODE_EXCL_KERN)
+		trfcr &= ~TRFCR_ELx_ExTRE;
+	if (drvdata->config.mode & ETM_MODE_EXCL_USER)
+		trfcr &= ~TRFCR_ELx_E0TRE;
+
+	write_trfcr(trfcr);
+}
+
 #ifdef CONFIG_ETM4X_IMPDEF_FEATURE

 #define HISI_HIP08_AMBA_ID		0x000b6d01
@@ -440,6 +480,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
	if (etm4x_is_ete(drvdata))
		etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR);

+	etm4x_allow_trace(drvdata);
	/* Enable the trace unit */
	etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);

@@ -723,7 +764,6 @@ static int etm4_enable(struct coresight_device *csdev,
 static void etm4_disable_hw(void *info)
 {
	u32 control;
-	u64 trfcr;
	struct etmv4_drvdata *drvdata = info;
	struct etmv4_config *config = &drvdata->config;
	struct coresight_device *csdev = drvdata->csdev;
@@ -750,12 +790,7 @@ static void etm4_disable_hw(void *info)
	 * If the CPU supports v8.4 Trace filter Control,
	 * set the ETM to trace prohibited region.
	 */
-	if (drvdata->trfc) {
-		trfcr = read_sysreg_s(SYS_TRFCR_EL1);
-		write_sysreg_s(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE),
-			       SYS_TRFCR_EL1);
-		isb();
-	}
+	etm4x_prohibit_trace(drvdata);
	/*
	 * Make sure everything completes before disabling, as recommended
	 * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
@@ -771,9 +806,6 @@ static void etm4_disable_hw(void *info)
	if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
		dev_err(etm_dev,
			"timeout while waiting for PM stable Trace Status\n");
-	if (drvdata->trfc)
-		write_sysreg_s(trfcr, SYS_TRFCR_EL1);

	/* read the status of the single shot comparators */
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		config->ss_status[i] =
@@ -968,15 +1000,15 @@ static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata,
	return false;
 }

-static void cpu_enable_tracing(struct etmv4_drvdata *drvdata)
+static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata)
 {
	u64 dfr0 = read_sysreg(id_aa64dfr0_el1);
	u64 trfcr;

+	drvdata->trfcr = 0;
	if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT))
		return;

-	drvdata->trfc = true;
	/*
	 * If the CPU supports v8.4 SelfHosted Tracing, enable
	 * tracing at the kernel EL and EL0, forcing to use the
@@ -990,7 +1022,7 @@ static void cpu_enable_tracing(struct etmv4_drvdata *drvdata)
	if (is_kernel_in_hyp_mode())
		trfcr |= TRFCR_EL2_CX;

-	write_sysreg_s(trfcr, SYS_TRFCR_EL1);
+	drvdata->trfcr = trfcr;
 }

 static void etm4_init_arch_data(void *info)
@@ -1176,7 +1208,7 @@ static void etm4_init_arch_data(void *info)
	/* NUMCNTR, bits[30:28] number of counters available for tracing */
	drvdata->nr_cntr = BMVAL(etmidr5, 28, 30);
	etm4_cs_lock(drvdata, csa);
-	cpu_enable_tracing(drvdata);
+	cpu_detect_trace_filtering(drvdata);
 }

 static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config)
@@ -1528,7 +1560,7 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
	drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
 }

-static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
 {
	int i, ret = 0;
	struct etmv4_save_state *state;
@@ -1667,7 +1699,23 @@ out:
	return ret;
 }

-static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
+{
+	int ret = 0;
+
+	/* Save the TRFCR irrespective of whether the ETM is ON */
+	if (drvdata->trfcr)
+		drvdata->save_trfcr = read_trfcr();
+	/*
+	 * Save and restore the ETM Trace registers only if
+	 * the ETM is active.
+	 */
+	if (local_read(&drvdata->mode) && drvdata->save_state)
+		ret = __etm4_cpu_save(drvdata);
+	return ret;
+}
+
+static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
 {
	int i;
	struct etmv4_save_state *state = drvdata->save_state;
@@ -1763,6 +1811,14 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
	etm4_cs_lock(drvdata, csa);
 }

+static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
+{
+	if (drvdata->trfcr)
+		write_trfcr(drvdata->save_trfcr);
+	if (drvdata->state_needs_restore)
+		__etm4_cpu_restore(drvdata);
+}
+
 static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
			      void *v)
 {
@@ -1774,23 +1830,17 @@ static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,

	drvdata = etmdrvdata[cpu];

-	if (!drvdata->save_state)
-		return NOTIFY_OK;
-
	if (WARN_ON_ONCE(drvdata->cpu != cpu))
		return NOTIFY_BAD;

	switch (cmd) {
	case CPU_PM_ENTER:
-		/* save the state if self-hosted coresight is in use */
-		if (local_read(&drvdata->mode))
-			if (etm4_cpu_save(drvdata))
-				return NOTIFY_BAD;
+		if (etm4_cpu_save(drvdata))
+			return NOTIFY_BAD;
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
-		if (drvdata->state_needs_restore)
-			etm4_cpu_restore(drvdata);
+		etm4_cpu_restore(drvdata);
		break;
	default:
		return NOTIFY_DONE;
drivers/hwtracing/coresight/coresight-etm4x.h:

@@ -919,8 +919,12 @@ struct etmv4_save_state {
  * @nooverflow:	Indicate if overflow prevention is supported.
  * @atbtrig:	If the implementation can support ATB triggers
  * @lpoverride:	If the implementation can support low-power state over.
- * @trfc:	If the implementation supports Arm v8.4 trace filter controls.
+ * @trfcr:	If the CPU supports FEAT_TRF, value of the TRFCR_ELx that
+ *		allows tracing at all ELs. We don't want to compute this
+ *		at runtime, due to the additional setting of TRFCR_CX when
+ *		in EL2. Otherwise, 0.
  * @config:	structure holding configuration parameters.
+ * @save_trfcr:	Saved TRFCR_EL1 register during a CPU PM event.
  * @save_state:	State to be preserved across power loss
  * @state_needs_restore: True when there is context to restore after PM exit
  * @skip_power_up: Indicates if an implementation can skip powering up
@@ -971,8 +975,9 @@ struct etmv4_drvdata {
	bool				nooverflow;
	bool				atbtrig;
	bool				lpoverride;
-	bool				trfc;
+	u64				trfcr;
	struct etmv4_config		config;
+	u64				save_trfcr;
	struct etmv4_save_state		*save_state;
	bool				state_needs_restore;
	bool				skip_power_up;
drivers/hwtracing/coresight/coresight-self-hosted-trace.h (new file, 31 lines):

@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Arm v8 Self-Hosted trace support.
+ *
+ * Copyright (C) 2021 ARM Ltd.
+ */
+
+#ifndef __CORESIGHT_SELF_HOSTED_TRACE_H
+#define __CORESIGHT_SELF_HOSTED_TRACE_H
+
+#include <asm/sysreg.h>
+
+static inline u64 read_trfcr(void)
+{
+	return read_sysreg_s(SYS_TRFCR_EL1);
+}
+
+static inline void write_trfcr(u64 val)
+{
+	write_sysreg_s(val, SYS_TRFCR_EL1);
+	isb();
+}
+
+static inline void cpu_prohibit_trace(void)
+{
+	u64 trfcr = read_trfcr();
+
+	/* Prohibit tracing at EL0 & the kernel EL */
+	write_trfcr(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE));
+}
+#endif /* __CORESIGHT_SELF_HOSTED_TRACE_H */
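The isb() inside write_trfcr() is the important detail: a TRFCR_EL1 write only takes effect after a context synchronization event, so callers can rely on filtering being live as soon as the helper returns. A minimal sketch of the save/restore pairing this header enables (mirroring etm4_cpu_save()/etm4_cpu_restore() above; the function names here are invented):

static u64 example_saved_trfcr;

static void example_pm_enter(void)
{
	example_saved_trfcr = read_trfcr();
}

static void example_pm_exit(void)
{
	/* write_trfcr() issues the isb() needed before tracing resumes */
	write_trfcr(example_saved_trfcr);
}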
drivers/hwtracing/coresight/coresight-tmc-etr.c:

@@ -609,8 +609,9 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
	if (!flat_buf)
		return -ENOMEM;

-	flat_buf->vaddr = dma_alloc_coherent(real_dev, etr_buf->size,
-					     &flat_buf->daddr, GFP_KERNEL);
+	flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
+						&flat_buf->daddr,
+						DMA_FROM_DEVICE, GFP_KERNEL);
	if (!flat_buf->vaddr) {
		kfree(flat_buf);
		return -ENOMEM;
@@ -631,14 +632,18 @@ static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
	if (flat_buf && flat_buf->daddr) {
		struct device *real_dev = flat_buf->dev->parent;

-		dma_free_coherent(real_dev, flat_buf->size,
-				  flat_buf->vaddr, flat_buf->daddr);
+		dma_free_noncoherent(real_dev, etr_buf->size,
+				     flat_buf->vaddr, flat_buf->daddr,
+				     DMA_FROM_DEVICE);
	}
	kfree(flat_buf);
 }

 static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
 {
	struct etr_flat_buf *flat_buf = etr_buf->private;
+	struct device *real_dev = flat_buf->dev->parent;

	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
@@ -648,6 +653,19 @@ static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
		etr_buf->len = etr_buf->size;
	else
		etr_buf->len = rwp - rrp;

+	/*
+	 * The driver always starts tracing at the beginning of the buffer,
+	 * the only reason why we would get a wrap around is when the buffer
+	 * is full. Sync the entire buffer in one go for this case.
+	 */
+	if (etr_buf->offset + etr_buf->len > etr_buf->size)
+		dma_sync_single_for_cpu(real_dev, flat_buf->daddr,
+					etr_buf->size, DMA_FROM_DEVICE);
+	else
+		dma_sync_single_for_cpu(real_dev,
+					flat_buf->daddr + etr_buf->offset,
+					etr_buf->len, DMA_FROM_DEVICE);
 }

 static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
@@ -1563,6 +1581,14 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
	 */
	if (etr_perf->snapshot)
		handle->head += size;

+	/*
+	 * Ensure that the AUX trace data is visible before the aux_head
+	 * is updated via perf_aux_output_end(), as expected by the
+	 * perf ring buffer.
+	 */
+	smp_wmb();

 out:
	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
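The switch to dma_alloc_noncoherent() is exactly what makes the explicit dma_sync_single_for_cpu() calls above necessary: the CPU may otherwise read stale cache lines. The general pattern, sketched with placeholder names:

/* Illustrative only: hand buffer ownership back to the CPU before
 * reading device-written data. 'dev', 'daddr', 'vaddr' and 'len'
 * are placeholders for a real driver's state.
 */
static void example_read_back(struct device *dev, dma_addr_t daddr,
			      void *vaddr, size_t len)
{
	dma_sync_single_for_cpu(dev, daddr, len, DMA_FROM_DEVICE);
	/* CPU reads of vaddr[0..len) are now coherent with the device */
}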
drivers/hwtracing/coresight/coresight-trbe.c:

@@ -869,6 +869,10 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
	if (WARN_ON(trbe_csdev))
		return;

+	/* If the TRBE was not probed on the CPU, we shouldn't be here */
+	if (WARN_ON(!cpudata->drvdata))
+		return;
+
	dev = &cpudata->drvdata->pdev->dev;
	desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
	if (!desc.name)
@@ -950,7 +954,9 @@ static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
		return -ENOMEM;

	for_each_cpu(cpu, &drvdata->supported_cpus) {
-		smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1);
+		/* If we fail to probe the CPU, let us defer it to hotplug callbacks */
+		if (smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1))
+			continue;
		if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
			arm_trbe_register_coresight_cpu(drvdata, cpu);
drivers/irqchip/irq-gic-v3.c:

@@ -812,15 +812,11 @@ static void __init gic_dist_init(void)
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
-	for (i = 32; i < GIC_LINE_NR; i++) {
-		trace_android_vh_gic_v3_affinity_init(i, GICD_IROUTER, &affinity);
+	for (i = 32; i < GIC_LINE_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
-	}

-	for (i = 0; i < GIC_ESPI_NR; i++) {
-		trace_android_vh_gic_v3_affinity_init(i, GICD_IROUTERnE, &affinity);
+	for (i = 0; i < GIC_ESPI_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
-	}
 }

 static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
drivers/power/supply/power_supply_core.c:

@@ -132,6 +132,7 @@ void power_supply_changed(struct power_supply *psy)
 }
 EXPORT_SYMBOL_GPL(power_supply_changed);

+static int psy_register_cooler(struct power_supply *psy);
 /*
  * Notify that power supply was registered after parent finished the probing.
  *
@@ -139,6 +140,8 @@ EXPORT_SYMBOL_GPL(power_supply_changed);
  * calling power_supply_changed() directly from power_supply_register()
  * would lead to execution of get_property() function provided by the driver
  * too early - before the probe ends.
+ * Also, registering cooling device from the probe will execute the
+ * get_property() function. So register the cooling device after the probe.
  *
  * Avoid that by waiting on parent's mutex.
  */
@@ -156,6 +159,7 @@ static void power_supply_deferred_register_work(struct work_struct *work)
	}

	power_supply_changed(psy);
+	psy_register_cooler(psy);

	if (psy->dev.parent)
		mutex_unlock(&psy->dev.parent->mutex);
@@ -1134,9 +1138,15 @@ static int psy_register_cooler(struct power_supply *psy)
	for (i = 0; i < psy->desc->num_properties; i++) {
		if (psy->desc->properties[i] ==
				POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT) {
-			psy->tcd = thermal_cooling_device_register(
-							(char *)psy->desc->name,
-							psy, &psy_tcd_ops);
+			if (psy->dev.parent)
+				psy->tcd = thermal_of_cooling_device_register(
+						dev_of_node(psy->dev.parent),
+						(char *)psy->desc->name,
+						psy, &psy_tcd_ops);
+			else
+				psy->tcd = thermal_cooling_device_register(
+						(char *)psy->desc->name,
+						psy, &psy_tcd_ops);
			return PTR_ERR_OR_ZERO(psy->tcd);
		}
	}
@@ -1242,10 +1252,6 @@ __power_supply_register(struct device *parent,
	if (rc)
		goto register_thermal_failed;

-	rc = psy_register_cooler(psy);
-	if (rc)
-		goto register_cooler_failed;
-
	rc = power_supply_create_triggers(psy);
	if (rc)
		goto create_triggers_failed;
@@ -1275,8 +1281,6 @@ __power_supply_register(struct device *parent,
 add_hwmon_sysfs_failed:
	power_supply_remove_triggers(psy);
 create_triggers_failed:
-	psy_unregister_cooler(psy);
-register_cooler_failed:
	psy_unregister_thermal(psy);
 register_thermal_failed:
	device_del(dev);
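thermal_of_cooling_device_register() differs from the plain variant only in binding the cooling device to a device-tree node, so that thermal zones can reference it via cooling-device phandles. A hedged sketch of the choice made above (helper name and "example" type string are invented):

#include <linux/thermal.h>

/* Illustrative only: prefer the OF-aware registration when the
 * parent device has a DT node, fall back to the plain variant.
 */
static struct thermal_cooling_device *
example_register_cooler(struct device *parent, void *data,
			const struct thermal_cooling_device_ops *ops)
{
	if (parent && dev_of_node(parent))
		return thermal_of_cooling_device_register(dev_of_node(parent),
							  "example", data, ops);
	return thermal_cooling_device_register("example", data, ops);
}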
drivers/tty/sysrq.c:

@@ -55,8 +55,6 @@
 #include <asm/ptrace.h>
 #include <asm/irq_regs.h>

-#include <trace/hooks/sysrqcrash.h>
-
 /* Whether we react on sysrq keys or just ignore them */
 static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
 static bool __read_mostly sysrq_always_enabled;
@@ -153,8 +151,6 @@ static void sysrq_handle_crash(int key)
	/* release the RCU read lock before crashing */
	rcu_read_unlock();

-	trace_android_vh_sysrq_crash(current);
-
	panic("sysrq triggered crash\n");
 }
 static const struct sysrq_key_op sysrq_crash_op = {
drivers/usb/dwc3/gadget.c:

@@ -1269,6 +1269,19 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);

+	/*
+	 * As per data book 4.2.3.2 TRB Control Bit Rules section
+	 *
+	 * The controller autonomously checks the HWO field of a TRB to determine if the
+	 * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
+	 * is valid before setting the HWO field to '1'. In most systems, this means that
+	 * software must update the fourth DWORD of a TRB last.
+	 *
+	 * However there is a possibility of CPU re-ordering here which can cause
+	 * controller to observe the HWO bit set prematurely.
+	 * Add a write memory barrier to prevent CPU re-ordering.
+	 */
+	wmb();
	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	dwc3_ep_inc_enq(dep);
|
||||
|
||||
static void ffs_data_closed(struct ffs_data *ffs)
|
||||
{
|
||||
struct ffs_epfile *epfiles;
|
||||
unsigned long flags;
|
||||
|
||||
ENTER();
|
||||
|
||||
if (atomic_dec_and_test(&ffs->opened)) {
|
||||
if (ffs->no_disconnect) {
|
||||
ffs->state = FFS_DEACTIVATED;
|
||||
if (ffs->epfiles) {
|
||||
ffs_epfiles_destroy(ffs->epfiles,
|
||||
ffs->eps_count);
|
||||
ffs->epfiles = NULL;
|
||||
}
|
||||
spin_lock_irqsave(&ffs->eps_lock, flags);
|
||||
epfiles = ffs->epfiles;
|
||||
ffs->epfiles = NULL;
|
||||
spin_unlock_irqrestore(&ffs->eps_lock,
|
||||
flags);
|
||||
|
||||
if (epfiles)
|
||||
ffs_epfiles_destroy(epfiles,
|
||||
ffs->eps_count);
|
||||
|
||||
if (ffs->setup_state == FFS_SETUP_PENDING)
|
||||
__ffs_ep0_stall(ffs);
|
||||
} else {
|
||||
@@ -1766,14 +1774,27 @@ static struct ffs_data *ffs_data_new(const char *dev_name)
|
||||
|
||||
static void ffs_data_clear(struct ffs_data *ffs)
|
||||
{
|
||||
struct ffs_epfile *epfiles;
|
||||
unsigned long flags;
|
||||
|
||||
ENTER();
|
||||
|
||||
ffs_closed(ffs);
|
||||
|
||||
BUG_ON(ffs->gadget);
|
||||
|
||||
if (ffs->epfiles) {
|
||||
ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
|
||||
spin_lock_irqsave(&ffs->eps_lock, flags);
|
||||
epfiles = ffs->epfiles;
|
||||
ffs->epfiles = NULL;
|
||||
spin_unlock_irqrestore(&ffs->eps_lock, flags);
|
||||
|
||||
/*
|
||||
* potential race possible between ffs_func_eps_disable
|
||||
* & ffs_epfile_release therefore maintaining a local
|
||||
* copy of epfile will save us from use-after-free.
|
||||
*/
|
||||
if (epfiles) {
|
||||
ffs_epfiles_destroy(epfiles, ffs->eps_count);
|
||||
ffs->epfiles = NULL;
|
||||
}
|
||||
|
||||
@@ -1921,12 +1942,15 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
|
||||
|
||||
static void ffs_func_eps_disable(struct ffs_function *func)
|
||||
{
|
||||
struct ffs_ep *ep = func->eps;
|
||||
struct ffs_epfile *epfile = func->ffs->epfiles;
|
||||
unsigned count = func->ffs->eps_count;
|
||||
struct ffs_ep *ep;
|
||||
struct ffs_epfile *epfile;
|
||||
unsigned short count;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&func->ffs->eps_lock, flags);
|
||||
count = func->ffs->eps_count;
|
||||
epfile = func->ffs->epfiles;
|
||||
ep = func->eps;
|
||||
while (count--) {
|
||||
/* pending requests get nuked */
|
||||
if (likely(ep->ep))
|
||||
@@ -1944,14 +1968,18 @@ static void ffs_func_eps_disable(struct ffs_function *func)
|
||||
|
||||
static int ffs_func_eps_enable(struct ffs_function *func)
|
||||
{
|
||||
struct ffs_data *ffs = func->ffs;
|
||||
struct ffs_ep *ep = func->eps;
|
||||
struct ffs_epfile *epfile = ffs->epfiles;
|
||||
unsigned count = ffs->eps_count;
|
||||
struct ffs_data *ffs;
|
||||
struct ffs_ep *ep;
|
||||
struct ffs_epfile *epfile;
|
||||
unsigned short count;
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock_irqsave(&func->ffs->eps_lock, flags);
|
||||
ffs = func->ffs;
|
||||
ep = func->eps;
|
||||
epfile = ffs->epfiles;
|
||||
count = ffs->eps_count;
|
||||
while(count--) {
|
||||
ep->ep->driver_data = ep;
|
||||
|
||||
|
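The f_fs fix above is an instance of a general teardown pattern: snapshot and clear the shared pointer under the lock, then free outside it, so a racing reader sees either the live object or NULL, never a freed pointer. Its minimal form (all types and names below are invented):

#include <linux/spinlock.h>

struct example_obj;
struct example_ctx {
	spinlock_t lock;
	struct example_obj *obj;
};

static void example_obj_destroy(struct example_obj *obj);

static void example_teardown(struct example_ctx *ctx)
{
	struct example_obj *obj;
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	obj = ctx->obj;		/* take ownership under the lock */
	ctx->obj = NULL;
	spin_unlock_irqrestore(&ctx->lock, flags);

	if (obj)
		example_obj_destroy(obj);	/* free without the lock held */
}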
fs/erofs/zdata.c:

@@ -737,7 +737,7 @@ hitted:
 retry:
	err = z_erofs_attach_page(clt, page, page_type,
				  clt->mode >= COLLECT_PRIMARY_FOLLOWED);
-	/* should allocate an additional short-lived page for pagevec */
+	/* should allocate an additional staging page for pagevec */
	if (err == -EAGAIN) {
		struct page *const newpage =
				alloc_page(GFP_NOFS | __GFP_NOFAIL);
fs/exec.c:

@@ -73,7 +73,6 @@
 #include "internal.h"

 #include <trace/events/sched.h>
-#include <trace/hooks/sched.h>

 EXPORT_TRACEPOINT_SYMBOL_GPL(task_rename);

@@ -1230,7 +1229,6 @@ void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
	task_unlock(tsk);
	perf_event_comm(tsk, exec);
-	trace_android_vh_set_task_comm(tsk);
 }

 /*
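All the f2fs hunks that follow convert bare rwsem calls to f2fs_down_*/f2fs_up_* wrappers from the "f2fs: move f2fs to use reader-unfair rwsems" commit in the list above. The wrapper itself is not shown in this section; its approximate shape (reproduced from memory, so treat the details as a sketch) makes readers unfair: a reader only ever trylocks and otherwise sleeps on a waitqueue that writers flush on release, which keeps writers such as the checkpoint path from being starved:

/* Approximate shape of the f2fs_rwsem wrapper; see the commit above
 * for the authoritative version.
 */
struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
	wait_queue_head_t read_waiters;
};

static inline void f2fs_down_read(struct f2fs_rwsem *sem)
{
	/* readers never queue inside the rwsem; they spin-wait here */
	wait_event(sem->read_waiters,
		   down_read_trylock(&sem->internal_rwsem));
}

static inline void f2fs_up_read(struct f2fs_rwsem *sem)
{
	up_read(&sem->internal_rwsem);
}

static inline void f2fs_down_write(struct f2fs_rwsem *sem)
{
	down_write(&sem->internal_rwsem);
}

static inline void f2fs_up_write(struct f2fs_rwsem *sem)
{
	up_write(&sem->internal_rwsem);
	/* let any trylock-looping readers in */
	wake_up_all(&sem->read_waiters);
}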
@@ -350,13 +350,13 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
|
||||
goto skip_write;
|
||||
|
||||
/* if locked failed, cp will flush dirty pages instead */
|
||||
if (!down_write_trylock(&sbi->cp_global_sem))
|
||||
if (!f2fs_down_write_trylock(&sbi->cp_global_sem))
|
||||
goto skip_write;
|
||||
|
||||
trace_f2fs_writepages(mapping->host, wbc, META);
|
||||
diff = nr_pages_to_write(sbi, META, wbc);
|
||||
written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
|
||||
up_write(&sbi->cp_global_sem);
|
||||
f2fs_up_write(&sbi->cp_global_sem);
|
||||
wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
|
||||
return 0;
|
||||
|
||||
@@ -650,7 +650,7 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
|
||||
/* truncate all the data during iput */
|
||||
iput(inode);
|
||||
|
||||
err = f2fs_get_node_info(sbi, ino, &ni);
|
||||
err = f2fs_get_node_info(sbi, ino, &ni, false);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
@@ -1148,7 +1148,7 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
|
||||
if (!is_journalled_quota(sbi))
|
||||
return false;
|
||||
|
||||
if (!down_write_trylock(&sbi->quota_sem))
|
||||
if (!f2fs_down_write_trylock(&sbi->quota_sem))
|
||||
return true;
|
||||
if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
|
||||
ret = false;
|
||||
@@ -1160,7 +1160,7 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
|
||||
} else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
|
||||
ret = true;
|
||||
}
|
||||
up_write(&sbi->quota_sem);
|
||||
f2fs_up_write(&sbi->quota_sem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -1217,10 +1217,10 @@ retry_flush_dents:
|
||||
* POR: we should ensure that there are no dirty node pages
|
||||
* until finishing nat/sit flush. inode->i_blocks can be updated.
|
||||
*/
|
||||
down_write(&sbi->node_change);
|
||||
f2fs_down_write(&sbi->node_change);
|
||||
|
||||
if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
|
||||
up_write(&sbi->node_change);
|
||||
f2fs_up_write(&sbi->node_change);
|
||||
f2fs_unlock_all(sbi);
|
||||
err = f2fs_sync_inode_meta(sbi);
|
||||
if (err)
|
||||
@@ -1230,15 +1230,15 @@ retry_flush_dents:
|
||||
}
|
||||
|
||||
retry_flush_nodes:
|
||||
down_write(&sbi->node_write);
|
||||
f2fs_down_write(&sbi->node_write);
|
||||
|
||||
if (get_pages(sbi, F2FS_DIRTY_NODES)) {
|
||||
up_write(&sbi->node_write);
|
||||
f2fs_up_write(&sbi->node_write);
|
||||
atomic_inc(&sbi->wb_sync_req[NODE]);
|
||||
err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
|
||||
atomic_dec(&sbi->wb_sync_req[NODE]);
|
||||
if (err) {
|
||||
up_write(&sbi->node_change);
|
||||
f2fs_up_write(&sbi->node_change);
|
||||
f2fs_unlock_all(sbi);
|
||||
return err;
|
||||
}
|
||||
@@ -1251,13 +1251,13 @@ retry_flush_nodes:
|
||||
* dirty node blocks and some checkpoint values by block allocation.
|
||||
*/
|
||||
__prepare_cp_block(sbi);
|
||||
up_write(&sbi->node_change);
|
||||
f2fs_up_write(&sbi->node_change);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void unblock_operations(struct f2fs_sb_info *sbi)
|
||||
{
|
||||
up_write(&sbi->node_write);
|
||||
f2fs_up_write(&sbi->node_write);
|
||||
f2fs_unlock_all(sbi);
|
||||
}
|
||||
|
||||
@@ -1592,7 +1592,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
|
||||
f2fs_warn(sbi, "Start checkpoint disabled!");
|
||||
}
|
||||
if (cpc->reason != CP_RESIZE)
|
||||
down_write(&sbi->cp_global_sem);
|
||||
f2fs_down_write(&sbi->cp_global_sem);
|
||||
|
||||
if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
|
||||
((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
|
||||
@@ -1667,7 +1667,7 @@ stop:
|
||||
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
|
||||
out:
|
||||
if (cpc->reason != CP_RESIZE)
|
||||
up_write(&sbi->cp_global_sem);
|
||||
f2fs_up_write(&sbi->cp_global_sem);
|
||||
return err;
|
||||
}
|
||||
|
||||
@@ -1715,9 +1715,9 @@ static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
|
||||
struct cp_control cpc = { .reason = CP_SYNC, };
|
||||
int err;
|
||||
|
||||
down_write(&sbi->gc_lock);
|
||||
f2fs_down_write(&sbi->gc_lock);
|
||||
err = f2fs_write_checkpoint(sbi, &cpc);
|
||||
up_write(&sbi->gc_lock);
|
||||
f2fs_up_write(&sbi->gc_lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
@@ -1805,9 +1805,9 @@ int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
|
||||
if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
|
||||
int ret;
|
||||
|
||||
down_write(&sbi->gc_lock);
|
||||
f2fs_down_write(&sbi->gc_lock);
|
||||
ret = f2fs_write_checkpoint(sbi, &cpc);
|
||||
up_write(&sbi->gc_lock);
|
||||
f2fs_up_write(&sbi->gc_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@@ -1203,7 +1203,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
|
||||
* checkpoint. This can only happen to quota writes which can cause
|
||||
* the below discard race condition.
|
||||
*/
|
||||
down_read(&sbi->node_write);
|
||||
f2fs_down_read(&sbi->node_write);
|
||||
} else if (!f2fs_trylock_op(sbi)) {
|
||||
goto out_free;
|
||||
}
|
||||
@@ -1222,7 +1222,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
|
||||
|
||||
psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
|
||||
|
||||
err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
|
||||
err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
|
||||
if (err)
|
||||
goto out_put_dnode;
|
||||
|
||||
@@ -1320,7 +1320,7 @@ unlock_continue:
|
||||
|
||||
f2fs_put_dnode(&dn);
|
||||
if (IS_NOQUOTA(inode))
|
||||
up_read(&sbi->node_write);
|
||||
f2fs_up_read(&sbi->node_write);
|
||||
else
|
||||
f2fs_unlock_op(sbi);
|
||||
|
||||
@@ -1346,7 +1346,7 @@ out_put_dnode:
|
||||
f2fs_put_dnode(&dn);
|
||||
out_unlock_op:
|
||||
if (IS_NOQUOTA(inode))
|
||||
up_read(&sbi->node_write);
|
||||
f2fs_up_read(&sbi->node_write);
|
||||
else
|
||||
f2fs_unlock_op(sbi);
|
||||
out_free:
|
||||
|
@@ -593,7 +593,7 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
|
||||
enum page_type btype = PAGE_TYPE_OF_BIO(type);
|
||||
struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
|
||||
|
||||
down_write(&io->io_rwsem);
|
||||
f2fs_down_write(&io->io_rwsem);
|
||||
|
||||
/* change META to META_FLUSH in the checkpoint procedure */
|
||||
if (type >= META_FLUSH) {
|
||||
@@ -604,7 +604,7 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
|
||||
io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
|
||||
}
|
||||
__submit_merged_bio(io);
|
||||
up_write(&io->io_rwsem);
|
||||
f2fs_up_write(&io->io_rwsem);
|
||||
}
|
||||
|
||||
static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
|
||||
@@ -619,9 +619,9 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
|
||||
enum page_type btype = PAGE_TYPE_OF_BIO(type);
|
||||
struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
|
||||
|
||||
down_read(&io->io_rwsem);
|
||||
f2fs_down_read(&io->io_rwsem);
|
||||
ret = __has_merged_page(io->bio, inode, page, ino);
|
||||
up_read(&io->io_rwsem);
|
||||
f2fs_up_read(&io->io_rwsem);
|
||||
}
|
||||
if (ret)
|
||||
__f2fs_submit_merged_write(sbi, type, temp);
|
||||
@@ -745,9 +745,9 @@ static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
|
||||
if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
|
||||
f2fs_bug_on(sbi, 1);
|
||||
|
||||
down_write(&io->bio_list_lock);
|
||||
f2fs_down_write(&io->bio_list_lock);
|
||||
list_add_tail(&be->list, &io->bio_list);
|
||||
up_write(&io->bio_list_lock);
|
||||
f2fs_up_write(&io->bio_list_lock);
|
||||
}
|
||||
|
||||
static void del_bio_entry(struct bio_entry *be)
|
||||
@@ -769,7 +769,7 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
|
||||
struct list_head *head = &io->bio_list;
|
||||
struct bio_entry *be;
|
||||
|
||||
down_write(&io->bio_list_lock);
|
||||
f2fs_down_write(&io->bio_list_lock);
|
||||
list_for_each_entry(be, head, list) {
|
||||
if (be->bio != *bio)
|
||||
continue;
|
||||
@@ -793,7 +793,7 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
|
||||
__submit_bio(sbi, *bio, DATA);
|
||||
break;
|
||||
}
|
||||
up_write(&io->bio_list_lock);
|
||||
f2fs_up_write(&io->bio_list_lock);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
@@ -819,7 +819,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
|
||||
if (list_empty(head))
|
||||
continue;
|
||||
|
||||
down_read(&io->bio_list_lock);
|
||||
f2fs_down_read(&io->bio_list_lock);
|
||||
list_for_each_entry(be, head, list) {
|
||||
if (target)
|
||||
found = (target == be->bio);
|
||||
@@ -829,14 +829,14 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
|
||||
if (found)
|
||||
break;
|
||||
}
|
||||
up_read(&io->bio_list_lock);
|
||||
f2fs_up_read(&io->bio_list_lock);
|
||||
|
||||
if (!found)
|
||||
continue;
|
||||
|
||||
found = false;
|
||||
|
||||
down_write(&io->bio_list_lock);
|
||||
+		f2fs_down_write(&io->bio_list_lock);
 		list_for_each_entry(be, head, list) {
 			if (target)
 				found = (target == be->bio);
@@ -849,7 +849,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
 				break;
 			}
 		}
-		up_write(&io->bio_list_lock);
+		f2fs_up_write(&io->bio_list_lock);
 	}

 	if (found)
@@ -909,7 +909,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)

 	f2fs_bug_on(sbi, is_read_io(fio->op));

-	down_write(&io->io_rwsem);
+	f2fs_down_write(&io->io_rwsem);
 next:
 	if (fio->in_list) {
 		spin_lock(&io->io_lock);
@@ -976,7 +976,7 @@ out:
 	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
 				!f2fs_is_checkpoint_ready(sbi))
 		__submit_merged_bio(io);
-	up_write(&io->io_rwsem);
+	f2fs_up_write(&io->io_rwsem);
 }

 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
@@ -1356,7 +1356,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
 	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
 		return -EPERM;

-	err = f2fs_get_node_info(sbi, dn->nid, &ni);
+	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
 	if (err)
 		return err;

@@ -1437,9 +1437,9 @@ void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
 {
 	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
 		if (lock)
-			down_read(&sbi->node_change);
+			f2fs_down_read(&sbi->node_change);
 		else
-			up_read(&sbi->node_change);
+			f2fs_up_read(&sbi->node_change);
 	} else {
 		if (lock)
 			f2fs_lock_op(sbi);
@@ -1791,7 +1791,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
 		if (!page)
 			return -ENOMEM;

-		err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+		err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
 		if (err) {
 			f2fs_put_page(page, 1);
 			return err;
@@ -1823,7 +1823,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
 		if (!page)
 			return -ENOMEM;

-		err = f2fs_get_node_info(sbi, xnid, &ni);
+		err = f2fs_get_node_info(sbi, xnid, &ni, false);
 		if (err) {
 			f2fs_put_page(page, 1);
 			return err;
@@ -2655,7 +2655,7 @@ got_it:
 		fio->need_lock = LOCK_REQ;
 	}

-	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
+	err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
 	if (err)
 		goto out_writepage;

@@ -2768,13 +2768,13 @@ write:
 		 * the below discard race condition.
 		 */
 		if (IS_NOQUOTA(inode))
-			down_read(&sbi->node_write);
+			f2fs_down_read(&sbi->node_write);

 		fio.need_lock = LOCK_DONE;
 		err = f2fs_do_write_data_page(&fio);

 		if (IS_NOQUOTA(inode))
-			up_read(&sbi->node_write);
+			f2fs_up_read(&sbi->node_write);

 		goto done;
 	}
@@ -3232,14 +3232,14 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to)

 	/* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
 	if (to > i_size && !f2fs_verity_in_progress(inode)) {
-		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-		down_write(&F2FS_I(inode)->i_mmap_sem);
+		f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+		f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 		truncate_pagecache(inode, i_size);
 		f2fs_truncate_blocks(inode, i_size, true);

-		up_write(&F2FS_I(inode)->i_mmap_sem);
-		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+		f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	}
 }

@@ -3646,21 +3646,21 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 		iocb->ki_hint = WRITE_LIFE_NOT_SET;

 	if (iocb->ki_flags & IOCB_NOWAIT) {
-		if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
+		if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[rw])) {
 			iocb->ki_hint = hint;
 			err = -EAGAIN;
 			goto out;
 		}
-		if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
-			up_read(&fi->i_gc_rwsem[rw]);
+		if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
+			f2fs_up_read(&fi->i_gc_rwsem[rw]);
 			iocb->ki_hint = hint;
 			err = -EAGAIN;
 			goto out;
 		}
 	} else {
-		down_read(&fi->i_gc_rwsem[rw]);
+		f2fs_down_read(&fi->i_gc_rwsem[rw]);
 		if (do_opu)
-			down_read(&fi->i_gc_rwsem[READ]);
+			f2fs_down_read(&fi->i_gc_rwsem[READ]);
 	}

 	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
@@ -3670,9 +3670,9 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 			DIO_SKIP_HOLES);

 	if (do_opu)
-		up_read(&fi->i_gc_rwsem[READ]);
+		f2fs_up_read(&fi->i_gc_rwsem[READ]);

-	up_read(&fi->i_gc_rwsem[rw]);
+	f2fs_up_read(&fi->i_gc_rwsem[rw]);

 	if (rw == WRITE) {
 		if (whint_mode == WHINT_MODE_OFF)
@@ -3944,13 +3944,13 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
 	unsigned int end_sec = secidx + blkcnt / blk_per_sec;
 	int ret = 0;

-	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 	set_inode_flag(inode, FI_ALIGNED_WRITE);

 	for (; secidx < end_sec; secidx++) {
-		down_write(&sbi->pin_sem);
+		f2fs_down_write(&sbi->pin_sem);

 		f2fs_lock_op(sbi);
 		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
@@ -3964,7 +3964,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,

 			page = f2fs_get_lock_data_page(inode, blkidx, true);
 			if (IS_ERR(page)) {
-				up_write(&sbi->pin_sem);
+				f2fs_up_write(&sbi->pin_sem);
 				ret = PTR_ERR(page);
 				goto done;
 			}
@@ -3977,7 +3977,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,

 		ret = filemap_fdatawrite(inode->i_mapping);

-		up_write(&sbi->pin_sem);
+		f2fs_up_write(&sbi->pin_sem);

 		if (ret)
 			break;
@@ -3987,8 +3987,8 @@ done:
 	clear_inode_flag(inode, FI_DO_DEFRAG);
 	clear_inode_flag(inode, FI_ALIGNED_WRITE);

-	up_write(&F2FS_I(inode)->i_mmap_sem);
-	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

 	return ret;
 }
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -768,7 +768,7 @@ add_dentry:
 	f2fs_wait_on_page_writeback(dentry_page, DATA, true, true);

 	if (inode) {
-		down_write(&F2FS_I(inode)->i_sem);
+		f2fs_down_write(&F2FS_I(inode)->i_sem);
 		page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
 		if (IS_ERR(page)) {
 			err = PTR_ERR(page);
@@ -795,7 +795,7 @@ add_dentry:
 	f2fs_update_parent_metadata(dir, inode, current_depth);
 fail:
 	if (inode)
-		up_write(&F2FS_I(inode)->i_sem);
+		f2fs_up_write(&F2FS_I(inode)->i_sem);

 	f2fs_put_page(dentry_page, 1);

@@ -860,7 +860,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
 	struct page *page;
 	int err = 0;

-	down_write(&F2FS_I(inode)->i_sem);
+	f2fs_down_write(&F2FS_I(inode)->i_sem);
 	page = f2fs_init_inode_metadata(inode, dir, NULL, NULL);
 	if (IS_ERR(page)) {
 		err = PTR_ERR(page);
@@ -871,7 +871,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
 	clear_inode_flag(inode, FI_NEW_INODE);
 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 fail:
-	up_write(&F2FS_I(inode)->i_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_sem);
 	return err;
 }

@@ -879,7 +879,7 @@ void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);

-	down_write(&F2FS_I(inode)->i_sem);
+	f2fs_down_write(&F2FS_I(inode)->i_sem);

 	if (S_ISDIR(inode->i_mode))
 		f2fs_i_links_write(dir, false);
@@ -890,7 +890,7 @@ void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
 		f2fs_i_links_write(inode, false);
 		f2fs_i_size_write(inode, 0);
 	}
-	up_write(&F2FS_I(inode)->i_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_sem);

 	if (inode->i_nlink == 0)
 		f2fs_add_orphan_inode(inode);
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -117,6 +117,18 @@ typedef u32 nid_t;

 #define COMPRESS_EXT_NUM		16

+/*
+ * An implementation of an rwsem that is explicitly unfair to readers. This
+ * prevents priority inversion when a low-priority reader acquires the read lock
+ * while sleeping on the write lock but the write lock is needed by
+ * higher-priority clients.
+ */
+
+struct f2fs_rwsem {
+	struct rw_semaphore internal_rwsem;
+	wait_queue_head_t read_waiters;
+};
+
 struct f2fs_mount_info {
 	unsigned int opt;
 	int write_io_size_bits;	/* Write IO size bits */
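For orientation, a minimal usage sketch of the new primitive (hypothetical caller, not part of the patch; the wrapper functions themselves are added further down in this file):

	struct f2fs_rwsem lock;

	init_f2fs_rwsem(&lock);		/* init_rwsem() + init_waitqueue_head() */

	f2fs_down_read(&lock);		/* wait_event() until down_read_trylock() succeeds */
	/* ... read-side critical section ... */
	f2fs_up_read(&lock);		/* plain up_read() */

	f2fs_down_write(&lock);		/* plain down_write() */
	/* ... write-side critical section ... */
	f2fs_up_write(&lock);		/* up_write(), then wake_up_all(read_waiters) */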
@@ -726,7 +738,7 @@ struct f2fs_inode_info {

 	/* Use below internally in f2fs*/
 	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
-	struct rw_semaphore i_sem;	/* protect fi info */
+	struct f2fs_rwsem i_sem;	/* protect fi info */
 	atomic_t dirty_pages;		/* # of dirty pages */
 	f2fs_hash_t chash;		/* hash value of given file name */
 	unsigned int clevel;		/* maximum level of given file name */
@@ -751,9 +763,9 @@ struct f2fs_inode_info {
 	struct extent_tree *extent_tree;	/* cached extent_tree entry */

 	/* avoid racing between foreground op and gc */
-	struct rw_semaphore i_gc_rwsem[2];
-	struct rw_semaphore i_mmap_sem;
-	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */
+	struct f2fs_rwsem i_gc_rwsem[2];
+	struct f2fs_rwsem i_mmap_sem;
+	struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */

 	int i_extra_isize;		/* size of extra space located in i_addr */
 	kprojid_t i_projid;		/* id for project quota */
@@ -870,7 +882,7 @@ struct f2fs_nm_info {
 	/* NAT cache management */
 	struct radix_tree_root nat_root;/* root of the nat entry cache */
 	struct radix_tree_root nat_set_root;/* root of the nat set cache */
-	struct rw_semaphore nat_tree_lock;	/* protect nat entry tree */
+	struct f2fs_rwsem nat_tree_lock;	/* protect nat entry tree */
 	struct list_head nat_entries;	/* cached nat entry list (clean) */
 	spinlock_t nat_list_lock;	/* protect clean nat entry list */
 	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
@@ -983,7 +995,7 @@ struct f2fs_sm_info {
 	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
 	struct curseg_info *curseg_array;	/* active segment information */

-	struct rw_semaphore curseg_lock;	/* for preventing curseg change */
+	struct f2fs_rwsem curseg_lock;	/* for preventing curseg change */

 	block_t seg0_blkaddr;		/* block address of 0'th segment */
 	block_t main_blkaddr;		/* start block address of main area */
@@ -1167,11 +1179,11 @@ struct f2fs_bio_info {
 	struct bio *bio;		/* bios to merge */
 	sector_t last_block_in_bio;	/* last block number */
 	struct f2fs_io_info fio;	/* store buffered io info. */
-	struct rw_semaphore io_rwsem;	/* blocking op for bio */
+	struct f2fs_rwsem io_rwsem;	/* blocking op for bio */
 	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
 	struct list_head io_list;	/* track fios */
 	struct list_head bio_list;	/* bio entry list head */
-	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
+	struct f2fs_rwsem bio_list_lock;	/* lock to protect bio entry list */
 };

 #define FDEV(i)				(sbi->devs[i])
@@ -1528,7 +1540,7 @@ struct f2fs_sb_info {
 	struct super_block *sb;			/* pointer to VFS super block */
 	struct proc_dir_entry *s_proc;		/* proc entry */
 	struct f2fs_super_block *raw_super;	/* raw super block pointer */
-	struct rw_semaphore sb_lock;		/* lock for raw super block */
+	struct f2fs_rwsem sb_lock;		/* lock for raw super block */
 	int valid_super_block;			/* valid super block no */
 	unsigned long s_flag;			/* flags for sbi */
 	struct mutex writepages;		/* mutex for writepages() */
@@ -1548,7 +1560,7 @@ struct f2fs_sb_info {
 	/* for bio operations */
 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
 	/* keep migration IO order for LFS mode */
-	struct rw_semaphore io_order_lock;
+	struct f2fs_rwsem io_order_lock;
 	mempool_t *write_io_dummy;		/* Dummy pages */

 	/* for checkpoint */
@@ -1556,10 +1568,10 @@ struct f2fs_sb_info {
 	int cur_cp_pack;			/* remain current cp pack */
 	spinlock_t cp_lock;			/* for flag in ckpt */
 	struct inode *meta_inode;		/* cache meta blocks */
-	struct rw_semaphore cp_global_sem;	/* checkpoint procedure lock */
-	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
-	struct rw_semaphore node_write;		/* locking node writes */
-	struct rw_semaphore node_change;	/* locking node change */
+	struct f2fs_rwsem cp_global_sem;	/* checkpoint procedure lock */
+	struct f2fs_rwsem cp_rwsem;		/* blocking FS operations */
+	struct f2fs_rwsem node_write;		/* locking node writes */
+	struct f2fs_rwsem node_change;		/* locking node change */
 	wait_queue_head_t cp_wait;
 	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
 	long interval_time[MAX_TIME];		/* to store thresholds */
@@ -1619,7 +1631,7 @@ struct f2fs_sb_info {
 	block_t unusable_block_count;		/* # of blocks saved by last cp */

 	unsigned int nquota_files;		/* # of quota sysfile */
-	struct rw_semaphore quota_sem;		/* blocking cp for flags */
+	struct f2fs_rwsem quota_sem;		/* blocking cp for flags */

 	/* # of pages, see count_type */
 	atomic_t nr_pages[NR_COUNT_TYPE];
@@ -1635,7 +1647,7 @@ struct f2fs_sb_info {
 	struct f2fs_mount_info mount_opt;	/* mount options */

 	/* for cleaning operations */
-	struct rw_semaphore gc_lock;		/*
+	struct f2fs_rwsem gc_lock;		/*
 						 * semaphore for GC, avoid
 						 * race between GC and GC or CP
 						 */
@@ -1652,7 +1664,7 @@ struct f2fs_sb_info {

 	/* threshold for gc trials on pinned files */
 	u64 gc_pin_file_threshold;
-	struct rw_semaphore pin_sem;
+	struct f2fs_rwsem pin_sem;

 	/* maximum # of trials to find a victim segment for SSR and GC */
 	unsigned int max_victim_search;
@@ -2069,29 +2081,85 @@ static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
 	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
 }

+static inline void init_f2fs_rwsem(struct f2fs_rwsem *sem)
+{
+	init_rwsem(&sem->internal_rwsem);
+	init_waitqueue_head(&sem->read_waiters);
+}
+
+static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
+{
+	return rwsem_is_locked(&sem->internal_rwsem);
+}
+
+static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
+{
+	return rwsem_is_contended(&sem->internal_rwsem);
+}
+
+static inline void f2fs_down_read(struct f2fs_rwsem *sem)
+{
+	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
+}
+
+static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
+{
+	return down_read_trylock(&sem->internal_rwsem);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
+{
+	down_read_nested(&sem->internal_rwsem, subclass);
+}
+#else
+#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
+#endif
+
+static inline void f2fs_up_read(struct f2fs_rwsem *sem)
+{
+	up_read(&sem->internal_rwsem);
+}
+
+static inline void f2fs_down_write(struct f2fs_rwsem *sem)
+{
+	down_write(&sem->internal_rwsem);
+}
+
+static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
+{
+	return down_write_trylock(&sem->internal_rwsem);
+}
+
+static inline void f2fs_up_write(struct f2fs_rwsem *sem)
+{
+	up_write(&sem->internal_rwsem);
+	wake_up_all(&sem->read_waiters);
+}
+
 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
 {
-	down_read(&sbi->cp_rwsem);
+	f2fs_down_read(&sbi->cp_rwsem);
 }

 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
 {
-	return down_read_trylock(&sbi->cp_rwsem);
+	return f2fs_down_read_trylock(&sbi->cp_rwsem);
 }

 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
 {
-	up_read(&sbi->cp_rwsem);
+	f2fs_up_read(&sbi->cp_rwsem);
 }

 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
 {
-	down_write(&sbi->cp_rwsem);
+	f2fs_down_write(&sbi->cp_rwsem);
 }

 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 {
-	up_write(&sbi->cp_rwsem);
+	f2fs_up_write(&sbi->cp_rwsem);
 }

 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
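Taken together, these wrappers are what makes the lock reader-unfair: readers acquire only via trylock and never join the internal rwsem's wait list, so a waiting writer cannot end up queued behind readers. A rough, hypothetical timeline for illustration (not part of the patch; exact ordering is racy):

	/*
	 * writer W: f2fs_down_write() -> holds internal_rwsem for writing
	 * reader B: f2fs_down_read()  -> down_read_trylock() fails, so B
	 *                                sleeps on read_waiters instead of
	 *                                queuing inside the rwsem
	 * writer X: f2fs_down_write() -> queues on the rwsem itself
	 * writer W: f2fs_up_write()   -> wake_up_all(read_waiters); B's
	 *                                retried trylock will typically fail
	 *                                again once X takes the write lock,
	 *                                so writers overtake waiting readers
	 */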
@@ -3421,7 +3489,7 @@ int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
-						struct node_info *ni);
+				struct node_info *ni, bool checkpoint_context);
 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
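Callers must now say whether they run in checkpoint context; the flag is consumed by the nat_tree_lock backoff logic in node.c further below. A minimal sketch of the common non-checkpoint convention used by almost every converted call site (hypothetical caller code):

	struct node_info ni;
	int err;

	/* false: ordinary context, may back off while a checkpoint runs */
	err = f2fs_get_node_info(sbi, nid, &ni, false);
	if (err)
		return err;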
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -37,9 +37,9 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	vm_fault_t ret;

-	down_read(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
 	ret = filemap_fault(vmf);
-	up_read(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);

 	if (!ret)
 		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
@@ -100,7 +100,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

 	file_update_time(vmf->vma->vm_file);
-	down_read(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
 	lock_page(page);
 	if (unlikely(page->mapping != inode->i_mapping ||
 			page_offset(page) > i_size_read(inode) ||
@@ -158,7 +158,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)

 	trace_f2fs_vm_page_mkwrite(page, DATA);
 out_sem:
-	up_read(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);

 	sb_end_pagefault(inode->i_sb);
 err:
@@ -239,13 +239,13 @@ static void try_to_fix_pino(struct inode *inode)
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	nid_t pino;

-	down_write(&fi->i_sem);
+	f2fs_down_write(&fi->i_sem);
 	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
 			get_parent_ino(inode, &pino)) {
 		f2fs_i_pino_write(inode, pino);
 		file_got_pino(inode);
 	}
-	up_write(&fi->i_sem);
+	f2fs_up_write(&fi->i_sem);
 }

 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
@@ -308,9 +308,9 @@ go_write:
 	 * Both of fdatasync() and fsync() are able to be recovered from
 	 * sudden-power-off.
 	 */
-	down_read(&F2FS_I(inode)->i_sem);
+	f2fs_down_read(&F2FS_I(inode)->i_sem);
 	cp_reason = need_do_checkpoint(inode);
-	up_read(&F2FS_I(inode)->i_sem);
+	f2fs_up_read(&F2FS_I(inode)->i_sem);

 	if (cp_reason) {
 		/* all the dirty node pages should be flushed for POR */
@@ -938,8 +938,8 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 			return err;
 		}

-		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-		down_write(&F2FS_I(inode)->i_mmap_sem);
+		f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+		f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 		truncate_setsize(inode, attr->ia_size);

@@ -949,8 +949,8 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
 		 * do not trim all blocks after i_size if target size is
 		 * larger than i_size.
 		 */
-		up_write(&F2FS_I(inode)->i_mmap_sem);
-		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+		f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		if (err)
 			return err;

@@ -1090,8 +1090,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 			blk_start = (loff_t)pg_start << PAGE_SHIFT;
 			blk_end = (loff_t)pg_end << PAGE_SHIFT;

-			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-			down_write(&F2FS_I(inode)->i_mmap_sem);
+			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+			f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 			truncate_pagecache_range(inode, blk_start, blk_end - 1);

@@ -1099,8 +1099,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
 			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
 			f2fs_unlock_op(sbi);

-			up_write(&F2FS_I(inode)->i_mmap_sem);
-			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+			f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		}
 	}

@@ -1211,7 +1211,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
 			if (ret)
 				return ret;

-			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
+			ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
 			if (ret) {
 				f2fs_put_dnode(&dn);
 				return ret;
@@ -1333,8 +1333,8 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 	f2fs_balance_fs(sbi, true);

 	/* avoid gc operation during block exchange */
-	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 	f2fs_lock_op(sbi);
 	f2fs_drop_extent_tree(inode);
@@ -1342,8 +1342,8 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
 	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
 	f2fs_unlock_op(sbi);

-	up_write(&F2FS_I(inode)->i_mmap_sem);
-	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	return ret;
 }

@@ -1373,13 +1373,13 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 		return ret;

 	/* write out all moved pages, if possible */
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 	truncate_pagecache(inode, offset);

 	new_size = i_size_read(inode) - len;
 	ret = f2fs_truncate_blocks(inode, new_size, true);
-	up_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
 	if (!ret)
 		f2fs_i_size_write(inode, new_size);
 	return ret;
@@ -1478,8 +1478,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 			unsigned int end_offset;
 			pgoff_t end;

-			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-			down_write(&F2FS_I(inode)->i_mmap_sem);
+			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+			f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 			truncate_pagecache_range(inode,
 				(loff_t)index << PAGE_SHIFT,
@@ -1491,8 +1491,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
 			if (ret) {
 				f2fs_unlock_op(sbi);
-				up_write(&F2FS_I(inode)->i_mmap_sem);
-				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+				f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 				goto out;
 			}

@@ -1503,8 +1503,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
 			f2fs_put_dnode(&dn);

 			f2fs_unlock_op(sbi);
-			up_write(&F2FS_I(inode)->i_mmap_sem);
-			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+			f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

 			f2fs_balance_fs(sbi, dn.node_changed);

@@ -1560,9 +1560,9 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)

 	f2fs_balance_fs(sbi, true);

-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
 	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
-	up_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
 	if (ret)
 		return ret;

@@ -1577,8 +1577,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

 	/* avoid gc operation during block exchange */
-	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
 	truncate_pagecache(inode, offset);

 	while (!ret && idx > pg_start) {
@@ -1594,14 +1594,14 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
 				idx + delta, nr, false);
 		f2fs_unlock_op(sbi);
 	}
-	up_write(&F2FS_I(inode)->i_mmap_sem);
-	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

 	/* write out all moved pages, if possible */
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
 	truncate_pagecache(inode, offset);
-	up_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);

 	if (!ret)
 		f2fs_i_size_write(inode, new_size);
@@ -1651,13 +1651,13 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 next_alloc:
 	if (has_not_enough_free_secs(sbi, 0,
 			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
-		down_write(&sbi->gc_lock);
+		f2fs_down_write(&sbi->gc_lock);
 		err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
 		if (err && err != -ENODATA && err != -EAGAIN)
 			goto out_err;
 	}

-	down_write(&sbi->pin_sem);
+	f2fs_down_write(&sbi->pin_sem);

 	f2fs_lock_op(sbi);
 	f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
@@ -1666,7 +1666,7 @@ next_alloc:
 	map.m_seg_type = CURSEG_COLD_DATA_PINNED;
 	err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);

-	up_write(&sbi->pin_sem);
+	f2fs_up_write(&sbi->pin_sem);

 	expanded += map.m_len;
 	sec_len -= map.m_len;
@@ -2050,7 +2050,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 	if (ret)
 		goto out;

-	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

 	/*
 	 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
@@ -2061,7 +2061,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 			  inode->i_ino, get_dirty_pages(inode));
 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
 	if (ret) {
-		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+		f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		goto out;
 	}

@@ -2074,7 +2074,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
 	/* add inode in inmem_list first and set atomic_file */
 	set_inode_flag(inode, FI_ATOMIC_FILE);
 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
-	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
 	F2FS_I(inode)->inmem_task = current;
@@ -2381,7 +2381,7 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
 	if (err)
 		return err;

-	down_write(&sbi->sb_lock);
+	f2fs_down_write(&sbi->sb_lock);

 	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
 		goto got_it;
@@ -2400,7 +2400,7 @@ got_it:
 									16))
 		err = -EFAULT;
 out_err:
-	up_write(&sbi->sb_lock);
+	f2fs_up_write(&sbi->sb_lock);
 	mnt_drop_write_file(filp);
 	return err;
 }
@@ -2477,12 +2477,12 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
 		return ret;

 	if (!sync) {
-		if (!down_write_trylock(&sbi->gc_lock)) {
+		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 			ret = -EBUSY;
 			goto out;
 		}
 	} else {
-		down_write(&sbi->gc_lock);
+		f2fs_down_write(&sbi->gc_lock);
 	}

 	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
@@ -2513,12 +2513,12 @@ static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)

 do_more:
 	if (!range->sync) {
-		if (!down_write_trylock(&sbi->gc_lock)) {
+		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 			ret = -EBUSY;
 			goto out;
 		}
 	} else {
-		down_write(&sbi->gc_lock);
+		f2fs_down_write(&sbi->gc_lock);
 	}

 	ret = f2fs_gc(sbi, range->sync, true, false,
@@ -2850,10 +2850,10 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,

 	f2fs_balance_fs(sbi, true);

-	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
 	if (src != dst) {
 		ret = -EBUSY;
-		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
+		if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
 			goto out_src;
 	}

@@ -2871,9 +2871,9 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
 	f2fs_unlock_op(sbi);

 	if (src != dst)
-		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
+		f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
 out_src:
-	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+	f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
 out_unlock:
 	if (src != dst)
 		inode_unlock(dst);
@@ -2968,7 +2968,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
 	end_segno = min(start_segno + range.segments, dev_end_segno);

 	while (start_segno < end_segno) {
-		if (!down_write_trylock(&sbi->gc_lock)) {
+		if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 			ret = -EBUSY;
 			goto out;
 		}
@@ -3314,9 +3314,9 @@ int f2fs_precache_extents(struct inode *inode)
 	while (map.m_lblk < end) {
 		map.m_len = end - map.m_lblk;

-		down_write(&fi->i_gc_rwsem[WRITE]);
+		f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
-		up_write(&fi->i_gc_rwsem[WRITE]);
+		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
 		if (err)
 			return err;

@@ -3393,11 +3393,11 @@ static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
 	if (!vbuf)
 		return -ENOMEM;

-	down_read(&sbi->sb_lock);
+	f2fs_down_read(&sbi->sb_lock);
 	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
 			ARRAY_SIZE(sbi->raw_super->volume_name),
 			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
-	up_read(&sbi->sb_lock);
+	f2fs_up_read(&sbi->sb_lock);

 	if (copy_to_user((char __user *)arg, vbuf,
 				min(FSLABEL_MAX, count)))
@@ -3425,7 +3425,7 @@ static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
 	if (err)
 		goto out;

-	down_write(&sbi->sb_lock);
+	f2fs_down_write(&sbi->sb_lock);

 	memset(sbi->raw_super->volume_name, 0,
 			sizeof(sbi->raw_super->volume_name));
@@ -3435,7 +3435,7 @@ static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)

 	err = f2fs_commit_super(sbi, false);

-	up_write(&sbi->sb_lock);
+	f2fs_up_write(&sbi->sb_lock);

 	mnt_drop_write_file(filp);
 out:
@@ -3561,8 +3561,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
 	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
 		goto out;

-	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

@@ -3597,8 +3597,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
 		released_blocks += ret;
 	}

-	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	up_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 out:
 	inode_unlock(inode);

@@ -3714,8 +3714,8 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
 		goto unlock_inode;
 	}

-	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

@@ -3750,8 +3750,8 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
 		reserved_blocks += ret;
 	}

-	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	up_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

 	if (ret >= 0) {
 		clear_inode_flag(inode, FI_COMPRESS_RELEASED);
@@ -3869,8 +3869,8 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
 	if (ret)
 		goto err;

-	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-	down_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);

 	ret = filemap_write_and_wait_range(mapping, range.start,
 			to_end ? LLONG_MAX : end_addr - 1);
@@ -3957,8 +3957,8 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
 		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
 				prev_block, len, range.flags);
 out:
-	up_write(&F2FS_I(inode)->i_mmap_sem);
-	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 err:
 	inode_unlock(inode);
 	file_end_write(filp);
@@ -4442,11 +4442,11 @@ write:

 		/* if we couldn't write data, we should deallocate blocks. */
 		if (preallocated && i_size_read(inode) < target_size) {
-			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-			down_write(&F2FS_I(inode)->i_mmap_sem);
+			f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+			f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
 			f2fs_truncate(inode);
-			up_write(&F2FS_I(inode)->i_mmap_sem);
-			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+			f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
+			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 		}

 		if (ret > 0)
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -91,21 +91,21 @@ static int gc_thread_func(void *data)
 		 */
 		if (sbi->gc_mode == GC_URGENT_HIGH) {
 			wait_ms = gc_th->urgent_sleep_time;
-			down_write(&sbi->gc_lock);
+			f2fs_down_write(&sbi->gc_lock);
 			goto do_gc;
 		}

 		if (foreground) {
-			down_write(&sbi->gc_lock);
+			f2fs_down_write(&sbi->gc_lock);
 			goto do_gc;
-		} else if (!down_write_trylock(&sbi->gc_lock)) {
+		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
 			stat_other_skip_bggc_count(sbi);
 			goto next;
 		}

 		if (!is_idle(sbi, GC_TIME)) {
 			increase_sleep_time(gc_th, &wait_ms);
-			up_write(&sbi->gc_lock);
+			f2fs_up_write(&sbi->gc_lock);
 			stat_io_skip_bggc_count(sbi);
 			goto next;
 		}
@@ -941,7 +941,7 @@ next_step:
 			continue;
 		}

-		if (f2fs_get_node_info(sbi, nid, &ni)) {
+		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
 			f2fs_put_page(node_page, 1);
 			continue;
 		}
@@ -1009,7 +1009,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 	if (IS_ERR(node_page))
 		return false;

-	if (f2fs_get_node_info(sbi, nid, dni)) {
+	if (f2fs_get_node_info(sbi, nid, dni, false)) {
 		f2fs_put_page(node_page, 1);
 		return false;
 	}
@@ -1203,7 +1203,7 @@ static int move_data_block(struct inode *inode, block_t bidx,

 	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

-	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
 	if (err)
 		goto put_out;

@@ -1212,7 +1212,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
 	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

 	if (lfs_mode)
-		down_write(&fio.sbi->io_order_lock);
+		f2fs_down_write(&fio.sbi->io_order_lock);

 	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
 					fio.old_blkaddr, false);
@@ -1298,7 +1298,7 @@ recover_block:
 							true, true, true);
 up_out:
 	if (lfs_mode)
-		up_write(&fio.sbi->io_order_lock);
+		f2fs_up_write(&fio.sbi->io_order_lock);
 put_out:
 	f2fs_put_dnode(&dn);
 out:
@@ -1457,7 +1457,7 @@ next_step:
 			if (IS_ERR(inode) || is_bad_inode(inode))
 				continue;

-			if (!down_write_trylock(
+			if (!f2fs_down_write_trylock(
 				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
 				iput(inode);
 				sbi->skipped_gc_rwsem++;
@@ -1470,7 +1470,7 @@ next_step:
 			if (f2fs_post_read_required(inode)) {
 				int err = ra_data_block(inode, start_bidx);

-				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 				if (err) {
 					iput(inode);
 					continue;
@@ -1481,7 +1481,7 @@ next_step:

 			data_page = f2fs_get_read_data_page(inode,
 						start_bidx, REQ_RAHEAD, true);
-			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 			if (IS_ERR(data_page)) {
 				iput(inode);
 				continue;
@@ -1500,14 +1500,14 @@ next_step:
 			int err;

 			if (S_ISREG(inode->i_mode)) {
-				if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
+				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) {
 					sbi->skipped_gc_rwsem++;
 					continue;
 				}
-				if (!down_write_trylock(
+				if (!f2fs_down_write_trylock(
 						&fi->i_gc_rwsem[WRITE])) {
 					sbi->skipped_gc_rwsem++;
-					up_write(&fi->i_gc_rwsem[READ]);
+					f2fs_up_write(&fi->i_gc_rwsem[READ]);
 					continue;
 				}
 				locked = true;
@@ -1530,8 +1530,8 @@ next_step:
 				submitted++;

 			if (locked) {
-				up_write(&fi->i_gc_rwsem[WRITE]);
-				up_write(&fi->i_gc_rwsem[READ]);
+				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
+				f2fs_up_write(&fi->i_gc_rwsem[READ]);
 			}

 			stat_inc_data_blk_count(sbi, 1, gc_type);
@@ -1789,7 +1789,7 @@ stop:
 				reserved_segments(sbi),
 				prefree_segments(sbi));

-	up_write(&sbi->gc_lock);
+	f2fs_up_write(&sbi->gc_lock);

 	put_gc_inode(&gc_list);

@@ -1918,7 +1918,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
 	long long block_count;
 	int segs = secs * sbi->segs_per_sec;

-	down_write(&sbi->sb_lock);
+	f2fs_down_write(&sbi->sb_lock);

 	section_count = le32_to_cpu(raw_sb->section_count);
 	segment_count = le32_to_cpu(raw_sb->segment_count);
@@ -1939,7 +1939,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
 						cpu_to_le32(dev_segs + segs);
 	}

-	up_write(&sbi->sb_lock);
+	f2fs_up_write(&sbi->sb_lock);
 }

 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
@@ -2013,7 +2013,7 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
 	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

 	/* stop other GC */
-	if (!down_write_trylock(&sbi->gc_lock))
+	if (!f2fs_down_write_trylock(&sbi->gc_lock))
 		return -EAGAIN;

 	/* stop CP to protect MAIN_SEC in free_segment_range */
@@ -2033,15 +2033,15 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)

 out_unlock:
 	f2fs_unlock_op(sbi);
-	up_write(&sbi->gc_lock);
+	f2fs_up_write(&sbi->gc_lock);
 	if (err)
 		return err;

 	set_sbi_flag(sbi, SBI_IS_RESIZEFS);

 	freeze_super(sbi->sb);
-	down_write(&sbi->gc_lock);
-	down_write(&sbi->cp_global_sem);
+	f2fs_down_write(&sbi->gc_lock);
+	f2fs_down_write(&sbi->cp_global_sem);

 	spin_lock(&sbi->stat_lock);
 	if (shrunk_blocks + valid_user_blocks(sbi) +
@@ -2086,8 +2086,8 @@ recover_out:
 		spin_unlock(&sbi->stat_lock);
 	}
 out_err:
-	up_write(&sbi->cp_global_sem);
-	up_write(&sbi->gc_lock);
+	f2fs_up_write(&sbi->cp_global_sem);
+	f2fs_up_write(&sbi->gc_lock);
 	thaw_super(sbi->sb);
 	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
 	return err;
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -149,7 +149,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
 	if (err)
 		return err;

-	err = f2fs_get_node_info(fio.sbi, dn->nid, &ni);
+	err = f2fs_get_node_info(fio.sbi, dn->nid, &ni, false);
 	if (err) {
 		f2fs_truncate_data_blocks_range(dn, 1);
 		f2fs_put_dnode(dn);
@@ -647,7 +647,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
 	}

 	if (inode) {
-		down_write(&F2FS_I(inode)->i_sem);
+		f2fs_down_write(&F2FS_I(inode)->i_sem);
 		page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
 		if (IS_ERR(page)) {
 			err = PTR_ERR(page);
@@ -676,7 +676,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
 	f2fs_update_parent_metadata(dir, inode, 0);
 fail:
 	if (inode)
-		up_write(&F2FS_I(inode)->i_sem);
+		f2fs_up_write(&F2FS_I(inode)->i_sem);
 out:
 	f2fs_put_page(ipage, 1);
 	return err;
@@ -804,7 +804,7 @@ int f2fs_inline_data_fiemap(struct inode *inode,
 		ilen = start + len;
 	ilen -= start;

-	err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+	err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni, false);
 	if (err)
 		goto out;
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -868,7 +868,7 @@ void f2fs_handle_failed_inode(struct inode *inode)
 	 * so we can prevent losing this orphan when encoutering checkpoint
 	 * and following suddenly power-off.
 	 */
-	err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
 	if (err) {
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 		f2fs_warn(sbi, "May loss orphan inode, run fsck to fix.");
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -196,7 +196,7 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *
 	__u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list;
 	int i, cold_count, hot_count;

-	down_read(&sbi->sb_lock);
+	f2fs_down_read(&sbi->sb_lock);

 	cold_count = le32_to_cpu(sbi->raw_super->extension_count);
 	hot_count = sbi->raw_super->hot_ext_count;
@@ -206,7 +206,7 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode *
 			break;
 	}

-	up_read(&sbi->sb_lock);
+	f2fs_up_read(&sbi->sb_lock);

 	if (i == cold_count + hot_count)
 		return;
@@ -297,19 +297,19 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode,
 			!f2fs_may_compress(inode))
 		return;

-	down_read(&sbi->sb_lock);
+	f2fs_down_read(&sbi->sb_lock);

 	cold_count = le32_to_cpu(sbi->raw_super->extension_count);
 	hot_count = sbi->raw_super->hot_ext_count;

 	for (i = cold_count; i < cold_count + hot_count; i++) {
 		if (is_extension_exist(name, extlist[i], false)) {
-			up_read(&sbi->sb_lock);
+			f2fs_up_read(&sbi->sb_lock);
 			return;
 		}
 	}

-	up_read(&sbi->sb_lock);
+	f2fs_up_read(&sbi->sb_lock);

 	ext = F2FS_OPTION(sbi).extensions;

@@ -1012,11 +1012,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		new_page = NULL;

 		new_inode->i_ctime = current_time(new_inode);
-		down_write(&F2FS_I(new_inode)->i_sem);
+		f2fs_down_write(&F2FS_I(new_inode)->i_sem);
 		if (old_dir_entry)
 			f2fs_i_links_write(new_inode, false);
 		f2fs_i_links_write(new_inode, false);
-		up_write(&F2FS_I(new_inode)->i_sem);
+		f2fs_up_write(&F2FS_I(new_inode)->i_sem);

 		if (!new_inode->i_nlink)
 			f2fs_add_orphan_inode(new_inode);
@@ -1037,13 +1037,13 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 			f2fs_i_links_write(new_dir, true);
 	}

-	down_write(&F2FS_I(old_inode)->i_sem);
+	f2fs_down_write(&F2FS_I(old_inode)->i_sem);
 	if (!old_dir_entry || whiteout)
 		file_lost_pino(old_inode);
 	else
 		/* adjust dir's i_pino to pass fsck check */
 		f2fs_i_pino_write(old_inode, new_dir->i_ino);
-	up_write(&F2FS_I(old_inode)->i_sem);
+	f2fs_up_write(&F2FS_I(old_inode)->i_sem);

 	old_inode->i_ctime = current_time(old_inode);
 	f2fs_mark_inode_dirty_sync(old_inode, false);
@@ -1203,38 +1203,38 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
 	/* update directory entry info of old dir inode */
 	f2fs_set_link(old_dir, old_entry, old_page, new_inode);

-	down_write(&F2FS_I(old_inode)->i_sem);
+	f2fs_down_write(&F2FS_I(old_inode)->i_sem);
 	if (!old_dir_entry)
 		file_lost_pino(old_inode);
 	else
 		/* adjust dir's i_pino to pass fsck check */
 		f2fs_i_pino_write(old_inode, new_dir->i_ino);
-	up_write(&F2FS_I(old_inode)->i_sem);
+	f2fs_up_write(&F2FS_I(old_inode)->i_sem);

 	old_dir->i_ctime = current_time(old_dir);
 	if (old_nlink) {
-		down_write(&F2FS_I(old_dir)->i_sem);
+		f2fs_down_write(&F2FS_I(old_dir)->i_sem);
 		f2fs_i_links_write(old_dir, old_nlink > 0);
-		up_write(&F2FS_I(old_dir)->i_sem);
+		f2fs_up_write(&F2FS_I(old_dir)->i_sem);
 	}
 	f2fs_mark_inode_dirty_sync(old_dir, false);

 	/* update directory entry info of new dir inode */
 	f2fs_set_link(new_dir, new_entry, new_page, old_inode);

-	down_write(&F2FS_I(new_inode)->i_sem);
+	f2fs_down_write(&F2FS_I(new_inode)->i_sem);
 	if (!new_dir_entry)
 		file_lost_pino(new_inode);
 	else
 		/* adjust dir's i_pino to pass fsck check */
 		f2fs_i_pino_write(new_inode, old_dir->i_ino);
-	up_write(&F2FS_I(new_inode)->i_sem);
+	f2fs_up_write(&F2FS_I(new_inode)->i_sem);

 	new_dir->i_ctime = current_time(new_dir);
 	if (new_nlink) {
-		down_write(&F2FS_I(new_dir)->i_sem);
+		f2fs_down_write(&F2FS_I(new_dir)->i_sem);
 		f2fs_i_links_write(new_dir, new_nlink > 0);
-		up_write(&F2FS_I(new_dir)->i_sem);
+		f2fs_up_write(&F2FS_I(new_dir)->i_sem);
 	}
 	f2fs_mark_inode_dirty_sync(new_dir, false);

--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -380,14 +380,14 @@ int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
 	struct nat_entry *e;
 	bool need = false;

-	down_read(&nm_i->nat_tree_lock);
+	f2fs_down_read(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, nid);
 	if (e) {
 		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
 				!get_nat_flag(e, HAS_FSYNCED_INODE))
 			need = true;
 	}
-	up_read(&nm_i->nat_tree_lock);
+	f2fs_up_read(&nm_i->nat_tree_lock);
 	return need;
 }

@@ -397,11 +397,11 @@ bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
 	struct nat_entry *e;
 	bool is_cp = true;

-	down_read(&nm_i->nat_tree_lock);
+	f2fs_down_read(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, nid);
 	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
 		is_cp = false;
-	up_read(&nm_i->nat_tree_lock);
+	f2fs_up_read(&nm_i->nat_tree_lock);
 	return is_cp;
 }

@@ -411,13 +411,13 @@ bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
 	struct nat_entry *e;
 	bool need_update = true;

-	down_read(&nm_i->nat_tree_lock);
+	f2fs_down_read(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, ino);
 	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
 			(get_nat_flag(e, IS_CHECKPOINTED) ||
 			 get_nat_flag(e, HAS_FSYNCED_INODE)))
 		need_update = false;
-	up_read(&nm_i->nat_tree_lock);
+	f2fs_up_read(&nm_i->nat_tree_lock);
 	return need_update;
 }

@@ -428,11 +428,15 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct nat_entry *new, *e;

+	/* Let's mitigate lock contention of nat_tree_lock during checkpoint */
+	if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
+		return;
+
 	new = __alloc_nat_entry(nid, false);
 	if (!new)
 		return;

-	down_write(&nm_i->nat_tree_lock);
+	f2fs_down_write(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, nid);
 	if (!e)
 		e = __init_nat_entry(nm_i, new, ne, false);
@@ -441,7 +445,7 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
 			nat_get_blkaddr(e) !=
 				le32_to_cpu(ne->block_addr) ||
 			nat_get_version(e) != ne->version);
-	up_write(&nm_i->nat_tree_lock);
+	f2fs_up_write(&nm_i->nat_tree_lock);
 	if (e != new)
 		__free_nat_entry(new);
 }
@@ -453,7 +457,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 	struct nat_entry *e;
 	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);

-	down_write(&nm_i->nat_tree_lock);
+	f2fs_down_write(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, ni->nid);
 	if (!e) {
 		e = __init_nat_entry(nm_i, new, NULL, true);
@@ -502,7 +506,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 		set_nat_flag(e, HAS_FSYNCED_INODE, true);
 		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
 	}
-	up_write(&nm_i->nat_tree_lock);
+	f2fs_up_write(&nm_i->nat_tree_lock);
 }

 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
@@ -510,7 +514,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	int nr = nr_shrink;

-	if (!down_write_trylock(&nm_i->nat_tree_lock))
+	if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
 		return 0;

 	spin_lock(&nm_i->nat_list_lock);
@@ -532,12 +536,12 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 	}
 	spin_unlock(&nm_i->nat_list_lock);

-	up_write(&nm_i->nat_tree_lock);
+	f2fs_up_write(&nm_i->nat_tree_lock);
 	return nr - nr_shrink;
 }

 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
-						struct node_info *ni)
+				struct node_info *ni, bool checkpoint_context)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -554,13 +558,13 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
 	ni->nid = nid;
 retry:
 	/* Check nat cache */
-	down_read(&nm_i->nat_tree_lock);
+	f2fs_down_read(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, nid);
 	if (e) {
 		ni->ino = nat_get_ino(e);
 		ni->blk_addr = nat_get_blkaddr(e);
 		ni->version = nat_get_version(e);
-		up_read(&nm_i->nat_tree_lock);
+		f2fs_up_read(&nm_i->nat_tree_lock);
 		return 0;
 	}

@@ -570,10 +574,11 @@ retry:
 	 * nat_tree_lock. Therefore, we should retry, if we failed to grab here
 	 * while not bothering checkpoint.
 	 */
-	if (!rwsem_is_locked(&sbi->cp_global_sem)) {
+	if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
 		down_read(&curseg->journal_rwsem);
-	} else if (!down_read_trylock(&curseg->journal_rwsem)) {
-		up_read(&nm_i->nat_tree_lock);
+	} else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
+				!down_read_trylock(&curseg->journal_rwsem)) {
+		f2fs_up_read(&nm_i->nat_tree_lock);
 		goto retry;
 	}

@@ -582,15 +587,15 @@ retry:
 		ne = nat_in_journal(journal, i);
 		node_info_from_raw_nat(ni, &ne);
 	}
 	up_read(&curseg->journal_rwsem);
 	if (i >= 0) {
-		up_read(&nm_i->nat_tree_lock);
+		f2fs_up_read(&nm_i->nat_tree_lock);
 		goto cache;
 	}

 	/* Fill node_info from nat page */
 	index = current_nat_addr(sbi, nid);
-	up_read(&nm_i->nat_tree_lock);
+	f2fs_up_read(&nm_i->nat_tree_lock);

 	page = f2fs_get_meta_page(sbi, index);
 	if (IS_ERR(page))
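The rewritten branch above gives checkpoint-context callers a blocking path while everyone else yields: if a checkpoint holds cp_global_sem and either nat_tree_lock is contended or journal_rwsem is busy, the caller drops its read lock and retries. Almost every call site passes false; the one checkpoint-aware caller visible in this diff is __write_node_page(), which passes !do_balance (see its hunk below). A hedged sketch of the two conventions (hypothetical caller code):

	struct node_info ni;

	/* foreground path: yield nat_tree_lock to a running checkpoint */
	if (f2fs_get_node_info(sbi, nid, &ni, false))
		goto redirty_out;

	/* checkpoint-driven node writeback (do_balance == false): do not
	 * back off against ourselves; take journal_rwsem directly */
	if (f2fs_get_node_info(sbi, nid, &ni, true))
		goto redirty_out;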
@@ -865,7 +870,7 @@ static int truncate_node(struct dnode_of_data *dn)
 	int err;
 	pgoff_t index;

-	err = f2fs_get_node_info(sbi, dn->nid, &ni);
+	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
 	if (err)
 		return err;

@@ -1264,7 +1269,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
 		goto fail;

 #ifdef CONFIG_F2FS_CHECK_FS
-	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
+	err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
 	if (err) {
 		dec_valid_node_count(sbi, dn->inode, !ofs);
 		goto fail;
@@ -1326,7 +1331,7 @@ static int read_node_page(struct page *page, int op_flags)
 		return LOCKED_PAGE;
 	}

-	err = f2fs_get_node_info(sbi, page->index, &ni);
+	err = f2fs_get_node_info(sbi, page->index, &ni, false);
 	if (err)
 		return err;

@@ -1580,21 +1585,21 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 	nid = nid_of_node(page);
 	f2fs_bug_on(sbi, page->index != nid);

-	if (f2fs_get_node_info(sbi, nid, &ni))
+	if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
 		goto redirty_out;

 	if (wbc->for_reclaim) {
-		if (!down_read_trylock(&sbi->node_write))
+		if (!f2fs_down_read_trylock(&sbi->node_write))
 			goto redirty_out;
 	} else {
-		down_read(&sbi->node_write);
+		f2fs_down_read(&sbi->node_write);
 	}

 	/* This page is already truncated */
 	if (unlikely(ni.blk_addr == NULL_ADDR)) {
 		ClearPageUptodate(page);
 		dec_page_count(sbi, F2FS_DIRTY_NODES);
-		up_read(&sbi->node_write);
+		f2fs_up_read(&sbi->node_write);
 		unlock_page(page);
 		return 0;
 	}
@@ -1602,7 +1607,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 	if (__is_valid_data_blkaddr(ni.blk_addr) &&
 		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
 			DATA_GENERIC_ENHANCE)) {
-		up_read(&sbi->node_write);
+		f2fs_up_read(&sbi->node_write);
 		goto redirty_out;
 	}

@@ -1623,7 +1628,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
 	f2fs_do_write_node_page(nid, &fio);
 	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
 	dec_page_count(sbi, F2FS_DIRTY_NODES);
-	up_read(&sbi->node_write);
+	f2fs_up_read(&sbi->node_write);

 	if (wbc->for_reclaim) {
 		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
@@ -2372,7 +2377,7 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
 	unsigned int i, idx;
 	nid_t nid;

-	down_read(&nm_i->nat_tree_lock);
+	f2fs_down_read(&nm_i->nat_tree_lock);

 	for (i = 0; i < nm_i->nat_blocks; i++) {
 		if (!test_bit_le(i, nm_i->nat_block_bitmap))
@@ -2395,7 +2400,7 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
 out:
 	scan_curseg_cache(sbi);

-	up_read(&nm_i->nat_tree_lock);
+	f2fs_up_read(&nm_i->nat_tree_lock);
 }

 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
@@ -2430,7 +2435,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
 		f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
 							META_NAT, true);

-	down_read(&nm_i->nat_tree_lock);
+	f2fs_down_read(&nm_i->nat_tree_lock);

 	while (1) {
 		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
@@ -2445,7 +2450,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
 		}

 		if (ret) {
-			up_read(&nm_i->nat_tree_lock);
+			f2fs_up_read(&nm_i->nat_tree_lock);
 			f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
 			return ret;
 		}
@@ -2465,7 +2470,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
 	/* find free nids from current sum_pages */
 	scan_curseg_cache(sbi);

-	up_read(&nm_i->nat_tree_lock);
+	f2fs_up_read(&nm_i->nat_tree_lock);

 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
 					nm_i->ra_nid_pages, META_NAT, false);
@@ -2663,7 +2668,7 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
 		goto recover_xnid;

 	/* 1: invalidate the previous xattr nid */
-	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
+	err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
 	if (err)
 		return err;

@@ -2703,7 +2708,7 @@ int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
 	struct page *ipage;
 	int err;

-	err = f2fs_get_node_info(sbi, ino, &old_ni);
+	err = f2fs_get_node_info(sbi, ino, &old_ni, false);
 	if (err)
 		return err;

@@ -2993,15 +2998,15 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	 * nat_cnt[DIRTY_NAT].
 	 */
 	if (enabled_nat_bits(sbi, cpc)) {
-		down_write(&nm_i->nat_tree_lock);
+		f2fs_down_write(&nm_i->nat_tree_lock);
 		remove_nats_in_journal(sbi);
-		up_write(&nm_i->nat_tree_lock);
+		f2fs_up_write(&nm_i->nat_tree_lock);
 	}

 	if (!nm_i->nat_cnt[DIRTY_NAT])
 		return 0;

-	down_write(&nm_i->nat_tree_lock);
+	f2fs_down_write(&nm_i->nat_tree_lock);

 	/*
 	 * if there are no enough space in journal to store dirty nat
@@ -3030,7 +3035,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 			break;
 	}

-	up_write(&nm_i->nat_tree_lock);
+	f2fs_up_write(&nm_i->nat_tree_lock);
 	/* Allow dirty nats by node block allocation in write_begin */

 	return err;
@@ -3148,7 +3153,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)

 	mutex_init(&nm_i->build_lock);
 	spin_lock_init(&nm_i->nid_list_lock);
-	init_rwsem(&nm_i->nat_tree_lock);
+	init_f2fs_rwsem(&nm_i->nat_tree_lock);

 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
@@ -3254,7 +3259,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
 	spin_unlock(&nm_i->nid_list_lock);

 	/* destroy nat cache */
-	down_write(&nm_i->nat_tree_lock);
+	f2fs_down_write(&nm_i->nat_tree_lock);
 	while ((found = __gang_lookup_nat_cache(nm_i,
 			nid, NATVEC_SIZE, natvec))) {
 		unsigned idx;
@@ -3284,7 +3289,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
 			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
 		}
 	}
-	up_write(&nm_i->nat_tree_lock);
+	f2fs_up_write(&nm_i->nat_tree_lock);

 	kvfree(nm_i->nat_block_bitmap);
 	if (nm_i->free_nid_bitmap) {
@@ -594,7 +594,7 @@ retry_dn:
 
 	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
 
-	err = f2fs_get_node_info(sbi, dn.nid, &ni);
+	err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
 	if (err)
 		goto err;
 
@@ -797,7 +797,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
 	INIT_LIST_HEAD(&dir_list);
 
 	/* prevent checkpoint */
-	down_write(&sbi->cp_global_sem);
+	f2fs_down_write(&sbi->cp_global_sem);
 
 	/* step #1: find fsynced inode numbers */
 	err = find_fsync_dnodes(sbi, &inode_list, check_only);
@@ -848,7 +848,7 @@ skip:
 	if (!err)
 		clear_sbi_flag(sbi, SBI_POR_DOING);
 
-	up_write(&sbi->cp_global_sem);
+	f2fs_up_write(&sbi->cp_global_sem);
 
 	/* let's drop all the directory inodes for clean checkpoint */
 	destroy_fsync_dnodes(&dir_list, err);
@@ -251,7 +251,7 @@ retry:
 		goto next;
 	}
 
-	err = f2fs_get_node_info(sbi, dn.nid, &ni);
+	err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
 	if (err) {
 		f2fs_put_dnode(&dn);
 		return err;
@@ -471,7 +471,7 @@ int f2fs_commit_inmem_pages(struct inode *inode)
 
 	f2fs_balance_fs(sbi, true);
 
-	down_write(&fi->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
 
 	f2fs_lock_op(sbi);
 	set_inode_flag(inode, FI_ATOMIC_COMMIT);
@@ -483,7 +483,7 @@ int f2fs_commit_inmem_pages(struct inode *inode)
 	clear_inode_flag(inode, FI_ATOMIC_COMMIT);
 
 	f2fs_unlock_op(sbi);
-	up_write(&fi->i_gc_rwsem[WRITE]);
+	f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
 
 	return err;
 }
@@ -521,7 +521,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
 		io_schedule();
 		finish_wait(&sbi->gc_thread->fggc_wq, &wait);
 	} else {
-		down_write(&sbi->gc_lock);
+		f2fs_down_write(&sbi->gc_lock);
 		f2fs_gc(sbi, false, false, false, NULL_SEGNO);
 	}
 }
@@ -551,7 +551,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
 
 	/* there is background inflight IO or foreground operation recently */
 	if (is_inflight_io(sbi, REQ_TIME) ||
-		(!f2fs_time_over(sbi, REQ_TIME) && rwsem_is_locked(&sbi->cp_rwsem)))
+		(!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem)))
 		return;
 
 	/* exceed periodical checkpoint timeout threshold */
@@ -2746,7 +2746,7 @@ static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
 	if (!sbi->am.atgc_enabled)
 		return;
 
-	down_read(&SM_I(sbi)->curseg_lock);
+	f2fs_down_read(&SM_I(sbi)->curseg_lock);
 
 	mutex_lock(&curseg->curseg_mutex);
 	down_write(&SIT_I(sbi)->sentry_lock);
@@ -2756,7 +2756,7 @@ static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
 	up_write(&SIT_I(sbi)->sentry_lock);
 	mutex_unlock(&curseg->curseg_mutex);
 
-	up_read(&SM_I(sbi)->curseg_lock);
+	f2fs_up_read(&SM_I(sbi)->curseg_lock);
 }
 
 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
@@ -2907,7 +2907,7 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
 	struct curseg_info *curseg = CURSEG_I(sbi, type);
 	unsigned int segno;
 
-	down_read(&SM_I(sbi)->curseg_lock);
+	f2fs_down_read(&SM_I(sbi)->curseg_lock);
 	mutex_lock(&curseg->curseg_mutex);
 	down_write(&SIT_I(sbi)->sentry_lock);
 
@@ -2931,7 +2931,7 @@ unlock:
 			type, segno, curseg->segno);
 
 	mutex_unlock(&curseg->curseg_mutex);
-	up_read(&SM_I(sbi)->curseg_lock);
+	f2fs_up_read(&SM_I(sbi)->curseg_lock);
 }
 
 static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type,
@@ -2963,23 +2963,23 @@ static void __allocate_new_section(struct f2fs_sb_info *sbi,
 
 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force)
 {
-	down_read(&SM_I(sbi)->curseg_lock);
+	f2fs_down_read(&SM_I(sbi)->curseg_lock);
 	down_write(&SIT_I(sbi)->sentry_lock);
 	__allocate_new_section(sbi, type, force);
 	up_write(&SIT_I(sbi)->sentry_lock);
-	up_read(&SM_I(sbi)->curseg_lock);
+	f2fs_up_read(&SM_I(sbi)->curseg_lock);
 }
 
 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
 {
 	int i;
 
-	down_read(&SM_I(sbi)->curseg_lock);
+	f2fs_down_read(&SM_I(sbi)->curseg_lock);
 	down_write(&SIT_I(sbi)->sentry_lock);
 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
 		__allocate_new_segment(sbi, i, false, false);
 	up_write(&SIT_I(sbi)->sentry_lock);
-	up_read(&SM_I(sbi)->curseg_lock);
+	f2fs_up_read(&SM_I(sbi)->curseg_lock);
 }
 
 static const struct segment_allocation default_salloc_ops = {
@@ -3117,9 +3117,9 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
 	if (sbi->discard_blks == 0)
 		goto out;
 
-	down_write(&sbi->gc_lock);
+	f2fs_down_write(&sbi->gc_lock);
 	err = f2fs_write_checkpoint(sbi, &cpc);
-	up_write(&sbi->gc_lock);
+	f2fs_up_write(&sbi->gc_lock);
 	if (err)
 		goto out;
 
@@ -3356,7 +3356,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 	bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
 	struct seg_entry *se = NULL;
 
-	down_read(&SM_I(sbi)->curseg_lock);
+	f2fs_down_read(&SM_I(sbi)->curseg_lock);
 
 	mutex_lock(&curseg->curseg_mutex);
 	down_write(&sit_i->sentry_lock);
@@ -3439,7 +3439,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
 
 	mutex_unlock(&curseg->curseg_mutex);
 
-	up_read(&SM_I(sbi)->curseg_lock);
+	f2fs_up_read(&SM_I(sbi)->curseg_lock);
 }
 
 static void update_device_state(struct f2fs_io_info *fio)
@@ -3469,7 +3469,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
 	bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
 
 	if (keep_order)
-		down_read(&fio->sbi->io_order_lock);
+		f2fs_down_read(&fio->sbi->io_order_lock);
 reallocate:
 	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
 			&fio->new_blkaddr, sum, type, fio);
@@ -3489,7 +3489,7 @@ reallocate:
 	update_device_state(fio);
 
 	if (keep_order)
-		up_read(&fio->sbi->io_order_lock);
+		f2fs_up_read(&fio->sbi->io_order_lock);
 }
 
 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
@@ -3620,7 +3620,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 	se = get_seg_entry(sbi, segno);
 	type = se->type;
 
-	down_write(&SM_I(sbi)->curseg_lock);
+	f2fs_down_write(&SM_I(sbi)->curseg_lock);
 
 	if (!recover_curseg) {
 		/* for recovery flow */
@@ -3689,7 +3689,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 
 	up_write(&sit_i->sentry_lock);
 	mutex_unlock(&curseg->curseg_mutex);
-	up_write(&SM_I(sbi)->curseg_lock);
+	f2fs_up_write(&SM_I(sbi)->curseg_lock);
 }
 
 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
@@ -5165,7 +5165,7 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
 
 	INIT_LIST_HEAD(&sm_info->sit_entry_set);
 
-	init_rwsem(&sm_info->curseg_lock);
+	init_f2fs_rwsem(&sm_info->curseg_lock);
 
 	if (!f2fs_readonly(sbi->sb)) {
 		err = f2fs_create_flush_cmd_control(sbi);
@@ -1248,17 +1248,17 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
 	/* Initialize f2fs-specific inode info */
 	atomic_set(&fi->dirty_pages, 0);
 	atomic_set(&fi->i_compr_blocks, 0);
-	init_rwsem(&fi->i_sem);
+	init_f2fs_rwsem(&fi->i_sem);
 	spin_lock_init(&fi->i_size_lock);
 	INIT_LIST_HEAD(&fi->dirty_list);
 	INIT_LIST_HEAD(&fi->gdirty_list);
 	INIT_LIST_HEAD(&fi->inmem_ilist);
 	INIT_LIST_HEAD(&fi->inmem_pages);
 	mutex_init(&fi->inmem_lock);
-	init_rwsem(&fi->i_gc_rwsem[READ]);
-	init_rwsem(&fi->i_gc_rwsem[WRITE]);
-	init_rwsem(&fi->i_mmap_sem);
-	init_rwsem(&fi->i_xattr_sem);
+	init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
+	init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
+	init_f2fs_rwsem(&fi->i_mmap_sem);
+	init_f2fs_rwsem(&fi->i_xattr_sem);
 
 	/* Will be used by directory only */
 	fi->i_dir_level = F2FS_SB(sb)->dir_level;
@@ -1963,7 +1963,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 	f2fs_update_time(sbi, DISABLE_TIME);
 
 	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
-		down_write(&sbi->gc_lock);
+		f2fs_down_write(&sbi->gc_lock);
 		err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
 		if (err == -ENODATA) {
 			err = 0;
@@ -1985,7 +1985,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 		goto restore_flag;
 	}
 
-	down_write(&sbi->gc_lock);
+	f2fs_down_write(&sbi->gc_lock);
 	cpc.reason = CP_PAUSE;
 	set_sbi_flag(sbi, SBI_CP_DISABLED);
 	err = f2fs_write_checkpoint(sbi, &cpc);
@@ -1997,7 +1997,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 	spin_unlock(&sbi->stat_lock);
 
 out_unlock:
-	up_write(&sbi->gc_lock);
+	f2fs_up_write(&sbi->gc_lock);
 restore_flag:
 	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
 	return err;
@@ -2017,12 +2017,12 @@ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
 	if (unlikely(retry < 0))
 		f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
 
-	down_write(&sbi->gc_lock);
+	f2fs_down_write(&sbi->gc_lock);
 	f2fs_dirty_to_prefree(sbi);
 
 	clear_sbi_flag(sbi, SBI_CP_DISABLED);
 	set_sbi_flag(sbi, SBI_IS_DIRTY);
-	up_write(&sbi->gc_lock);
+	f2fs_up_write(&sbi->gc_lock);
 
 	f2fs_sync_fs(sbi->sb, 1);
 }
@@ -2544,18 +2544,18 @@ int f2fs_quota_sync(struct super_block *sb, int type)
 	/*
 	 * do_quotactl
 	 *  f2fs_quota_sync
-	 *  down_read(quota_sem)
+	 *  f2fs_down_read(quota_sem)
 	 *   dquot_writeback_dquots()
 	 *    f2fs_dquot_commit
 	 *     block_operation
-	 *     down_read(quota_sem)
+	 *     f2fs_down_read(quota_sem)
 	 */
 	f2fs_lock_op(sbi);
-	down_read(&sbi->quota_sem);
+	f2fs_down_read(&sbi->quota_sem);
 
 	ret = f2fs_quota_sync_file(sbi, cnt);
 
-	up_read(&sbi->quota_sem);
+	f2fs_up_read(&sbi->quota_sem);
 	f2fs_unlock_op(sbi);
 
 	inode_unlock(dqopt->files[cnt]);
@@ -2680,11 +2680,11 @@ static int f2fs_dquot_commit(struct dquot *dquot)
 	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
 	int ret;
 
-	down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
+	f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
 	ret = dquot_commit(dquot);
 	if (ret < 0)
 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
-	up_read(&sbi->quota_sem);
+	f2fs_up_read(&sbi->quota_sem);
 	return ret;
 }
 
@@ -2693,11 +2693,11 @@ static int f2fs_dquot_acquire(struct dquot *dquot)
 	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
 	int ret;
 
-	down_read(&sbi->quota_sem);
+	f2fs_down_read(&sbi->quota_sem);
 	ret = dquot_acquire(dquot);
 	if (ret < 0)
 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
-	up_read(&sbi->quota_sem);
+	f2fs_up_read(&sbi->quota_sem);
 	return ret;
 }
 
@@ -3430,14 +3430,14 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
 
 	INIT_LIST_HEAD(&sbi->s_list);
 	mutex_init(&sbi->umount_mutex);
-	init_rwsem(&sbi->io_order_lock);
+	init_f2fs_rwsem(&sbi->io_order_lock);
 	spin_lock_init(&sbi->cp_lock);
 
 	sbi->dirty_device = 0;
 	spin_lock_init(&sbi->dev_lock);
 
-	init_rwsem(&sbi->sb_lock);
-	init_rwsem(&sbi->pin_sem);
+	init_f2fs_rwsem(&sbi->sb_lock);
+	init_f2fs_rwsem(&sbi->pin_sem);
 }
 
 static int init_percpu_info(struct f2fs_sb_info *sbi)
@@ -3881,11 +3881,11 @@ try_onemore:
 
 	/* init f2fs-specific super block info */
 	sbi->valid_super_block = valid_super_block;
-	init_rwsem(&sbi->gc_lock);
+	init_f2fs_rwsem(&sbi->gc_lock);
 	mutex_init(&sbi->writepages);
-	init_rwsem(&sbi->cp_global_sem);
-	init_rwsem(&sbi->node_write);
-	init_rwsem(&sbi->node_change);
+	init_f2fs_rwsem(&sbi->cp_global_sem);
+	init_f2fs_rwsem(&sbi->node_write);
+	init_f2fs_rwsem(&sbi->node_change);
 
 	/* disallow all the data/node/meta page writes */
 	set_sbi_flag(sbi, SBI_POR_DOING);
@@ -3911,18 +3911,18 @@ try_onemore:
 		}
 
 		for (j = HOT; j < n; j++) {
-			init_rwsem(&sbi->write_io[i][j].io_rwsem);
+			init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
 			sbi->write_io[i][j].sbi = sbi;
 			sbi->write_io[i][j].bio = NULL;
 			spin_lock_init(&sbi->write_io[i][j].io_lock);
 			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
 			INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
-			init_rwsem(&sbi->write_io[i][j].bio_list_lock);
+			init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
 		}
 	}
 
-	init_rwsem(&sbi->cp_rwsem);
-	init_rwsem(&sbi->quota_sem);
+	init_f2fs_rwsem(&sbi->cp_rwsem);
+	init_f2fs_rwsem(&sbi->quota_sem);
 	init_waitqueue_head(&sbi->cp_wait);
 	init_sb_info(sbi);
@@ -363,7 +363,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
 		if (strlen(name) >= F2FS_EXTENSION_LEN)
 			return -EINVAL;
 
-		down_write(&sbi->sb_lock);
+		f2fs_down_write(&sbi->sb_lock);
 
 		ret = f2fs_update_extension_list(sbi, name, hot, set);
 		if (ret)
@@ -373,7 +373,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
 		if (ret)
 			f2fs_update_extension_list(sbi, name, hot, !set);
 out:
-		up_write(&sbi->sb_lock);
+		f2fs_up_write(&sbi->sb_lock);
 		return ret ? ret : count;
 	}
@@ -208,7 +208,7 @@ cleanup:
 	 * from re-instantiating cached pages we are truncating (since unlike
 	 * normal file accesses, garbage collection isn't limited by i_size).
 	 */
-	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	truncate_inode_pages(inode->i_mapping, inode->i_size);
 	err2 = f2fs_truncate(inode);
 	if (err2) {
@@ -216,7 +216,7 @@ cleanup:
 			 err2);
 		set_sbi_flag(sbi, SBI_NEED_FSCK);
 	}
-	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+	f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
 	clear_inode_flag(inode, FI_VERITY_IN_PROGRESS);
 	return err ?: err2;
 }
@@ -529,10 +529,10 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
 	if (len > F2FS_NAME_LEN)
 		return -ERANGE;
 
-	down_read(&F2FS_I(inode)->i_xattr_sem);
+	f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
 	error = lookup_all_xattrs(inode, ipage, index, len, name,
 				&entry, &base_addr, &base_size, &is_inline);
-	up_read(&F2FS_I(inode)->i_xattr_sem);
+	f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
 	if (error)
 		return error;
 
@@ -566,9 +566,9 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
 	int error;
 	size_t rest = buffer_size;
 
-	down_read(&F2FS_I(inode)->i_xattr_sem);
+	f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
 	error = read_all_xattrs(inode, NULL, &base_addr);
-	up_read(&F2FS_I(inode)->i_xattr_sem);
+	f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
 	if (error)
 		return error;
 
@@ -790,9 +790,9 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
 	f2fs_balance_fs(sbi, true);
 
 	f2fs_lock_op(sbi);
-	down_write(&F2FS_I(inode)->i_xattr_sem);
+	f2fs_down_write(&F2FS_I(inode)->i_xattr_sem);
 	err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags);
-	up_write(&F2FS_I(inode)->i_xattr_sem);
+	f2fs_up_write(&F2FS_I(inode)->i_xattr_sem);
 	f2fs_unlock_op(sbi);
 
 	f2fs_update_time(sbi, REQ_TIME);
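Note: all of the down_read/up_read renames in the f2fs hunks above come from the reader-unfair rwsem conversion pulled in by this merge ("f2fs: move f2fs to use reader-unfair rwsems"). As a reference, here is a condensed sketch of the wrapper type those call sites resolve to, following the upstream patch; the real definitions live in fs/f2fs/f2fs.h and additionally handle lockdep keys, which this sketch omits.

struct f2fs_rwsem {
	struct rw_semaphore internal_rwsem;
	wait_queue_head_t read_waiters;
};

static inline void init_f2fs_rwsem(struct f2fs_rwsem *sem)
{
	init_rwsem(&sem->internal_rwsem);
	init_waitqueue_head(&sem->read_waiters);
}

static inline void f2fs_down_read(struct f2fs_rwsem *sem)
{
	/* Readers only enter via trylock, so they never queue ahead of a
	 * writer inside the rwsem; they park on read_waiters instead.
	 */
	wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem));
}

static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
{
	return down_read_trylock(&sem->internal_rwsem);
}

static inline void f2fs_up_read(struct f2fs_rwsem *sem)
{
	up_read(&sem->internal_rwsem);
}

static inline void f2fs_down_write(struct f2fs_rwsem *sem)
{
	down_write(&sem->internal_rwsem);
}

static inline void f2fs_up_write(struct f2fs_rwsem *sem)
{
	up_write(&sem->internal_rwsem);
	/* A finishing writer wakes every parked reader at once. */
	wake_up_all(&sem->read_waiters);
}

static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
{
	return rwsem_is_locked(&sem->internal_rwsem);
}

The point of the unfairness is checkpoint latency: a writer such as f2fs_down_write(&nm_i->nat_tree_lock) no longer has to wait behind a stream of queued readers.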
@@ -175,6 +175,7 @@ void incfs_free_mount_info(struct mount_info *mi)
 		kfree(mi->pseudo_file_xattr[i].data);
 	kfree(mi->mi_per_uid_read_timeouts);
 	incfs_free_sysfs_node(mi->mi_sysfs_node);
+	kfree(mi->mi_options.sysfs_name);
 	kfree(mi);
 }
@@ -147,8 +147,12 @@ static long ioctl_permit_fill(struct file *f, void __user *arg)
 		return -EFAULT;
 
 	file = fget(permit_fill.file_descriptor);
-	if (IS_ERR(file))
+	if (IS_ERR_OR_NULL(file)) {
+		if (!file)
+			return -ENOENT;
+
 		return PTR_ERR(file);
+	}
 
 	if (file->f_op != &incfs_file_ops) {
 		error = -EPERM;
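Note: the IS_ERR() to IS_ERR_OR_NULL() change above matters because fget() reports a missing descriptor by returning NULL, not an ERR_PTR-encoded error, so the old check let a NULL file pointer fall through to the file->f_op dereference. A minimal sketch of the corrected pattern (illustrative only, not the incfs code):

	/* fget() returns NULL when the descriptor is not open; it does not
	 * encode errors with ERR_PTR(), so IS_ERR() alone misses the common
	 * failure case.
	 */
	struct file *file = fget(fd);

	if (IS_ERR_OR_NULL(file)) {
		if (!file)
			return -ENOENT;		/* no such open descriptor */
		return PTR_ERR(file);		/* defensive ERR_PTR branch */
	}
	/* safe to dereference file->f_op from here on */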
@@ -393,7 +393,7 @@ static int iterate_incfs_dir(struct file *file, struct dir_context *ctx)
 	struct mount_info *mi = get_mount_info(file_superblock(file));
 	bool root;
 
-	if (!dir) {
+	if (!dir || !mi) {
 		error = -EBADF;
 		goto out;
 	}
@@ -1336,6 +1336,9 @@ static int dir_rename(struct inode *old_dir, struct dentry *old_dentry,
 	struct dentry *trap;
 	int error = 0;
 
+	if (!mi)
+		return -EBADF;
+
 	error = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
 	if (error)
 		return error;
@@ -1664,6 +1667,9 @@ static ssize_t incfs_getxattr(struct dentry *d, const char *name,
 	size_t stored_size;
 	int i;
 
+	if (!mi)
+		return -EBADF;
+
 	if (di && di->backing_path.dentry)
 		return vfs_getxattr(di->backing_path.dentry, name, value, size);
 
@@ -1698,6 +1704,9 @@ static ssize_t incfs_setxattr(struct dentry *d, const char *name,
 	size_t *stored_size;
 	int i;
 
+	if (!mi)
+		return -EBADF;
+
 	if (di && di->backing_path.dentry)
 		return vfs_setxattr(di->backing_path.dentry, name, value, size,
 				    flags);
@@ -1736,6 +1745,11 @@ static ssize_t incfs_listxattr(struct dentry *d, char *list, size_t size)
 		return vfs_listxattr(di->backing_path.dentry, list, size);
 }
 
+static int incfs_test_super(struct super_block *s, void *p)
+{
+	return s->s_fs_info != NULL;
+}
+
 struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
 			      const char *dev_name, void *data)
 {
@@ -1746,7 +1760,8 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
 	struct dentry *incomplete_dir = NULL;
 	struct super_block *src_fs_sb = NULL;
 	struct inode *root_inode = NULL;
-	struct super_block *sb = sget(type, NULL, set_anon_super, flags, NULL);
+	struct super_block *sb = sget(type, incfs_test_super, set_anon_super,
+				      flags, NULL);
 	int error = 0;
 
 	if (IS_ERR(sb))
@@ -1787,13 +1802,18 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
 	src_fs_sb = backing_dir_path.dentry->d_sb;
 	sb->s_maxbytes = src_fs_sb->s_maxbytes;
 
-	mi = incfs_alloc_mount_info(sb, &options, &backing_dir_path);
+	if (!sb->s_fs_info) {
+		mi = incfs_alloc_mount_info(sb, &options, &backing_dir_path);
 
-	if (IS_ERR_OR_NULL(mi)) {
-		error = PTR_ERR(mi);
-		pr_err("incfs: Error allocating mount info. %d\n", error);
-		mi = NULL;
-		goto err;
+		if (IS_ERR_OR_NULL(mi)) {
+			error = PTR_ERR(mi);
+			pr_err("incfs: Error allocating mount info. %d\n", error);
+			mi = NULL;
+			goto err;
+		}
+		sb->s_fs_info = mi;
+	} else {
+		mi = sb->s_fs_info;
 	}
 
 	index_dir = open_or_create_special_dir(backing_dir_path.dentry,
@@ -1818,21 +1838,22 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
 	}
 	mi->mi_incomplete_dir = incomplete_dir;
 
-	sb->s_fs_info = mi;
 	root_inode = fetch_regular_inode(sb, backing_dir_path.dentry);
 	if (IS_ERR(root_inode)) {
 		error = PTR_ERR(root_inode);
 		goto err;
 	}
 
-	sb->s_root = d_make_root(root_inode);
-	if (!sb->s_root) {
-		error = -ENOMEM;
-		goto err;
+	if (!sb->s_root) {
+		sb->s_root = d_make_root(root_inode);
+		if (!sb->s_root) {
+			error = -ENOMEM;
+			goto err;
+		}
+		error = incfs_init_dentry(sb->s_root, &backing_dir_path);
+		if (error)
+			goto err;
 	}
-	error = incfs_init_dentry(sb->s_root, &backing_dir_path);
-	if (error)
-		goto err;
 
 	path_put(&backing_dir_path);
 	sb->s_flags |= SB_ACTIVE;
@@ -1854,6 +1875,9 @@ static int incfs_remount_fs(struct super_block *sb, int *flags, char *data)
 	struct mount_info *mi = get_mount_info(sb);
 	int err = 0;
 
+	if (!mi)
+		return err;
+
 	sync_filesystem(sb);
 	err = parse_options(&options, (char *)data);
 	if (err)
@@ -1883,12 +1907,16 @@ void incfs_kill_sb(struct super_block *sb)
 	pr_debug("incfs: unmount\n");
 	generic_shutdown_super(sb);
 	incfs_free_mount_info(mi);
+	sb->s_fs_info = NULL;
 }
 
 static int show_options(struct seq_file *m, struct dentry *root)
 {
 	struct mount_info *mi = get_mount_info(root->d_sb);
 
+	if (!mi)
+		return -EBADF;
+
 	seq_printf(m, ",read_timeout_ms=%u", mi->mi_options.read_timeout_ms);
 	seq_printf(m, ",readahead=%u", mi->mi_options.readahead_pages);
 	if (mi->mi_options.read_log_pages != 0) {
@@ -19,7 +19,6 @@ static inline struct mount_info *get_mount_info(struct super_block *sb)
 {
 	struct mount_info *result = sb->s_fs_info;
 
-	WARN_ON(!result);
 	return result;
 }
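Note: the incfs mount-path changes above hang together. Passing incfs_test_super to sget() lets a repeated mount find and reuse an existing incfs super_block instead of always taking the set_anon_super path, and the new "if (!sb->s_fs_info)" / "if (!sb->s_root)" guards skip the one-time setup when a reused superblock arrives already initialized. A minimal sketch of that sget() idiom, with hypothetical names rather than the incfs code:

#include <linux/fs.h>

/* Reuse any superblock of this type that already finished setup. */
static int example_test_super(struct super_block *s, void *data)
{
	return s->s_fs_info != NULL;
}

static struct super_block *example_get_super(struct file_system_type *type,
					     int flags)
{
	struct super_block *sb = sget(type, example_test_super,
				      set_anon_super, flags, NULL);

	if (IS_ERR(sb))
		return sb;

	if (!sb->s_fs_info) {
		/* first mount: allocate per-mount state, build s_root, ... */
	}
	return sb;
}

The matching teardown then has to clear s_fs_info, as incfs_kill_sb now does, so a stale pointer can never satisfy the test callback again; the WARN_ON removal in get_mount_info() fits the same scheme, since a NULL result is now an expected state that callers check for.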
@@ -504,8 +504,7 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
 	if (unlikely(!inode))
 		return failed_creating(dentry);
 
-	/* Do not set bits for OTH */
-	inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP;
+	inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
 	inode->i_op = ops;
 	inode->i_fop = &simple_dir_operations;
 	inode->i_uid = d_inode(dentry->d_parent)->i_uid;
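Note: assuming the standard <linux/stat.h> macro values, the two mode expressions in this tracefs hunk differ only in the group/other bits:

	/* Octal equivalents:
	 *
	 *   S_IFDIR | S_IRWXU | S_IRUSR | S_IRGRP | S_IXUSR | S_IXGRP
	 *       == S_IFDIR | 0750	(no access at all for "other")
	 *
	 *   S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO
	 *       == S_IFDIR | 0755	(world-readable and searchable)
	 */

That is, the revert restores 0755 tracefs directories; the ownership-inheritance and gid mount-option patches listed in this merge handle access control instead.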
@@ -14,10 +14,6 @@ DECLARE_HOOK(android_vh_cpu_up,
 	TP_PROTO(unsigned int cpu),
 	TP_ARGS(cpu));
 
-DECLARE_HOOK(android_vh_cpu_down,
-	TP_PROTO(unsigned int cpu),
-	TP_ARGS(cpu));
-
 /* macro versions of hooks are no longer required */
 
 #endif /* _TRACE_HOOK_CPU_H */
@@ -12,9 +12,6 @@
  */
 struct irq_data;
 struct cpumask;
-DECLARE_HOOK(android_vh_gic_v3_affinity_init,
-	TP_PROTO(int irq, u32 offset, u64 *affinity),
-	TP_ARGS(irq, offset, affinity));
 DECLARE_RESTRICTED_HOOK(android_rvh_gic_v3_set_affinity,
 	TP_PROTO(struct irq_data *d, const struct cpumask *mask_val,
 		 u64 *affinity, bool force, void __iomem *base),
@@ -12,6 +12,10 @@
 #include <linux/tracepoint.h>
 #include <trace/hooks/vendor_hooks.h>
 
+DECLARE_RESTRICTED_HOOK(android_rvh_iommu_setup_dma_ops,
+	TP_PROTO(struct device *dev, u64 dma_base, u64 size),
+	TP_ARGS(dev, dma_base, size), 1);
+
 DECLARE_HOOK(android_vh_iommu_setup_dma_ops,
 	TP_PROTO(struct device *dev, u64 dma_base, u64 size),
 	TP_ARGS(dev, dma_base, size));
@@ -68,16 +68,7 @@ DECLARE_HOOK(android_vh_include_reserved_zone,
 DECLARE_HOOK(android_vh_show_mem,
 	TP_PROTO(unsigned int filter, nodemask_t *nodemask),
 	TP_ARGS(filter, nodemask));
-DECLARE_HOOK(android_vh_alloc_pages_slowpath,
-	TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long delta),
-	TP_ARGS(gfp_mask, order, delta));
-DECLARE_HOOK(android_vh_print_slabinfo_header,
-	TP_PROTO(struct seq_file *m),
-	TP_ARGS(m));
-struct slabinfo;
-DECLARE_HOOK(android_vh_cache_show,
-	TP_PROTO(struct seq_file *m, struct slabinfo *sinfo, struct kmem_cache *s),
-	TP_ARGS(m, sinfo, s));
 struct dirty_throttle_control;
 DECLARE_HOOK(android_vh_mm_dirty_limits,
 	TP_PROTO(struct dirty_throttle_control *const gdtc, bool strictlimit,
@@ -18,16 +18,8 @@ DECLARE_HOOK(android_vh_ptype_head,
 DECLARE_HOOK(android_vh_kfree_skb,
 	TP_PROTO(struct sk_buff *skb), TP_ARGS(skb));
 
-struct nf_conn;
-struct sock;
-DECLARE_RESTRICTED_HOOK(android_rvh_nf_conn_alloc,
-	TP_PROTO(struct nf_conn *nf_conn), TP_ARGS(nf_conn), 1);
-DECLARE_RESTRICTED_HOOK(android_rvh_nf_conn_free,
-	TP_PROTO(struct nf_conn *nf_conn), TP_ARGS(nf_conn), 1);
-DECLARE_RESTRICTED_HOOK(android_rvh_sk_alloc,
-	TP_PROTO(struct sock *sock), TP_ARGS(sock), 1);
-DECLARE_RESTRICTED_HOOK(android_rvh_sk_free,
-	TP_PROTO(struct sock *sock), TP_ARGS(sock), 1);
+struct nf_conn;	/* needed for CRC preservation */
+struct sock;	/* needed for CRC preservation */
 
 /* macro versions of hooks are no longer required */
 
@@ -283,9 +283,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_check_preempt_tick,
 	unsigned long delta_exec, struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	unsigned int granularity),
 	TP_ARGS(p, ideal_runtime, skip_preempt, delta_exec, cfs_rq, curr, granularity), 1);
-DECLARE_RESTRICTED_HOOK(android_rvh_check_preempt_wakeup_ignore,
-	TP_PROTO(struct task_struct *p, bool *ignore),
-	TP_ARGS(p, ignore), 1);
 DECLARE_RESTRICTED_HOOK(android_rvh_replace_next_task_fair,
 	TP_PROTO(struct rq *rq, struct task_struct **p, struct sched_entity **se, bool *repick,
 		 bool simple, struct task_struct *prev),
@@ -342,10 +339,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_dequeue_entity,
 	TP_PROTO(struct cfs_rq *cfs, struct sched_entity *se),
 	TP_ARGS(cfs, se), 1);
 
-DECLARE_RESTRICTED_HOOK(android_rvh_entity_tick,
-	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
-	TP_ARGS(cfs_rq, se), 1);
-
 DECLARE_RESTRICTED_HOOK(android_rvh_enqueue_task_fair,
 	TP_PROTO(struct rq *rq, struct task_struct *p, int flags),
 	TP_ARGS(rq, p, flags), 1);
@@ -370,10 +363,6 @@ DECLARE_HOOK(android_vh_dup_task_struct,
 	TP_PROTO(struct task_struct *tsk, struct task_struct *orig),
 	TP_ARGS(tsk, orig));
 
-DECLARE_HOOK(android_vh_set_task_comm,
-	TP_PROTO(struct task_struct *p),
-	TP_ARGS(p));
-
 DECLARE_RESTRICTED_HOOK(android_rvh_find_new_ilb,
 	TP_PROTO(struct cpumask *nohz_idle_cpus_mask, int *ilb),
 	TP_ARGS(nohz_idle_cpus_mask, ilb), 1);
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM sysrqcrash
-#define TRACE_INCLUDE_PATH trace/hooks
-
-#if !defined(_TRACE_HOOK_SYSRQCRASH_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_HOOK_SYSRQCRASH_H
-#include <linux/tracepoint.h>
-#include <trace/hooks/vendor_hooks.h>
-/*
- * Following tracepoints are not exported in tracefs and provide a
- * mechanism for vendor modules to hook and extend functionality
- */
-DECLARE_HOOK(android_vh_sysrq_crash,
-	TP_PROTO(void *data),
-	TP_ARGS(data));
-
-/* macro versions of hooks are no longer required */
-
-#endif /* _TRACE_HOOK_SYSRQCRASH_H */
-/* This part must be outside protection */
-#include <trace/define_trace.h>
@@ -17,11 +17,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_do_undefinstr,
 	TP_ARGS(regs, user),
 	TP_CONDITION(!user));
 
-DECLARE_RESTRICTED_HOOK(android_rvh_do_bti,
-	TP_PROTO(struct pt_regs *regs, bool user),
-	TP_ARGS(regs, user),
-	TP_CONDITION(!user));
-
 DECLARE_RESTRICTED_HOOK(android_rvh_do_ptrauth_fault,
 	TP_PROTO(struct pt_regs *regs, unsigned int esr, bool user),
 	TP_ARGS(regs, esr, user),
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM user
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH trace/hooks
-#if !defined(_TRACE_HOOK_USER_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_HOOK_USER_H
-#include <linux/tracepoint.h>
-#include <trace/hooks/vendor_hooks.h>
-
-struct user_struct;
-DECLARE_HOOK(android_vh_alloc_uid,
-	TP_PROTO(struct user_struct *user),
-	TP_ARGS(user));
-
-DECLARE_HOOK(android_vh_free_user,
-	TP_PROTO(struct user_struct *up),
-	TP_ARGS(up));
-
-#endif /* _TRACE_HOOK_USER_H */
-/* This part must be outside protection */
-#include <trace/define_trace.h>
-
@@ -22,9 +22,6 @@ DECLARE_HOOK(android_vh_shrink_slab_bypass,
 DECLARE_HOOK(android_vh_tune_inactive_ratio,
 	TP_PROTO(unsigned long *inactive_ratio, int file),
 	TP_ARGS(inactive_ratio, file))
-DECLARE_HOOK(android_vh_do_shrink_slab,
-	TP_PROTO(struct shrinker *shrinker, struct shrink_control *shrinkctl, int priority),
-	TP_ARGS(shrinker, shrinkctl, priority));
 DECLARE_RESTRICTED_HOOK(android_rvh_set_balance_anon_file_reclaim,
 	TP_PROTO(bool *balance_anon_file_reclaim),
 	TP_ARGS(balance_anon_file_reclaim), 1);
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM workqueue
-#define TRACE_INCLUDE_PATH trace/hooks
-
-#if !defined(_TRACE_HOOK_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_HOOK_WORKQUEUE_H
-#include <linux/tracepoint.h>
-#include <trace/hooks/vendor_hooks.h>
-/*
- * Following tracepoints are not exported in tracefs and provide a
- * mechanism for vendor modules to hook and extend functionality
- */
-struct worker;
-DECLARE_HOOK(android_vh_create_worker,
-	TP_PROTO(struct worker *worker, struct workqueue_attrs *attrs),
-	TP_ARGS(worker, attrs));
-/* macro versions of hooks are no longer required */
-
-#endif /* _TRACE_HOOK_WORKQUEUE_H */
-/* This part must be outside protection */
-#include <trace/define_trace.h>
@@ -3307,7 +3307,6 @@ void cpuset_wait_for_hotplug(void)
 {
 	flush_work(&cpuset_hotplug_work);
 }
-EXPORT_SYMBOL_GPL(cpuset_wait_for_hotplug);
 
 /*
  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
@@ -1127,8 +1127,6 @@ static int cpu_down(unsigned int cpu, enum cpuhp_state target)
 {
 	int err;
 
-	trace_android_vh_cpu_down(cpu);
-
 	cpu_maps_update_begin();
 	err = cpu_down_maps_locked(cpu, target);
 	cpu_maps_update_done();
@@ -474,8 +474,10 @@ static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
 	 * state has been re-checked. A memcpy() for all of @desc
 	 * cannot be used because of the atomic_t @state_var field.
 	 */
-	memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
-	       sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */
+	if (desc_out) {
+		memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
+		       sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */
+	}
 	if (seq_out)
 		*seq_out = info->seq; /* also part of desc_read:C */
 	if (caller_id_out)
@@ -528,7 +530,8 @@ static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
 	state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */
 	d_state = get_desc_state(id, state_val);
 out:
-	atomic_long_set(&desc_out->state_var, state_val);
+	if (desc_out)
+		atomic_long_set(&desc_out->state_var, state_val);
 	return d_state;
 }
 
@@ -1450,6 +1453,9 @@ static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id)
 
 	atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val,
 			DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */
+
+	/* Best effort to remember the last finalized @id. */
+	atomic_long_set(&desc_ring->last_finalized_id, id);
 }
 
 /**
@@ -1659,7 +1665,12 @@
  */
 void prb_final_commit(struct prb_reserved_entry *e)
 {
+	struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
+
 	_prb_commit(e, desc_finalized);
+
+	/* Best effort to remember the last finalized @id. */
+	atomic_long_set(&desc_ring->last_finalized_id, e->id);
 }
 
 /*
@@ -2007,9 +2018,39 @@ u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
  */
 u64 prb_next_seq(struct printk_ringbuffer *rb)
 {
-	u64 seq = 0;
+	struct prb_desc_ring *desc_ring = &rb->desc_ring;
+	enum desc_state d_state;
+	unsigned long id;
+	u64 seq;
 
-	/* Search forward from the oldest descriptor. */
+	/* Check if the cached @id still points to a valid @seq. */
+	id = atomic_long_read(&desc_ring->last_finalized_id);
+	d_state = desc_read(desc_ring, id, NULL, &seq, NULL);
+
+	if (d_state == desc_finalized || d_state == desc_reusable) {
+		/*
+		 * Begin searching after the last finalized record.
+		 *
+		 * On 0, the search must begin at 0 because of hack#2
+		 * of the bootstrapping phase it is not known if a
+		 * record at index 0 exists.
+		 */
+		if (seq != 0)
+			seq++;
+	} else {
+		/*
+		 * The information about the last finalized sequence number
+		 * has gone. It should happen only when there is a flood of
+		 * new messages and the ringbuffer is rapidly recycled.
+		 * Give up and start from the beginning.
+		 */
+		seq = 0;
+	}
+
+	/*
+	 * The information about the last finalized @seq might be inaccurate.
+	 * Search forward to find the current one.
+	 */
 	while (_prb_read_valid(rb, &seq, NULL, NULL))
 		seq++;
 
@@ -2046,6 +2087,7 @@ void prb_init(struct printk_ringbuffer *rb,
 	rb->desc_ring.infos = infos;
 	atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
 	atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
+	atomic_long_set(&rb->desc_ring.last_finalized_id, DESC0_ID(descbits));
 
 	rb->text_data_ring.size_bits = textbits;
 	rb->text_data_ring.data = text_buf;
@@ -75,6 +75,7 @@ struct prb_desc_ring {
 	struct printk_info *infos;
 	atomic_long_t head_id;
 	atomic_long_t tail_id;
+	atomic_long_t last_finalized_id;
 };
 
 /*
@@ -258,6 +259,7 @@ static struct printk_ringbuffer name = { \
 		.infos = &_##name##_infos[0], \
 		.head_id = ATOMIC_INIT(DESC0_ID(descbits)), \
 		.tail_id = ATOMIC_INIT(DESC0_ID(descbits)), \
+		.last_finalized_id = ATOMIC_INIT(DESC0_ID(descbits)), \
 	}, \
 	.text_data_ring = { \
 		.size_bits = (avgtextbits) + (descbits), \
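Note: the prb_next_seq() change above is a cached-hint optimization. desc_make_final() and prb_final_commit() publish the id of the last finalized record with a best-effort atomic_long_set(), and the reader starts its forward search there instead of at sequence 0, restarting from 0 only when the hint has been recycled. A stripped-down sketch of that pattern in portable C (toy data structures, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>

#define N_ITEMS 1024

static bool done[N_ITEMS];		/* toy stand-in for "finalized" */
static atomic_ulong last_done_hint;	/* best effort, may lag behind */

static void mark_done(unsigned long i)
{
	done[i] = true;
	/* Best-effort publish: racing writers may store an older value,
	 * which only costs the next reader a slightly longer scan.
	 */
	atomic_store_explicit(&last_done_hint, i, memory_order_relaxed);
}

static unsigned long first_not_done(void)
{
	unsigned long i = atomic_load_explicit(&last_done_hint,
					       memory_order_relaxed);

	if (i >= N_ITEMS || !done[i])
		i = 0;			/* stale hint: full rescan */

	while (i < N_ITEMS && done[i])
		i++;
	return i;
}

Correctness never depends on the hint being fresh, which is why a plain relaxed store is enough; staleness only degrades the search back toward the old linear behavior.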
@@ -813,7 +813,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  */
 void synchronize_rcu_expedited(void)
 {
-	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
+	bool no_wq;
 	struct rcu_exp_work rew;
 	struct rcu_node *rnp;
 	unsigned long s;
@@ -838,9 +838,15 @@ void synchronize_rcu_expedited(void)
 	if (exp_funnel_lock(s))
 		return;  /* Someone else did our work for us. */
 
+	/* Don't use workqueue during boot or from an incoming CPU. */
+	preempt_disable();
+	no_wq = rcu_scheduler_active == RCU_SCHEDULER_INIT ||
+		!cpumask_test_cpu(smp_processor_id(), cpu_active_mask);
+	preempt_enable();
+
 	/* Ensure that load happens before action based on it. */
-	if (unlikely(boottime)) {
-		/* Direct call during scheduler init and early_initcalls(). */
+	if (unlikely(no_wq)) {
+		/* Direct call for scheduler init, early_initcall()s, and incoming CPUs. */
 		rcu_exp_sel_wait_wake(s);
 	} else {
 		/* Marshall arguments & schedule the expedited grace period. */
@@ -858,7 +864,7 @@ void synchronize_rcu_expedited(void)
 	/* Let the next expedited grace period start. */
 	mutex_unlock(&rcu_state.exp_mutex);
 
-	if (likely(!boottime))
+	if (likely(!no_wq))
 		destroy_work_on_stack(&rew.rew_work);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
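Note: one detail worth calling out in the hunk above is that the cpu_active_mask test runs with preemption disabled, so the CPU returned by smp_processor_id() is guaranteed to be the CPU the decision is made for. A minimal sketch of that idiom, with a hypothetical helper name:

#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/smp.h>

/* Hypothetical helper: true when the current CPU cannot rely on
 * workqueues, e.g. an incoming CPU not yet in cpu_active_mask.
 */
static bool current_cpu_needs_direct_call(void)
{
	bool no_wq;

	preempt_disable();	/* pin the task: smp_processor_id() stays valid */
	no_wq = !cpumask_test_cpu(smp_processor_id(), cpu_active_mask);
	preempt_enable();

	return no_wq;
}

Without the preempt_disable()/preempt_enable() pair, the task could migrate between reading the CPU number and acting on it, making the activity test meaningless.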
@@ -531,16 +531,16 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
 
-		/* Unboost if we were boosted. */
-		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
-			rt_mutex_futex_unlock(&rnp->boost_mtx);
-
 		/*
 		 * If this was the last task on the expedited lists,
 		 * then we need to report up the rcu_node hierarchy.
 		 */
 		if (!empty_exp && empty_exp_now)
 			rcu_report_exp_rnp(rnp, true);
+
+		/* Unboost if we were boosted. */
+		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
+			rt_mutex_futex_unlock(&rnp->boost_mtx);
 	} else {
 		local_irq_restore(flags);
 	}
@@ -4617,7 +4617,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 
 	if (cfs_rq->nr_running > 1)
 		check_preempt_tick(cfs_rq, curr);
-	trace_android_rvh_entity_tick(cfs_rq, curr);
 }
 
 
@@ -7076,13 +7075,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
 	int next_buddy_marked = 0;
 	bool preempt = false, nopreempt = false;
-	bool ignore = false;
 
 	if (unlikely(se == pse))
 		return;
-	trace_android_rvh_check_preempt_wakeup_ignore(curr, &ignore);
-	if (ignore)
-		return;
 
 	/*
 	 * This is possible from callers such as attach_tasks(), in which we
@@ -20,8 +20,6 @@
 #include <linux/user_namespace.h>
 #include <linux/proc_ns.h>
 
-#include <trace/hooks/user.h>
-
 /*
  * userns count is 1 for root user, 1 for init_uts_ns,
  * and 1 for... ?
@@ -141,7 +139,6 @@ static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
 static void free_user(struct user_struct *up, unsigned long flags)
 	__releases(&uidhash_lock)
 {
-	trace_android_vh_free_user(up);
 	uid_hash_remove(up);
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 	kmem_cache_free(uid_cachep, up);
@@ -193,7 +190,6 @@ struct user_struct *alloc_uid(kuid_t uid)
 
 	new->uid = uid;
 	refcount_set(&new->__count, 1);
-	trace_android_vh_alloc_uid(new);
 	ratelimit_state_init(&new->ratelimit, HZ, 100);
 	ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);
@@ -55,7 +55,6 @@
 #include "workqueue_internal.h"
 
 #include <trace/hooks/wqlockup.h>
-#include <trace/hooks/workqueue.h>
 /* events/workqueue.h uses default TRACE_INCLUDE_PATH */
 #undef TRACE_INCLUDE_PATH
 
@@ -1959,7 +1958,6 @@ static struct worker *create_worker(struct worker_pool *pool)
 	if (IS_ERR(worker->task))
 		goto fail;
 
-	trace_android_vh_create_worker(worker, pool->attrs);
 	set_user_nice(worker->task, pool->attrs->nice);
 	kthread_bind_mask(worker->task, pool->attrs->cpumask);
@@ -4780,7 +4780,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	int no_progress_loops;
 	unsigned int cpuset_mems_cookie;
 	int reserve_flags;
-	unsigned long alloc_start = jiffies;
 
 	/*
 	 * We also sanity check to catch abuse of atomic reserves being used by
 	 * callers that are not in atomic context.
@@ -5022,7 +5022,6 @@ fail:
 	warn_alloc(gfp_mask, ac->nodemask,
 			"page allocation failure: order:%u", order);
 got_pg:
-	trace_android_vh_alloc_pages_slowpath(gfp_mask, order, alloc_start);
 	return page;
 }
@@ -950,7 +950,6 @@ static void print_slabinfo_header(struct seq_file *m)
 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 #endif
-	trace_android_vh_print_slabinfo_header(m);
 	seq_putc(m, '\n');
 }
 
@@ -986,7 +985,6 @@ static void cache_show(struct kmem_cache *s, struct seq_file *m)
 	seq_printf(m, " : slabdata %6lu %6lu %6lu",
 		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
 	slabinfo_show_stats(m, s);
-	trace_android_vh_cache_show(m, &sinfo, s);
 	seq_putc(m, '\n');
 }
@@ -451,8 +451,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 					  : SHRINK_BATCH;
 	long scanned = 0, next_deferred;
 
-	trace_android_vh_do_shrink_slab(shrinker, shrinkctl, priority);
-
 	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
 		nid = 0;
@@ -136,7 +136,6 @@
 
 #include <trace/events/sock.h>
 #include <trace/hooks/sched.h>
-#include <trace/hooks/net.h>
 
 #include <net/tcp.h>
 #include <net/busy_poll.h>
@@ -1696,8 +1695,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 		if (security_sk_alloc(sk, family, priority))
 			goto out_free;
 
-		trace_android_rvh_sk_alloc(sk);
-
 		if (!try_module_get(prot->owner))
 			goto out_free_sec;
 		sk_tx_queue_clear(sk);
@@ -1707,7 +1704,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 
 out_free_sec:
 	security_sk_free(sk);
-	trace_android_rvh_sk_free(sk);
 out_free:
 	if (slab != NULL)
 		kmem_cache_free(slab, sk);
@@ -1727,7 +1723,6 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
 	cgroup_sk_free(&sk->sk_cgrp_data);
 	mem_cgroup_sk_free(sk);
 	security_sk_free(sk);
-	trace_android_rvh_sk_free(sk);
 	if (slab != NULL)
 		kmem_cache_free(slab, sk);
 	else
@@ -1496,8 +1496,6 @@ __nf_conntrack_alloc(struct net *net,
 
 	nf_ct_zone_add(ct, zone);
 
-	trace_android_rvh_nf_conn_alloc(ct);
-
 	/* Because we use RCU lookups, we set ct_general.use to zero before
 	 * this is inserted in any list.
 	 */
@@ -1530,7 +1528,6 @@ void nf_conntrack_free(struct nf_conn *ct)
 	nf_ct_ext_destroy(ct);
 	kmem_cache_free(nf_conntrack_cachep, ct);
 	smp_mb__before_atomic();
-	trace_android_rvh_nf_conn_free(ct);
 	atomic_dec(&net->ct.count);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_free);
@@ -4409,6 +4409,7 @@ static int sysfs_test(const char *mount_dir)
 	int fd = -1;
 	int pid = -1;
 	char buffer[32];
+	char *null_buf = NULL;
 	int status;
 	struct incfs_per_uid_read_timeouts purt_set[] = {
 		{
@@ -4437,13 +4438,13 @@ static int sysfs_test(const char *mount_dir)
 	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
 	TESTEQUAL(ioctl_test_last_error(cmd_fd, NULL, 0, 0), 0);
 	TESTEQUAL(sysfs_test_value("reads_failed_timed_out", 0), 0);
-	TEST(read(fd, NULL, 1), -1);
+	TESTEQUAL(read(fd, null_buf, 1), -1);
 	TESTEQUAL(ioctl_test_last_error(cmd_fd, &file.id, 0, -ETIME), 0);
 	TESTEQUAL(sysfs_test_value("reads_failed_timed_out", 2), 0);
 
 	TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
 	TESTEQUAL(sysfs_test_value("reads_failed_hash_verification", 0), 0);
-	TESTEQUAL(read(fd, NULL, 1), -1);
+	TESTEQUAL(read(fd, null_buf, 1), -1);
 	TESTEQUAL(sysfs_test_value("reads_failed_hash_verification", 1), 0);
 	TESTSYSCALL(close(fd));
 	fd = -1;