Merge branch 'android12-5.10' into android12-5.10-lts

Sync up with android12-5.10 for the following commits:

976d98e9aa ANDROID: ABI: Add page_pinner_inited into symbols list
16c2b1d94f ANDROID: page_pinner: prevent pp_buffer access before initialization
cd1d9c42a2 UPSTREAM: hwrng: virtio - add an internal buffer
05fa7d8eee ANDROID: fix ABI by undoing atomic64_t -> u64 type conversion
cda90416c0 UPSTREAM: net: retrieve netns cookie via getsocketopt
78a559e2a9 UPSTREAM: net: initialize net->net_cookie at netns setup
fb0cece721 Merge tag 'android12-5.10.168_r00' into android12-5.10
989d4c69a9 UPSTREAM: ext4: fix another off-by-one fsmap error on 1k block filesystems
b0d829f27f UPSTREAM: ext4: block range must be validated before use in ext4_mb_clear_bb()
0301fe419a UPSTREAM: ext4: add strict range checks while freeing blocks
1d4b2a4ad7 UPSTREAM: ext4: add ext4_sb_block_valid() refactored out of ext4_inode_block_valid()
8ddbd3df93 UPSTREAM: ext4: refactor ext4_free_blocks() to pull out ext4_mb_clear_bb()
370cb1c270 UPSTREAM: usb: dwc3: core: do not use 3.0 clock when operating in 2.0 mode
eb53a59b4d ANDROID: GKI: rockchip: Add symbols for clk api
a13e8447e8 BACKPORT: arm64: mte: move register initialization to C
eddac45546 UPSTREAM: rcu: Remove __read_mostly annotations from rcu_scheduler_active externs
afff17f583 ANDROID: GKI: Update symbol list for mtk
62f5fae173 UPSTREAM: ext4: refuse to create ea block when umounted
33245a0eac UPSTREAM: ext4: optimize ea_inode block expansion
09e5cc649d UPSTREAM: ext4: allocate extended attribute value in vmalloc area
8926771f7e UPSTREAM: usb: gadget: composite: Draw 100mA current if not configured
87a065fb94 UPSTREAM: usb: dwc3: gadget: Change condition for processing suspend event
26638f8e54 ANDROID: GKI: update xiaomi symbol list
193b312b2f UPSTREAM: net/sched: tcindex: update imperfect hash filters respecting rcu
9a1be9a472 FROMGIT: KVM: arm64: Ignore kvm-arm.mode if !is_hyp_mode_available()
dbcd8cb535 UPSTREAM: KVM: arm64: Allow KVM to be disabled from the command line
631630d75f ANDROID: ABI: Cuttlefish Symbol update
278dfb09d7 Revert "ANDROID: dma-ops: Add restricted vendor hook"
c2e3f757d3 UPSTREAM: io_uring: ensure that io_init_req() passes in the right issue_flags
9abdacf47f FROMGIT: usb: gadget: configfs: Restrict symlink creation is UDC already binded
d415c6e56f UPSTREAM: io_uring: add missing lock in io_get_file_fixed
52cc662810 ANDROID: ABI: Update oplus symbol list
d01f7e1269 ANDROID: vendor_hooks: Add hooks for mutex and rwsem optimistic spin
d4d05c6e6e ANDROID: dma-buf: heaps: Don't lock unused dmabuf_page_pool mutex
1d05213028 ANDROID: mm/filemap: Fix missing put_page() for speculative page fault
fda8a58faa UPSTREAM: KVM: VMX: Execute IBPB on emulated VM-exit when guest has IBRS
5692e2bb4e UPSTREAM: net: qrtr: combine nameservice into main module
4b9d11ae5f ANDROID: GKI: Update symbol list for mtk
b086cc7361 FROMLIST: rcu-tasks: Fix build error
7fd4fbe615 ANDROID: incremental fs: Move throttling to outside page lock
5d9b0e83e3 ANDROID: incremental fs: Fix race between truncate and write last block
6a8037d4eb UPSTREAM: usb: gadget: u_serial: Add null pointer check in gserial_resume
f0be4b9779 Revert "ANDROID: GKI: loadavg: Export for get_avenrun"
781e1c83ef ANDROID: ABI: Update allowed list for QCOM
579f8bf863 ANDROID: Update symbol list for mtk
80b27def69 UPSTREAM: ext4: add inode table check in __ext4_get_inode_loc to aovid possible infinite loop
a4d6d4d1e7 UPSTREAM: net_sched: reject TCF_EM_SIMPLE case for complex ematch module
fb952695c8 UPSTREAM: io_uring/rw: remove leftover debug statement
ca331f289a UPSTREAM: io_uring/rw: ensure kiocb_end_write() is always called
d54d41716d UPSTREAM: io_uring: fix double poll leak on repolling
fc978be7b2 UPSTREAM: io_uring: Clean up a false-positive warning from GCC 9.3.0
827f8fcb29 UPSTREAM: io_uring/net: fix fast_iov assignment in io_setup_async_msg()
403642c036 UPSTREAM: io_uring: io_kiocb_update_pos() should not touch file for non -1 offset
0c50a117bf UPSTREAM: io_uring/rw: defer fsnotify calls to task context
b29c357309 UPSTREAM: io_uring: do not recalculate ppos unnecessarily
84e34d2ef5 UPSTREAM: io_uring: update kiocb->ki_pos at execution time
b543e0d210 UPSTREAM: io_uring: remove duplicated calls to io_kiocb_ppos
9166f5418a UPSTREAM: io_uring: ensure that cached task references are always put on exit
fee5372abf UPSTREAM: io_uring: fix CQ waiting timeout handling
a4d056e350 UPSTREAM: io_uring: lock overflowing for IOPOLL
0dfe72e890 UPSTREAM: io_uring: check for valid register opcode earlier
1b735b5eb2 UPSTREAM: io_uring: fix async accept on O_NONBLOCK sockets
63bf975936 UPSTREAM: io_uring: allow re-poll if we made progress
a64d6ea01b UPSTREAM: io_uring: support MSG_WAITALL for IORING_OP_SEND(MSG)
cf7ef78842 UPSTREAM: io_uring: add flag for disabling provided buffer recycling
45b2a34e21 UPSTREAM: io_uring: ensure recv and recvmsg handle MSG_WAITALL correctly
4b912a635e UPSTREAM: io_uring: improve send/recv error handling
ef0c71d0f1 UPSTREAM: io_uring: don't gate task_work run on TIF_NOTIFY_SIGNAL
1531e1fb8d BACKPORT: iommu: Avoid races around device probe
60944bdddc UPSTREAM: io_uring/io-wq: only free worker if it was allocated for creation
ac06912075 UPSTREAM: io_uring/io-wq: free worker if task_work creation is canceled
98a15feed0 UPSTREAM: io_uring: Fix unsigned 'res' comparison with zero in io_fixup_rw_res()
a234cc4e55 UPSTREAM: um: Increase stack frame size threshold for signal.c
d40d310e5e ANDROID: GKI: Enable ARM64_ERRATUM_2454944
9d2ec2e0b6 ANDROID: dma-ops: Add restricted vendor hook
3c75a6fb7f ANDROID: arm64: Work around Cortex-A510 erratum 2454944
865f370bf9 ANDROID: mm/vmalloc: Add override for lazy vunmap
1eb5992d60 ANDROID: cpuidle-psci: Fix suspicious RCU usage
d6b2899ce6 ANDROID: ABI: update allowed list for galaxy
3fcc69ca4d FROMGIT: f2fs: add sysfs nodes to set last_age_weight
899476c3af FROMGIT: f2fs: fix wrong calculation of block age
d0f788b8fa ANDROID: struct io_uring ABI preservation hack for 5.10.162 changes
fef924db72 ANDROID: fix up struct task_struct ABI change in 5.10.162
d369ac0b2a ANDROID: add flags variable back to struct proto_ops
5756328b3f UPSTREAM: io_uring: pass in EPOLL_URING_WAKE for eventfd signaling and wakeups
72d1c48675 UPSTREAM: eventfd: provide a eventfd_signal_mask() helper
d7a47b29d5 UPSTREAM: eventpoll: add EPOLL_URING_WAKE poll wakeup flag
7c9f38c09b UPSTREAM: Revert "proc: don't allow async path resolution of /proc/self components"
498b35b3c4 UPSTREAM: Revert "proc: don't allow async path resolution of /proc/thread-self components"
4b17dea786 UPSTREAM: net: remove cmsg restriction from io_uring based send/recvmsg calls
d10f30da0d UPSTREAM: task_work: unconditionally run task_work from get_signal()
62822bf630 UPSTREAM: signal: kill JOBCTL_TASK_WORK
5e6347b586 UPSTREAM: io_uring: import 5.15-stable io_uring
518e02ed06 UPSTREAM: task_work: add helper for more targeted task_work canceling
86acb6a529 UPSTREAM: kernel: don't call do_exit() for PF_IO_WORKER threads
52f564e57b UPSTREAM: kernel: stop masking signals in create_io_thread()
bcb749b0b1 UPSTREAM: x86/process: setup io_threads more like normal user space threads
1f4eb35546 UPSTREAM: arch: ensure parisc/powerpc handle PF_IO_WORKER in copy_thread()
150dea15cb UPSTREAM: arch: setup PF_IO_WORKER threads like PF_KTHREAD
cf487d3c6a UPSTREAM: entry/kvm: Exit to user mode when TIF_NOTIFY_SIGNAL is set
6e4362caf9 UPSTREAM: kernel: allow fork with TIF_NOTIFY_SIGNAL pending
b25b8c55ba UPSTREAM: coredump: Limit what can interrupt coredumps
723de95c0c UPSTREAM: kernel: remove checking for TIF_NOTIFY_SIGNAL
8492c5dd3b UPSTREAM: task_work: remove legacy TWA_SIGNAL path
1987566815 UPSTREAM: alpha: fix TIF_NOTIFY_SIGNAL handling
ad4ba3038a UPSTREAM: ARC: unbork 5.11 bootup: fix snafu in _TIF_NOTIFY_SIGNAL handling
bb855b51a9 UPSTREAM: ia64: don't call handle_signal() unless there's actually a signal queued
7140fddd84 UPSTREAM: sparc: add support for TIF_NOTIFY_SIGNAL
c9c70c8cb6 UPSTREAM: riscv: add support for TIF_NOTIFY_SIGNAL
52a756bf17 UPSTREAM: nds32: add support for TIF_NOTIFY_SIGNAL
6eaa6653e4 UPSTREAM: ia64: add support for TIF_NOTIFY_SIGNAL
1dcd12493b UPSTREAM: h8300: add support for TIF_NOTIFY_SIGNAL
b265cdb085 UPSTREAM: c6x: add support for TIF_NOTIFY_SIGNAL
f4ece56973 UPSTREAM: alpha: add support for TIF_NOTIFY_SIGNAL
01af0730c9 UPSTREAM: xtensa: add support for TIF_NOTIFY_SIGNAL
29420dc96b UPSTREAM: arm: add support for TIF_NOTIFY_SIGNAL
6c3e852b4f UPSTREAM: microblaze: add support for TIF_NOTIFY_SIGNAL
8c81f539a0 UPSTREAM: hexagon: add support for TIF_NOTIFY_SIGNAL
175cc59b9c UPSTREAM: csky: add support for TIF_NOTIFY_SIGNAL
2b94543d45 UPSTREAM: openrisc: add support for TIF_NOTIFY_SIGNAL
e2e4fbbceb UPSTREAM: sh: add support for TIF_NOTIFY_SIGNAL
8548375354 UPSTREAM: um: add support for TIF_NOTIFY_SIGNAL
eae40ee91c UPSTREAM: s390: add support for TIF_NOTIFY_SIGNAL
8489c86344 UPSTREAM: mips: add support for TIF_NOTIFY_SIGNAL
b1f0e1159f UPSTREAM: powerpc: add support for TIF_NOTIFY_SIGNAL
98031aa870 UPSTREAM: parisc: add support for TIF_NOTIFY_SIGNAL
470c17bd71 UPSTREAM: nios32: add support for TIF_NOTIFY_SIGNAL
c5825095c4 UPSTREAM: m68k: add support for TIF_NOTIFY_SIGNAL
fcf75a019e UPSTREAM: arm64: add support for TIF_NOTIFY_SIGNAL
d6b63ac444 UPSTREAM: arc: add support for TIF_NOTIFY_SIGNAL
109ccff96d UPSTREAM: x86: Wire up TIF_NOTIFY_SIGNAL
862aa233e7 UPSTREAM: task_work: Use TIF_NOTIFY_SIGNAL if available
a14b028722 UPSTREAM: entry: Add support for TIF_NOTIFY_SIGNAL
00af4b88ad UPSTREAM: fs: provide locked helper variant of close_fd_get_file()
82c3becbef UPSTREAM: file: Rename __close_fd_get_file close_fd_get_file
98006a0a15 UPSTREAM: fs: make do_renameat2() take struct filename
661bc0f679 UPSTREAM: signal: Add task_sigpending() helper
13f03f5275 UPSTREAM: net: add accept helper not installing fd
af091af9db UPSTREAM: net: provide __sys_shutdown_sock() that takes a socket
9505ff1a81 UPSTREAM: tools headers UAPI: Sync openat2.h with the kernel sources
2507b99d9a UPSTREAM: fs: expose LOOKUP_CACHED through openat2() RESOLVE_CACHED
6b92128557 UPSTREAM: Make sure nd->path.mnt and nd->path.dentry are always valid pointers
eaf736aa71 UPSTREAM: fix handling of nd->depth on LOOKUP_CACHED failures in try_to_unlazy*
7928a1689b UPSTREAM: fs: add support for LOOKUP_CACHED
72d2f4c1cd UPSTREAM: saner calling conventions for unlazy_child()
ee44bd07c4 UPSTREAM: iov_iter: add helper to save iov_iter state
463a74a83b UPSTREAM: kernel: provide create_io_thread() helper
8e993eabeb UPSTREAM: net: loopback: use NET_NAME_PREDICTABLE for name_assign_type
4373e5def3 UPSTREAM: Bluetooth: L2CAP: Fix u8 overflow
5278199031 UPSTREAM: HID: uclogic: Add HID_QUIRK_HIDINPUT_FORCE quirk
fa335f5bb9 UPSTREAM: HID: ite: Enable QUIRK_TOUCHPAD_ON_OFF_REPORT on Acer Aspire Switch V 10
784df646aa UPSTREAM: HID: ite: Enable QUIRK_TOUCHPAD_ON_OFF_REPORT on Acer Aspire Switch 10E
29cde746b8 UPSTREAM: HID: ite: Add support for Acer S1002 keyboard-dock
228253f43f UPSTREAM: igb: Initialize mailbox message for VF reset
001a013e84 UPSTREAM: xhci: Apply XHCI_RESET_TO_DEFAULT quirk to ADL-N
4fa772e757 UPSTREAM: USB: serial: f81534: fix division by zero on line-speed change
d81b6e6e88 UPSTREAM: USB: serial: f81232: fix division by zero on line-speed change
190b01ac50 UPSTREAM: USB: serial: cp210x: add Kamstrup RF sniffer PIDs
34d4848ba3 UPSTREAM: USB: serial: option: add Quectel EM05-G modem
9e620f2b54 UPSTREAM: usb: gadget: uvc: Prevent buffer overflow in setup handler
a20fd832a4 BACKPORT: f2fs: do not allow to decompress files have FI_COMPRESS_RELEASED
16996773d6 BACKPORT: f2fs: handle decompress only post processing in softirq
ce72626280 BACKPORT: f2fs: introduce memory mode
246a996565 BACKPORT: f2fs: allow compression for mmap files in compress_mode=user
f069ba2b3d UPSTREAM: iommu/iova: Fix alloc iova overflows issue
a1806694fc UPSTREAM: media: dvb-core: Fix UAF due to refcount races at releasing
5f30de1dff ANDROID: GKI: Add Tuxera symbol list
e3a5b60c60 UPSTREAM: usb: dwc3: gadget: Skip waiting for CMDACT cleared during endxfer
6b23440751 UPSTREAM: usb: dwc3: Increase DWC3 controller halt timeout
4091dff1ff UPSTREAM: usb: dwc3: Remove DWC3 locking during gadget suspend/resume
4fc3932857 UPSTREAM: usb: dwc3: Avoid unmapping USB requests if endxfer is not complete
19803140c0 UPSTREAM: usb: dwc3: gadget: Continue handling EP0 xfercomplete events
0bbc89c346 UPSTREAM: usb: dwc3: gadget: Synchronize IRQ between soft connect/disconnect
35cb147c38 UPSTREAM: usb: dwc3: gadget: Force sending delayed status during soft disconnect
5dc06419d8 UPSTREAM: usb: dwc3: Do not service EP0 and conndone events if soft disconnected
dd8418a59a UPSTREAM: efi: rt-wrapper: Add missing include
67884a649c UPSTREAM: arm64: efi: Execute runtime services from a dedicated stack
6bd9415d98 ANDROID: cpu: correct dl_cpu_busy() calls
9e2b4cc230 UPSTREAM: ALSA: pcm: Move rwsem lock inside snd_ctl_elem_read to prevent UAF
80cad52515 UPSTREAM: firmware: tegra: Reduce stack usage
79c4f55c94 UPSTREAM: scsi: bfa: Move a large struct from the stack onto the heap
e096145ac3 ANDROID: mm: page_pinner: ensure do_div() arguments matches with respect to type
e427004fad ANDROID: Revert "ANDROID: allmodconfig: disable WERROR"
8cf3c25495 FROMGIT: scsi: ufs: Modify Tactive time setting conditions
fc1490c621 UPSTREAM: remoteproc: core: Fix rproc->firmware free in rproc_set_firmware()
869cae6f25 UPSTREAM: usb: gadget: f_fs: Fix unbalanced spinlock in __ffs_ep0_queue_wait
56c8a40436 UPSTREAM: usb: gadget: f_hid: fix f_hidg lifetime vs cdev
e973de77ad UPSTREAM: usb: gadget: f_hid: optional SETUP/SET_REPORT mode
283eb356fd ANDROID: GKI: add symbol list file for honor
d30de90932 ANDROID: add TEST_MAPPING for net/, include/net
75d0665639 BACKPORT: arm64/bpf: Remove 128MB limit for BPF JIT programs

Change-Id: I111e3dafc40d4f06832e374fd10ae5984921dff5
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

@@ -528,3 +528,8 @@ Contact: "Ping Xiong" <xiongping1@xiaomi.com>
Description: When DATA SEPARATION is on, it controls the age threshold to indicate
the data blocks as warm. By default it was initialized as 2621440 blocks
(equals to 10GB).
What: /sys/fs/f2fs/<disk>/last_age_weight
Date: January 2023
Contact: "Ping Xiong" <xiongping1@xiaomi.com>
Description: When DATA SEPARATION is on, it controls the weight of last data block age.


@@ -15,6 +15,12 @@ Contact: Paul Lawrence <paullawrence@google.com>
Description: Reads 'supported'. Present if zstd compression is supported
for data blocks.
What: /sys/fs/incremental-fs/features/bugfix_throttling
Date: January 2023
Contact: Paul Lawrence <paullawrence@google.com>
Description: Reads 'supported'. Present if the throttling lock bug is fixed
https://android-review.git.corp.google.com/c/kernel/common/+/2381827
What: /sys/fs/incremental-fs/instances/[name]
Date: April 2021
Contact: Paul Lawrence <paullawrence@google.com>


@@ -2344,6 +2344,8 @@
kvm-arm.mode=
[KVM,ARM] Select one of KVM/arm64's modes of operation.
none: Forcefully disable KVM.
nvhe: Standard nVHE-based mode, without support for
protected guests.


@@ -300,6 +300,11 @@ inlinecrypt When possible, encrypt/decrypt the contents of encrypted
Documentation/block/inline-encryption.rst.
atgc Enable age-threshold garbage collection, it provides high
effectiveness and efficiency on background GC.
memory=%s Control memory mode. This supports "normal" and "low" modes.
"low" mode is introduced to support low memory devices.
Because of the nature of low memory devices, in this mode, f2fs
will try to save memory sometimes by sacrificing performance.
"normal" mode is the default mode and same as before.
age_extent_cache Enable an age extent cache based on rb-tree. It records
data block update frequency of the extent per inode, in
order to provide better temperature hints for data block


@@ -35,6 +35,9 @@ Features
/sys/fs/incremental-fs/features/zstd
Reads 'supported'. Present if zstd compression is supported for data blocks.
/sys/fs/incremental-fs/features/bugfix_throttling
Reads 'supported'. Present if the throttling lock bug is fixed
Optional per mount
------------------

File diff suppressed because it is too large


@@ -574,6 +574,7 @@
_snd_ctl_add_follower
_snd_pcm_stream_lock_irqsave
_totalram_pages
_trace_android_vh_record_pcpu_rwsem_starttime
access_process_vm
ack_all_badblocks
activate_task


@@ -1294,6 +1294,7 @@
of_usb_host_tpl_support
page_endio
page_mapping
page_pinner_inited
__page_pinner_migration_failed
panic
panic_notifier_list


@@ -0,0 +1,3 @@
[abi_symbol_list]
__traceiter_android_rvh_dma_buf_stats_teardown
__tracepoint_android_rvh_dma_buf_stats_teardown


@@ -34,6 +34,7 @@
__arch_copy_to_user
arch_timer_read_counter
arm64_const_caps_ready
arm64_noalias_setup_dma_ops
arm64_use_ng_mappings
__arm_smccc_hvc
__arm_smccc_smc
@@ -1309,6 +1310,7 @@
__netdev_alloc_skb
netdev_err
netdev_info
netdev_notice
netdev_set_default_ethtool_ops
netdev_warn
netif_carrier_off
@@ -1399,6 +1401,7 @@
of_get_next_child
of_get_next_parent
of_get_parent
of_get_pci_domain_nr
of_get_phy_mode
of_get_property
of_get_regulator_init_data
@@ -1471,6 +1474,11 @@
param_set_ulong
pause_cpus
pci_bus_type
pci_generic_config_read32
pci_generic_config_write32
pci_lock_rescan_remove
pci_pio_to_address
pci_unlock_rescan_remove
PDE_DATA
__per_cpu_offset
perf_event_create_kernel_counter
@@ -2421,6 +2429,8 @@
usb_add_hcd
usb_add_phy_dev
usb_assign_descriptors
usb_autopm_get_interface_async
usb_autopm_put_interface_async
usb_composite_probe
usb_composite_unregister
usb_copy_descriptors
@@ -2430,6 +2440,8 @@
usb_del_gadget_udc
usb_deregister
usb_disabled
usb_driver_set_configuration
usb_enable_lpm
usb_ep_alloc_request
usb_ep_autoconfig
usb_ep_dequeue
@@ -2501,6 +2513,7 @@
usb_remove_function
usb_remove_hcd
usb_remove_phy
usb_reset_device
usb_role_switch_get
usb_role_switch_get_drvdata
usb_role_switch_get_role
@@ -2509,6 +2522,7 @@
usb_role_switch_unregister
usb_speed_string
usb_string_id
usb_wait_anchor_empty_timeout
__usecs_to_jiffies
usleep_range
uuid_null


@@ -2841,6 +2841,9 @@
__traceiter_android_vh_meminfo_proc_show
__traceiter_android_vh_alloc_pages_slowpath_begin
__traceiter_android_vh_alloc_pages_slowpath_end
__traceiter_android_vh_mutex_can_spin_on_owner
__traceiter_android_vh_mutex_opt_spin_finish
__traceiter_android_vh_mutex_opt_spin_start
__traceiter_android_vh_mutex_unlock_slowpath
__traceiter_android_vh_mutex_unlock_slowpath_end
__traceiter_android_vh_mutex_wait_finish
@@ -2868,8 +2871,11 @@
__traceiter_android_vh_record_rwsem_lock_starttime
__traceiter_android_vh_record_pcpu_rwsem_starttime
__traceiter_android_vh_rmqueue
__traceiter_android_vh_rwsem_can_spin_on_owner
__traceiter_android_vh_rwsem_init
__traceiter_android_vh_rwsem_mark_wake_readers
__traceiter_android_vh_rwsem_opt_spin_finish
__traceiter_android_vh_rwsem_opt_spin_start
__traceiter_android_vh_rwsem_set_owner
__traceiter_android_vh_rwsem_set_reader_owned
__traceiter_android_vh_rwsem_up_read_end
@@ -3085,6 +3091,9 @@
__tracepoint_android_vh_meminfo_proc_show
__tracepoint_android_vh_alloc_pages_slowpath_begin
__tracepoint_android_vh_alloc_pages_slowpath_end
__tracepoint_android_vh_mutex_can_spin_on_owner
__tracepoint_android_vh_mutex_opt_spin_finish
__tracepoint_android_vh_mutex_opt_spin_start
__tracepoint_android_vh_mutex_unlock_slowpath
__tracepoint_android_vh_mutex_unlock_slowpath_end
__tracepoint_android_vh_mutex_wait_finish
@@ -3112,8 +3121,11 @@
__tracepoint_android_vh_record_rwsem_lock_starttime
__tracepoint_android_vh_record_pcpu_rwsem_starttime
__tracepoint_android_vh_rmqueue
__tracepoint_android_vh_rwsem_can_spin_on_owner
__tracepoint_android_vh_rwsem_init
__tracepoint_android_vh_rwsem_mark_wake_readers
__tracepoint_android_vh_rwsem_opt_spin_finish
__tracepoint_android_vh_rwsem_opt_spin_start
__tracepoint_android_vh_rwsem_set_owner
__tracepoint_android_vh_rwsem_set_reader_owned
__tracepoint_android_vh_rwsem_up_read_end


@@ -3045,6 +3045,7 @@
ww_mutex_lock
ww_mutex_unlock
__xa_alloc
__xa_alloc_cyclic
xa_destroy
xa_erase
xa_find


@@ -1638,6 +1638,10 @@
of_clk_add_hw_provider
of_clk_hw_simple_get
# required by clk-out.ko
__clk_hw_register_gate
of_clk_parent_fill
# required by clk-rk628.ko
devm_reset_controller_register
@@ -2705,6 +2709,7 @@
snd_soc_dapm_new_widgets
# required by snd-soc-rockchip-i2s-tdm.ko
clk_has_parent
clk_is_match
pm_runtime_forbid
snd_pcm_stop_xrun


@@ -0,0 +1,248 @@
[abi_symbol_list]
add_to_page_cache_locked
__alloc_pages_nodemask
__arch_copy_from_user
__arch_copy_to_user
arm64_const_caps_ready
autoremove_wake_function
balance_dirty_pages_ratelimited
bcmp
bdev_read_only
__bforget
bio_add_page
bio_alloc_bioset
bio_associate_blkg
bio_put
__bitmap_weight
bit_waitqueue
blkdev_issue_discard
blkdev_issue_flush
blk_finish_plug
blk_start_plug
__blockdev_direct_IO
block_invalidatepage
block_is_partially_uptodate
__breadahead
__bread_gfp
__brelse
buffer_migrate_page
capable
capable_wrt_inode_uidgid
__cfi_slowpath
__check_object_size
clear_inode
clear_page_dirty_for_io
complete_and_exit
cpu_hwcap_keys
cpu_hwcaps
create_empty_buffers
current_umask
d_add
d_add_ci
d_instantiate
d_make_root
down_read
down_write
dput
drop_nlink
d_splice_alias
dump_stack
end_buffer_read_sync
end_page_writeback
errseq_set
failure_tracking
fiemap_fill_next_extent
fiemap_prep
filemap_fdatawait_range
filemap_fdatawrite
filemap_flush
__filemap_set_wb_err
filemap_write_and_wait_range
file_remove_privs
file_update_time
file_write_and_wait_range
finish_wait
flush_dcache_page
freezing_slow_path
fs_bio_set
generic_error_remove_page
generic_file_direct_write
generic_file_llseek
generic_file_mmap
generic_file_open
generic_file_read_iter
generic_file_splice_read
generic_fillattr
generic_perform_write
generic_read_dir
generic_write_checks
__getblk_gfp
gic_nonsecure_priorities
grab_cache_page_write_begin
iget5_locked
igrab
ihold
ilookup5
in_group_p
__init_rwsem
init_wait_entry
__init_waitqueue_head
inode_dio_wait
inode_init_once
inode_newsize_ok
inode_set_flags
__insert_inode_hash
invalidate_bdev
invalidate_mapping_pages
io_schedule
iov_iter_advance
iov_iter_alignment
iov_iter_get_pages
iput
is_bad_inode
iter_file_splice_write
iunique
jiffies
jiffies_to_msecs
kasan_flag_enabled
kfree
kill_block_super
__kmalloc
kmalloc_caches
kmem_cache_alloc
kmem_cache_alloc_trace
kmem_cache_create
kmem_cache_create_usercopy
kmem_cache_destroy
kmem_cache_free
krealloc
kthread_create_on_node
kthread_should_stop
kthread_stop
ktime_get_coarse_real_ts64
kvfree
__list_add_valid
__list_del_entry_valid
ll_rw_block
load_nls
load_nls_default
__lock_buffer
__lock_page
lru_cache_add
make_bad_inode
mark_buffer_dirty
mark_buffer_write_io_error
__mark_inode_dirty
mark_page_accessed
memcpy
memmove
memset
mktime64
mnt_drop_write_file
mnt_want_write_file
module_layout
mount_bdev
mpage_readahead
mpage_readpage
__msecs_to_jiffies
__mutex_init
mutex_lock
mutex_trylock
mutex_unlock
new_inode
notify_change
pagecache_get_page
page_cache_next_miss
page_cache_prev_miss
__page_pinner_migration_failed
pagevec_lookup_range_tag
__pagevec_release
__percpu_down_read
preempt_schedule
preempt_schedule_notrace
prepare_to_wait
prepare_to_wait_event
printk
__put_page
put_pages_list
___ratelimit
_raw_read_lock
_raw_read_lock_irqsave
_raw_read_unlock
_raw_read_unlock_irqrestore
_raw_spin_lock
_raw_spin_lock_irqsave
_raw_spin_unlock
_raw_spin_unlock_irqrestore
_raw_write_lock
_raw_write_lock_irqsave
_raw_write_unlock
_raw_write_unlock_irqrestore
rcuwait_wake_up
readahead_gfp_mask
read_cache_page
redirty_page_for_writepage
__refrigerator
register_filesystem
__remove_inode_hash
sb_min_blocksize
sb_set_blocksize
schedule
schedule_timeout_interruptible
seq_printf
setattr_prepare
set_freezable
set_nlink
set_page_dirty
__set_page_dirty_buffers
__set_page_dirty_nobuffers
set_user_nice
simple_strtol
simple_strtoul
simple_strtoull
sprintf
__stack_chk_fail
__stack_chk_guard
strchr
strcmp
strlen
strncasecmp
strncmp
strsep
strstr
submit_bh
submit_bio
__sync_dirty_buffer
sync_dirty_buffer
sync_filesystem
sync_inode_metadata
system_freezing_cnt
sys_tz
tag_pages_for_writeback
__test_set_page_writeback
time64_to_tm
_trace_android_vh_record_pcpu_rwsem_starttime
truncate_inode_pages
truncate_inode_pages_final
truncate_setsize
try_to_writeback_inodes_sb
unload_nls
unlock_buffer
unlock_new_inode
unlock_page
unregister_filesystem
up_read
up_write
vfree
vfs_fsync_range
__vmalloc
vsnprintf
vzalloc
__wait_on_buffer
wait_on_page_bit
wake_bit_function
__wake_up
wake_up_process
__warn_printk
write_inode_now
xa_load


@@ -211,8 +211,12 @@
kstrndup
kstrtobool
kstrtoint
kstrtoll
kstrtouint
kstrtoull
kthread_create_on_node
kthread_should_stop
kthread_stop
ktime_get
ktime_get_mono_fast_ns
ktime_get_raw_ts64
@@ -307,6 +311,10 @@
__per_cpu_offset
perf_trace_buf_alloc
perf_trace_run_bpf_submit
platform_device_add
platform_device_alloc
platform_device_del
platform_device_put
platform_device_register_full
platform_device_unregister
__platform_driver_register
@@ -350,7 +358,6 @@
__rcu_read_unlock
refcount_warn_saturate
register_blkdev
register_netdev
register_netdevice
register_netdevice_notifier
register_pernet_device
@@ -448,12 +455,14 @@
sscanf
__stack_chk_fail
__stack_chk_guard
strchr
strcmp
strcpy
strlcpy
strlen
strncmp
strncpy
strscpy
strsep
submit_bio
__sw_hweight16
@@ -465,7 +474,6 @@
synchronize_rcu
sysfs_create_group
sysfs_create_link
__sysfs_match_string
sysfs_remove_group
sysfs_remove_link
system_freezable_wq
@@ -514,8 +522,11 @@
usleep_range
vabits_actual
vfree
virtio_break_device
virtio_check_driver_offered_feature
virtio_config_changed
virtio_device_freeze
virtio_device_restore
virtqueue_add_inbuf
virtqueue_add_outbuf
virtqueue_add_sgs
@@ -531,12 +542,14 @@
virtqueue_kick
virtqueue_kick_prepare
virtqueue_notify
vmalloc_to_page
vring_create_virtqueue
vring_del_virtqueue
vring_interrupt
vring_transport_features
wait_for_completion
__wake_up
wake_up_process
__warn_printk
# required by ambakmi.ko
@@ -711,7 +724,6 @@
set_page_dirty
# required by goldfish_sync.ko
__close_fd
dma_fence_default_wait
dma_fence_free
@@ -821,7 +833,6 @@
simple_attr_open
simple_attr_release
__skb_ext_put
skb_unlink
# required by md-mod.ko
ack_all_badblocks
@@ -867,22 +878,12 @@
kernfs_put
kobject_del
kobject_get
kstrtoll
kthread_create_on_node
kthread_parkme
kthread_should_park
kthread_should_stop
kthread_stop
mempool_alloc
mempool_create
mempool_destroy
mempool_exit
mempool_free
mempool_init
mempool_kfree
mempool_kmalloc
part_end_io_acct
part_start_io_acct
percpu_ref_exit
percpu_ref_init
percpu_ref_is_zero
@@ -902,7 +903,6 @@
unregister_reboot_notifier
unregister_sysctl_table
vfs_fsync
wake_up_process
# required by nd_virtio.ko
bio_chain
@@ -923,6 +923,7 @@
netdev_lower_state_changed
netdev_pick_tx
pci_bus_type
register_netdev
# required by psmouse.ko
bus_register_notifier
@@ -969,16 +970,11 @@
cec_s_phys_addr
cec_transmit_attempt_done_ts
cec_unregister_adapter
strscpy
wait_for_completion_timeout
# required by rtc-test.ko
add_timer
devm_rtc_allocate_device
platform_device_add
platform_device_alloc
platform_device_del
platform_device_put
__rtc_register_device
rtc_time64_to_tm
rtc_tm_to_time64
@@ -1002,9 +998,9 @@
strcat
# required by snd-hda-codec-generic.ko
devm_led_classdev_register_ext
led_classdev_register_ext
led_classdev_unregister
snd_ctl_boolean_stereo_info
strchr
strlcat
# required by snd-hda-codec.ko
@@ -1106,6 +1102,9 @@
compat_ptr_ioctl
# required by usbip-core.ko
iov_iter_kvec
param_ops_ulong
print_hex_dump
sock_recvmsg
# required by vcan.ko
@@ -1128,9 +1127,27 @@
devm_mfd_add_devices
# required by vhci-hcd.ko
kernel_sendmsg
kernel_sock_shutdown
platform_bus
platform_device_add_data
sockfd_lookup
usb_add_hcd
usb_create_hcd
usb_create_shared_hcd
usb_disabled
usb_get_dev
usb_hcd_check_unlink_urb
usb_hcd_giveback_urb
usb_hcd_is_primary_hcd
usb_hcd_link_urb_to_ep
usb_hcd_poll_rh_status
usb_hcd_resume_root_hub
usb_hcd_unlink_urb_from_ep
usb_put_dev
usb_put_hcd
usb_remove_hcd
usb_speed_string
# required by virt_wifi.ko
__module_get
@@ -1259,7 +1276,6 @@
__traceiter_gpu_mem_total
__tracepoint_dma_fence_emit
__tracepoint_gpu_mem_total
vmalloc_to_page
vmemdup_user
vm_get_page_prot
ww_mutex_lock_interruptible
@@ -1309,6 +1325,7 @@
__blk_rq_map_sg
set_capacity_revalidate_and_notify
string_get_size
__sysfs_match_string
virtio_max_dma_size
# required by virtio_console.ko
@@ -1412,9 +1429,6 @@
pci_request_selected_regions
pci_vfs_assigned
synchronize_irq
virtio_break_device
virtio_device_freeze
virtio_device_restore
# required by virtio_pmem.ko
nvdimm_bus_register
@@ -1422,14 +1436,8 @@
nvdimm_pmem_region_create
# required by virtio_snd.ko
snd_ctl_notify
snd_pcm_format_physical_width
snd_pcm_lib_free_pages
snd_pcm_lib_ioctl
snd_pcm_lib_malloc_pages
snd_pcm_lib_preallocate_pages
_snd_pcm_stream_lock_irqsave
snd_pcm_stream_unlock_irqrestore
wait_for_completion_interruptible_timeout
# required by vmw_vsock_virtio_transport.ko
@@ -1489,9 +1497,27 @@
# required by zsmalloc.ko
dec_zone_page_state
inc_zone_page_state
__lock_page
page_mapping
_raw_read_lock
_raw_read_unlock
_raw_write_lock
_raw_write_unlock
wait_on_page_bit
# preserved by --additions-only
__close_fd
devm_led_classdev_register_ext
__lock_page
mempool_alloc
mempool_exit
mempool_free
mempool_init
part_end_io_acct
part_start_io_acct
skb_unlink
snd_ctl_notify
snd_pcm_lib_free_pages
snd_pcm_lib_malloc_pages
snd_pcm_lib_preallocate_pages
_snd_pcm_stream_lock_irqsave
snd_pcm_stream_unlock_irqrestore


@@ -208,3 +208,9 @@
__tracepoint_android_vh_alloc_pages_reclaim_bypass
__traceiter_android_vh_alloc_pages_failure_bypass
__tracepoint_android_vh_alloc_pages_failure_bypass
#required by us_prox.ko module
iio_trigger_alloc
__iio_trigger_register
iio_trigger_free
iio_trigger_unregister


@@ -124,6 +124,8 @@
#define SO_DETACH_REUSEPORT_BPF 68
#define SO_NETNS_COOKIE 71
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64


@@ -729,6 +729,29 @@ config ARM64_ERRATUM_2067961
If unsure, say Y.
config ARM64_ERRATUM_2454944
bool "Cortex-A510: 2454944: Unmodified cache line might be written back to memory"
select ARCH_HAS_TEARDOWN_DMA_OPS
select RODATA_FULL_DEFAULT_ENABLED
help
This option adds the workaround for ARM Cortex-A510 erratum 2454944.
Affected Cortex-A510 core might write unmodified cache lines back to
memory, which breaks the assumptions upon which software coherency
management for non-coherent DMA relies. If a cache line is
speculatively fetched while a non-coherent device is writing directly
to DRAM, and subsequently written back by natural eviction, data
written by the device in the intervening period can be lost.
The workaround is to enforce as far as reasonably possible that all
non-coherent DMA transfers are bounced and/or remapped to minimise
the chance that any Cacheable alias exists through which speculative
cache fills could occur. To further improve effectiveness of
the workaround, lazy TLB flushing should be disabled.
This is quite involved and has unavoidable performance impact on
affected systems.
config ARM64_ERRATUM_2457168
bool "Cortex-A510: 2457168: workaround for AMEVCNTR01 incrementing incorrectly"
depends on ARM64_AMU_EXTN


@@ -51,6 +51,7 @@ CONFIG_ARCH_SUNXI=y
CONFIG_ARCH_HISI=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SPRD=y
CONFIG_ARM64_ERRATUM_2454944=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=32
CONFIG_PARAVIRT=y


@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2022-2023 ARM Ltd.
*/
#ifndef __ASM_DMA_MAPPING_NOALIAS_H
#define __ASM_DMA_MAPPING_NOALIAS_H
#ifdef CONFIG_ARM64_ERRATUM_2454944
void arm64_noalias_setup_dma_ops(struct device *dev);
#else
static inline void arm64_noalias_setup_dma_ops(struct device *dev)
{
}
#endif
#endif /* __ASM_DMA_MAPPING_NOALIAS_H */


@@ -22,15 +22,6 @@ struct exception_table_entry
#define ARCH_HAS_RELATIVE_EXTABLE
static inline bool in_bpf_jit(struct pt_regs *regs)
{
if (!IS_ENABLED(CONFIG_BPF_JIT))
return false;
return regs->pc >= BPF_JIT_REGION_START &&
regs->pc < BPF_JIT_REGION_END;
}
#ifdef CONFIG_BPF_JIT
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
struct pt_regs *regs);


@@ -58,6 +58,7 @@
enum kvm_mode {
KVM_MODE_DEFAULT,
KVM_MODE_PROTECTED,
KVM_MODE_NONE,
};
enum kvm_mode kvm_get_mode(void);


@@ -44,11 +44,8 @@
#define _PAGE_OFFSET(va) (-(UL(1) << (va)))
#define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS))
#define KIMAGE_VADDR (MODULES_END)
#define BPF_JIT_REGION_START (KASAN_SHADOW_END)
#define BPF_JIT_REGION_SIZE (SZ_128M)
#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (BPF_JIT_REGION_END)
#define MODULES_VADDR (KASAN_SHADOW_END)
#define MODULES_VSIZE (SZ_128M)
#define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M)
#define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE)


@@ -40,7 +40,9 @@ void mte_sync_tags(pte_t *ptep, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
void mte_cpu_setup(void);
void mte_suspend_enter(void);
void mte_suspend_exit(void);
long set_mte_ctrl(struct task_struct *task, unsigned long arg);
long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request,
@@ -69,6 +71,9 @@ static inline void mte_thread_switch(struct task_struct *next)
static inline void mte_suspend_enter(void)
{
}
static inline void mte_suspend_exit(void)
{
}
static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
return 0;


@@ -1870,7 +1870,8 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
{
sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
isb();
mte_cpu_setup();
/*
* Clear the tags in the zero page. This needs to be done via the


@@ -242,6 +242,49 @@ void mte_thread_switch(struct task_struct *next)
mte_check_tfsr_el1();
}
void mte_cpu_setup(void)
{
u64 rgsr;
/*
* CnP must be enabled only after the MAIR_EL1 register has been set
* up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may
* lead to the wrong memory type being used for a brief window during
* CPU power-up.
*
* CnP is not a boot feature so MTE gets enabled before CnP, but let's
* make sure that is the case.
*/
BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);
/* Normal Tagged memory type at the corresponding MAIR index */
sysreg_clear_set(mair_el1,
MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED,
MT_NORMAL_TAGGED));
write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);
/*
* If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
* RGSR_EL1.SEED must be non-zero for IRG to produce
* pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
* must initialize it.
*/
rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) <<
SYS_RGSR_EL1_SEED_SHIFT;
if (rgsr == 0)
rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;
write_sysreg_s(rgsr, SYS_RGSR_EL1);
/* clear any pending tag check faults in TFSR*_EL1 */
write_sysreg_s(0, SYS_TFSR_EL1);
write_sysreg_s(0, SYS_TFSRE0_EL1);
local_flush_tlb_all();
}
void mte_suspend_enter(void)
{
if (!system_supports_mte())
@@ -258,6 +301,14 @@ void mte_suspend_enter(void)
mte_check_tfsr_el1();
}
void mte_suspend_exit(void)
{
if (!system_supports_mte())
return;
mte_cpu_setup();
}
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &


@@ -42,6 +42,8 @@ void notrace __cpu_suspend_exit(void)
{
unsigned int cpu = smp_processor_id();
mte_suspend_exit();
/*
* We are resuming from reset with the idmap active in TTBR0_EL1.
* We must uninstall the idmap and restore the expected MMU


@@ -931,7 +931,7 @@ static struct break_hook bug_break_hook = {
static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
{
pr_err("%s generated an invalid instruction at %pS!\n",
in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
"Kernel text patching",
(void *)instruction_pointer(regs));
/* We cannot handle this */


@@ -2061,6 +2061,11 @@ int kvm_arch_init(void *opaque)
return -ENODEV;
}
if (kvm_get_mode() == KVM_MODE_NONE) {
kvm_info("KVM disabled from command line\n")
return -ENODEV;
}
in_hyp_mode = is_kernel_in_hyp_mode();
if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
@@ -2137,13 +2142,25 @@ static int __init early_kvm_mode_cfg(char *arg)
if (!arg)
return -EINVAL;
if (strcmp(arg, "none") == 0) {
kvm_mode = KVM_MODE_NONE;
return 0;
}
if (!is_hyp_mode_available()) {
pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n");
return 0;
}
if (strcmp(arg, "protected") == 0) {
kvm_mode = KVM_MODE_PROTECTED;
return 0;
}
if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode()))
if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) {
kvm_mode = KVM_MODE_DEFAULT;
return 0;
}
return -EINVAL;
}


@@ -13,3 +13,5 @@ KASAN_SANITIZE_physaddr.o += n
obj-$(CONFIG_KASAN) += kasan_init.o
KASAN_SANITIZE_kasan_init.o := n
obj-$(CONFIG_ARM64_ERRATUM_2454944) += dma-mapping-noalias.o


@@ -0,0 +1,576 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support for uncached DMA mappings.
* Part of Cortex-A510 erratum 2454944 workaround.
*
* Copyright (C) 2022-2023 ARM Ltd.
* Author: Robin Murphy <robin.murphy@arm.com>
* Activating swiotlb + disabling lazy vunmap: Beata Michalska
*/
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
/*
* Bits [58:55] of the translation table descriptor are being reserved
* by the architecture for software use purposes. With the assumption that
* those should not be used on linear map addresses (which is not without
* any guarantee though), those bits are being leveraged to trace potential
* cacheable aliases. This is still far from being perfect, to say at least:
* ... categorically the worst, but oh well, needs must...
*/
#define REFCOUNT_INC BIT(55)
#define PTE_REFCOUNT(pte) (((pte) >> 55) & 0xf)
static int pte_set_nc(pte_t *ptep, unsigned long addr, void *data)
{
pteval_t old_pte, new_pte, pte;
unsigned int refcount;
pte = pte_val(READ_ONCE(*ptep));
do {
/* Avoid racing against the transient invalid state */
old_pte = pte | PTE_VALID;
new_pte = old_pte + REFCOUNT_INC;
refcount = PTE_REFCOUNT(pte);
if (WARN_ON(refcount == 15))
return -EINVAL;
if (refcount == 0) {
new_pte &= ~(PTE_ATTRINDX_MASK | PTE_VALID);
new_pte |= PTE_ATTRINDX(MT_NORMAL_NC);
}
pte = cmpxchg_relaxed(&pte_val(*ptep), old_pte, new_pte);
} while (pte != old_pte);
*(unsigned int *)data = refcount;
if (refcount)
return 0;
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
WRITE_ONCE(*ptep, __pte(new_pte | PTE_VALID));
return 0;
}
static int pte_clear_nc(pte_t *ptep, unsigned long addr, void *data)
{
pteval_t old_pte, new_pte, pte;
unsigned int refcount;
pte = pte_val(READ_ONCE(*ptep));
do {
old_pte = pte | PTE_VALID;
new_pte = old_pte - REFCOUNT_INC;
refcount = PTE_REFCOUNT(pte);
if (WARN_ON(refcount == 0))
return -EINVAL;
if (refcount == 1) {
new_pte &= ~(PTE_ATTRINDX_MASK | PTE_VALID);
new_pte |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
}
pte = cmpxchg_relaxed(&pte_val(*ptep), old_pte, new_pte);
} while (pte != old_pte);
if (refcount > 1)
return 0;
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
WRITE_ONCE(*ptep, __pte(new_pte | PTE_VALID));
return 0;
}
static int set_nc(void *addr, size_t size)
{
unsigned int count;
int ret = apply_to_existing_page_range(&init_mm, (unsigned long)addr,
size, pte_set_nc, &count);
WARN_RATELIMIT(count == 0 && page_mapped(virt_to_page(addr)),
"changing linear mapping but cacheable aliases may still exist\n");
dsb(ishst);
isb();
__flush_dcache_area(addr, size);
return ret;
}
static int clear_nc(void *addr, size_t size)
{
int ret = apply_to_existing_page_range(&init_mm, (unsigned long)addr,
size, pte_clear_nc, NULL);
dsb(ishst);
isb();
__inval_dcache_area(addr, size);
return ret;
}
static phys_addr_t __arm64_noalias_map(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir,
unsigned long attrs, bool bounce)
{
bounce = bounce || (phys | size) & ~PAGE_MASK;
if (bounce) {
phys = swiotlb_tbl_map_single(dev, phys, size, PAGE_ALIGN(size),
dir, attrs);
if (phys == DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
}
if (set_nc(phys_to_virt(phys & PAGE_MASK), PAGE_ALIGN(size)))
goto out_unmap;
return phys;
out_unmap:
if (bounce)
swiotlb_tbl_unmap_single(dev, phys, size, PAGE_ALIGN(size), dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
}
static void __arm64_noalias_unmap(struct device *dev, phys_addr_t phys, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
clear_nc(phys_to_virt(phys & PAGE_MASK), PAGE_ALIGN(size));
if (is_swiotlb_buffer(phys))
swiotlb_tbl_unmap_single(dev, phys, size, PAGE_ALIGN(size), dir, attrs);
}
static void __arm64_noalias_sync_for_device(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir)
{
if (is_swiotlb_buffer(phys))
swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);
else
arch_sync_dma_for_device(phys, size, dir);
}
static void __arm64_noalias_sync_for_cpu(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir)
{
if (is_swiotlb_buffer(phys))
swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
else
arch_sync_dma_for_cpu(phys, size, dir);
}
static void *arm64_noalias_alloc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp, unsigned long attrs)
{
struct page *page;
void *ret;
if (attrs & DMA_ATTR_NO_WARN)
gfp |= __GFP_NOWARN;
size = PAGE_ALIGN(size);
page = dma_direct_alloc_pages(dev, size, dma_addr, 0, gfp & ~__GFP_ZERO);
if (!page)
return NULL;
ret = page_address(page);
if (set_nc(ret, size)) {
dma_direct_free_pages(dev, size, page, *dma_addr, 0);
return NULL;
}
return ret;
}
static void arm64_noalias_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
size = PAGE_ALIGN(size);
clear_nc(cpu_addr, size);
dma_direct_free_pages(dev, size, virt_to_page(cpu_addr), dma_addr, 0);
}
static dma_addr_t arm64_noalias_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
phys_addr_t phys = page_to_phys(page) + offset;
bool bounce = !dma_capable(dev, phys_to_dma(dev, phys), size, true);
if (!bounce && dir == DMA_TO_DEVICE) {
arch_sync_dma_for_device(phys, size, dir);
return phys_to_dma(dev, phys);
}
bounce = bounce || page_mapped(page);
phys = __arm64_noalias_map(dev, phys, size, dir, attrs, bounce);
if (phys == DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
return phys_to_dma(dev, phys);
}
static void arm64_noalias_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
if (dir == DMA_TO_DEVICE)
return;
__arm64_noalias_unmap(dev, dma_to_phys(dev, dma_addr), size, dir, attrs);
}
static void arm64_noalias_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
if (dir == DMA_TO_DEVICE)
return;
for_each_sg(sgl, sg, nents, i)
__arm64_noalias_unmap(dev, dma_to_phys(dev, sg->dma_address),
sg->length, dir, attrs);
}
static int arm64_noalias_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
int i;
struct scatterlist *sg;
for_each_sg(sgl, sg, nents, i) {
sg->dma_address = arm64_noalias_map_page(dev, sg_page(sg), sg->offset,
sg->length, dir, attrs);
if (sg->dma_address == DMA_MAPPING_ERROR)
goto out_unmap;
sg->dma_length = sg->length;
}
return nents;
out_unmap:
arm64_noalias_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
return 0;
}
static void arm64_noalias_sync_single_for_device(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
__arm64_noalias_sync_for_device(dev, dma_to_phys(dev, addr), size, dir);
}
static void arm64_noalias_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
__arm64_noalias_sync_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
}
static void arm64_noalias_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nents, i)
arm64_noalias_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
}
static void arm64_noalias_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nents, i)
arm64_noalias_sync_single_for_cpu(dev, sg->dma_address, sg->length, dir);
}
static const struct dma_map_ops arm64_noalias_ops = {
.alloc = arm64_noalias_alloc,
.free = arm64_noalias_free,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.map_page = arm64_noalias_map_page,
.unmap_page = arm64_noalias_unmap_page,
.map_sg = arm64_noalias_map_sg,
.unmap_sg = arm64_noalias_unmap_sg,
.sync_single_for_cpu = arm64_noalias_sync_single_for_cpu,
.sync_single_for_device = arm64_noalias_sync_single_for_device,
.sync_sg_for_cpu = arm64_noalias_sync_sg_for_cpu,
.sync_sg_for_device = arm64_noalias_sync_sg_for_device,
.dma_supported = dma_direct_supported,
.get_required_mask = dma_direct_get_required_mask,
.max_mapping_size = swiotlb_max_mapping_size,
};
#ifdef CONFIG_IOMMU_DMA
static const struct dma_map_ops *iommu_dma_ops;
static void *arm64_iommu_alloc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp, unsigned long attrs)
{
struct page **pages;
void *ret;
int i;
size = PAGE_ALIGN(size);
if (!gfpflags_allow_blocking(gfp) || (attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
ret = dma_common_alloc_pages(dev, size, dma_addr, 0, gfp);
return ret ? page_address(ret) : NULL;
}
ret = iommu_dma_ops->alloc(dev, size, dma_addr, gfp, attrs);
if (ret) {
pages = dma_common_find_pages(ret);
for (i = 0; i < size / PAGE_SIZE; i++)
if (set_nc(page_address(pages[i]), PAGE_SIZE))
goto err;
}
return ret;
err:
while (i--)
clear_nc(page_address(pages[i]), PAGE_SIZE);
iommu_dma_ops->free(dev, size, ret, *dma_addr, attrs);
return NULL;
}
static void arm64_iommu_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs)
{
struct page **pages = dma_common_find_pages(cpu_addr);
int i;
size = PAGE_ALIGN(size);
if (!pages)
return dma_common_free_pages(dev, size, virt_to_page(cpu_addr), dma_addr, 0);
for (i = 0; i < size / PAGE_SIZE; i++)
clear_nc(page_address(pages[i]), PAGE_SIZE);
iommu_dma_ops->free(dev, size, cpu_addr, dma_addr, attrs);
}
static dma_addr_t arm64_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
phys_addr_t phys = page_to_phys(page) + offset;
dma_addr_t ret;
if (dir == DMA_TO_DEVICE)
return iommu_dma_ops->map_page(dev, page, offset, size, dir, attrs);
phys = __arm64_noalias_map(dev, phys, size, dir, attrs, page_mapped(page));
if (phys == DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
attrs |= DMA_ATTR_SKIP_CPU_SYNC;
ret = iommu_dma_ops->map_page(dev, phys_to_page(phys), offset_in_page(phys),
size, dir, attrs);
if (ret == DMA_MAPPING_ERROR)
__arm64_noalias_unmap(dev, phys, size, dir, attrs);
return ret;
}
static void arm64_iommu_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
phys_addr_t phys;
if (dir == DMA_TO_DEVICE)
return iommu_dma_ops->unmap_page(dev, addr, size, dir, attrs);
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), addr);
iommu_dma_ops->unmap_page(dev, addr, size, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
__arm64_noalias_unmap(dev, phys, size, dir, attrs);
}
static int arm64_iommu_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
int i, ret;
struct scatterlist *sg;
phys_addr_t *orig_phys;
if (dir == DMA_TO_DEVICE)
return iommu_dma_ops->map_sg(dev, sgl, nents, dir, attrs);
orig_phys = kmalloc_array(nents, sizeof(*orig_phys), GFP_ATOMIC);
if (!orig_phys)
return 0;
for_each_sg(sgl, sg, nents, i) {
phys_addr_t phys = sg_phys(sg);
/*
* Note we do not have the page_mapped() check here, since
* bouncing plays complete havoc with dma-buf imports. Those
* may well be mapped in userspace, but we hope and pray that
* it's via dma_mmap_attrs() so any such mappings are safely
* non-cacheable. DO NOT allow a block device or other similar
* scatterlist user to get here (disable IOMMUs if necessary),
* since we can't mitigate for both conflicting use-cases.
*/
phys = __arm64_noalias_map(dev, phys, sg->length, dir, attrs, false);
if (phys == DMA_MAPPING_ERROR)
goto out_unmap;
orig_phys[i] = sg_phys(sg);
sg_assign_page(sg, phys_to_page(phys));
sg->offset = offset_in_page(phys);
}
ret = iommu_dma_ops->map_sg(dev, sgl, nents, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
if (ret <= 0)
goto out_unmap;
for_each_sg(sgl, sg, nents, i) {
sg_assign_page(sg, phys_to_page(orig_phys[i]));
sg->offset = offset_in_page(orig_phys[i]);
}
kfree(orig_phys);
return ret;
out_unmap:
for_each_sg(sgl, sg, nents, i) {
__arm64_noalias_unmap(dev, sg_phys(sg), sg->length, dir, attrs);
sg_assign_page(sg, phys_to_page(orig_phys[i]));
sg->offset = offset_in_page(orig_phys[i]);
}
kfree(orig_phys);
return 0;
}
static void arm64_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
struct iommu_domain *domain;
struct scatterlist *sg, *tmp;
dma_addr_t iova;
int i;
if (dir == DMA_TO_DEVICE)
return iommu_dma_ops->unmap_sg(dev, sgl, nents, dir, attrs);
domain = iommu_get_dma_domain(dev);
iova = sgl->dma_address;
tmp = sgl;
for_each_sg(sgl, sg, nents, i) {
phys_addr_t phys = iommu_iova_to_phys(domain, iova);
__arm64_noalias_unmap(dev, phys, sg->length, dir, attrs);
iova += sg->length;
if (iova == tmp->dma_address + tmp->dma_length && !sg_is_last(tmp)) {
tmp = sg_next(tmp);
iova = tmp->dma_address;
}
}
iommu_dma_ops->unmap_sg(dev, sgl, nents, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
}
static void arm64_iommu_sync_single_for_device(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
phys_addr_t phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), addr);
__arm64_noalias_sync_for_device(dev, phys, size, dir);
}
static void arm64_iommu_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir)
{
phys_addr_t phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), addr);
__arm64_noalias_sync_for_cpu(dev, phys, size, dir);
}
static void arm64_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct scatterlist *sg, *tmp = sgl;
dma_addr_t iova = sgl->dma_address;
int i;
for_each_sg(sgl, sg, nents, i) {
phys_addr_t phys = iommu_iova_to_phys(domain, iova);
__arm64_noalias_sync_for_device(dev, phys, sg->length, dir);
iova += sg->length;
if (iova == tmp->dma_address + tmp->dma_length && !sg_is_last(tmp)) {
tmp = sg_next(tmp);
iova = tmp->dma_address;
}
}
}
static void arm64_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir)
{
struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct scatterlist *sg, *tmp = sgl;
dma_addr_t iova = sgl->dma_address;
int i;
for_each_sg(sgl, sg, nents, i) {
phys_addr_t phys = iommu_iova_to_phys(domain, iova);
__arm64_noalias_sync_for_cpu(dev, phys, sg->length, dir);
iova += sg->length;
if (iova == tmp->dma_address + tmp->dma_length && !sg_is_last(tmp)) {
tmp = sg_next(tmp);
iova = tmp->dma_address;
}
}
}
static struct dma_map_ops arm64_iommu_ops = {
.alloc = arm64_iommu_alloc,
.free = arm64_iommu_free,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
.map_page = arm64_iommu_map_page,
.unmap_page = arm64_iommu_unmap_page,
.map_sg = arm64_iommu_map_sg,
.unmap_sg = arm64_iommu_unmap_sg,
.sync_single_for_cpu = arm64_iommu_sync_single_for_cpu,
.sync_single_for_device = arm64_iommu_sync_single_for_device,
.sync_sg_for_cpu = arm64_iommu_sync_sg_for_cpu,
.sync_sg_for_device = arm64_iommu_sync_sg_for_device,
};
#endif /* CONFIG_IOMMU_DMA */
static inline void arm64_noalias_prepare(void)
{
if (!is_swiotlb_active())
swiotlb_late_init_with_default_size(swiotlb_size_or_default());
if (lazy_vunmap_enable) {
lazy_vunmap_enable = false;
vm_unmap_aliases();
}
}
void arm64_noalias_setup_dma_ops(struct device *dev)
{
if (dev_is_dma_coherent(dev))
return;
dev_info(dev, "applying no-alias DMA workaround\n");
if (!dev->dma_ops) {
dev->dma_ops = &arm64_noalias_ops;
goto done;
}
if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
dev->dma_ops = &arm64_iommu_ops;
if (iommu_dma_ops)
goto done;
iommu_dma_ops = dev->dma_ops;
arm64_iommu_ops.mmap = iommu_dma_ops->mmap;
arm64_iommu_ops.get_sgtable = iommu_dma_ops->get_sgtable;
arm64_iommu_ops.map_resource = iommu_dma_ops->map_resource;
arm64_iommu_ops.unmap_resource = iommu_dma_ops->unmap_resource;
arm64_iommu_ops.get_merge_boundary = iommu_dma_ops->get_merge_boundary;
}
done:
arm64_noalias_prepare();
}
EXPORT_SYMBOL_GPL(arm64_noalias_setup_dma_ops);
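The helper above overrides dev->dma_ops only for non-coherent devices, and otherwise wraps the IOMMU DMA ops it finds already installed. The following is a minimal user-space model of that override-with-fallback dispatch, using a hypothetical op table and device struct rather than the kernel's dma_map_ops API; it only illustrates the shape of the setup path.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for dma_map_ops and struct device. */
struct map_ops {
	const char *name;
	void (*map)(void);
};

static void direct_map(void)  { puts("direct mapping path"); }
static void noalias_map(void) { puts("noalias (remap/bounce) path"); }

static const struct map_ops direct_ops  = { "direct",  direct_map };
static const struct map_ops noalias_ops = { "noalias", noalias_map };

struct fake_dev {
	bool coherent;
	const struct map_ops *ops;	/* NULL means "use the default ops" */
};

/* Mirrors the shape of arm64_noalias_setup_dma_ops(): leave coherent
 * devices alone, otherwise install the workaround ops. */
static void setup_ops(struct fake_dev *dev)
{
	if (dev->coherent)
		return;
	if (!dev->ops)
		dev->ops = &noalias_ops;
	/* the real code instead wraps the already-installed IOMMU ops here */
}

int main(void)
{
	struct fake_dev a = { .coherent = true };
	struct fake_dev b = { .coherent = false };

	setup_ops(&a);
	setup_ops(&b);
	(a.ops ? a.ops : &direct_ops)->map();
	(b.ops ? b.ops : &direct_ops)->map();
	return 0;
}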


@@ -9,14 +9,19 @@
 int fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *fixup;
+	unsigned long addr;
 
-	fixup = search_exception_tables(instruction_pointer(regs));
+	addr = instruction_pointer(regs);
+
+	/* Search the BPF tables first, these are formatted differently */
+	fixup = search_bpf_extables(addr);
+	if (fixup)
+		return arm64_bpf_fixup_exception(fixup, regs);
+
+	fixup = search_exception_tables(addr);
 	if (!fixup)
 		return 0;
 
-	if (in_bpf_jit(regs))
-		return arm64_bpf_fixup_exception(fixup, regs);
-
 	regs->pc = (unsigned long)&fixup->fixup + fixup->fixup;
 	return 1;
 }


@@ -47,17 +47,19 @@
 #ifdef CONFIG_KASAN_HW_TAGS
 #define TCR_MTE_FLAGS	SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
-#else
+#elif defined(CONFIG_ARM64_MTE)
 /*
  * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
  * TBI being enabled at EL1.
  */
 #define TCR_MTE_FLAGS	TCR_TBI1 | TCR_TBID1
+#else
+#define TCR_MTE_FLAGS	0
 #endif
 
 /*
  * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
- * changed during __cpu_setup to Normal Tagged if the system supports MTE.
+ * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
  */
 #define MAIR_EL1_SET							\
 	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
@@ -427,47 +429,6 @@ SYM_FUNC_START(__cpu_setup)
 	 * Memory region attributes
 	 */
 	mov_q	x5, MAIR_EL1_SET
-#ifdef CONFIG_ARM64_MTE
-	mte_tcr	.req	x20
-
-	mov	mte_tcr, #0
-
-	/*
-	 * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
-	 * (ID_AA64PFR1_EL1[11:8] > 1).
-	 */
-	mrs	x10, ID_AA64PFR1_EL1
-	ubfx	x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
-	cmp	x10, #ID_AA64PFR1_MTE
-	b.lt	1f
-
-	/* Normal Tagged memory type at the corresponding MAIR index */
-	mov	x10, #MAIR_ATTR_NORMAL_TAGGED
-	bfi	x5, x10, #(8 * MT_NORMAL_TAGGED), #8
-
-	mov	x10, #KERNEL_GCR_EL1
-	msr_s	SYS_GCR_EL1, x10
-
-	/*
-	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
-	 * RGSR_EL1.SEED must be non-zero for IRG to produce
-	 * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
-	 * must initialize it.
-	 */
-	mrs	x10, CNTVCT_EL0
-	ands	x10, x10, #SYS_RGSR_EL1_SEED_MASK
-	csinc	x10, x10, xzr, ne
-	lsl	x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
-	msr_s	SYS_RGSR_EL1, x10
-
-	/* clear any pending tag check faults in TFSR*_EL1 */
-	msr_s	SYS_TFSR_EL1, xzr
-	msr_s	SYS_TFSRE0_EL1, xzr
-
-	/* set the TCR_EL1 bits */
-	mov_q	mte_tcr, TCR_MTE_FLAGS
-1:
-#endif
 	msr	mair_el1, x5
 	/*
 	 * Set/prepare TCR and TTBR. TCR_EL1.T1SZ gets further
@@ -475,11 +436,8 @@ SYM_FUNC_START(__cpu_setup)
 	 */
 	mov_q	x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
 			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
-			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
-#ifdef CONFIG_ARM64_MTE
-	orr	x10, x10, mte_tcr
-	.unreq	mte_tcr
-#endif
+			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
 	tcr_clear_errata_bits x10, x9, x5
 
 #ifdef CONFIG_ARM64_VA_BITS_52


@@ -41,8 +41,6 @@ static struct addr_marker address_markers[] = {
 	{ 0 /* KASAN_SHADOW_START */,	"Kasan shadow start" },
 	{ KASAN_SHADOW_END,		"Kasan shadow end" },
 #endif
-	{ BPF_JIT_REGION_START,		"BPF start" },
-	{ BPF_JIT_REGION_END,		"BPF end" },
 	{ MODULES_VADDR,		"Modules start" },
 	{ MODULES_END,			"Modules end" },
 	{ VMALLOC_START,		"vmalloc() area" },


@@ -1148,15 +1148,12 @@ out:
 u64 bpf_jit_alloc_exec_limit(void)
 {
-	return BPF_JIT_REGION_SIZE;
+	return VMALLOC_END - VMALLOC_START;
 }
 
 void *bpf_jit_alloc_exec(unsigned long size)
 {
-	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
-				    BPF_JIT_REGION_END, GFP_KERNEL,
-				    PAGE_KERNEL, 0, NUMA_NO_NODE,
-				    __builtin_return_address(0));
+	return vmalloc(size);
 }
 
 void bpf_jit_free_exec(void *addr)


@@ -135,6 +135,8 @@
 #define SO_DETACH_REUSEPORT_BPF	68
 
+#define SO_NETNS_COOKIE		71
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64


@@ -116,6 +116,8 @@
 #define SO_DETACH_REUSEPORT_BPF	0x4042
 
+#define SO_NETNS_COOKIE		0x4045
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64


@@ -117,6 +117,8 @@
 #define SO_DETACH_REUSEPORT_BPF	0x0047
 
+#define SO_NETNS_COOKIE		0x0050
+
 #if !defined(__KERNEL__)


@@ -10,6 +10,8 @@ obj-y = execvp.o file.o helper.o irq.o main.o mem.o process.o \
 	registers.o sigio.o signal.o start_up.o time.o tty.o \
 	umid.o user_syms.o util.o drivers/ skas/
 
+CFLAGS_signal.o += -Wframe-larger-than=4096
+
 obj-$(CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA) += elf_aux.o
 
 USER_OBJS := $(user-objs-y) elf_aux.o execvp.o file.o helper.o irq.o \


@@ -9,7 +9,6 @@ function update_config() {
 		-d CPU_BIG_ENDIAN \
 		-d DYNAMIC_FTRACE \
 		-e UNWINDER_FRAME_POINTER \
-		-d WERROR \
 
 	(cd ${OUT_DIR} && \
 	 make O=${OUT_DIR} $archsubarch CROSS_COMPILE=${CROSS_COMPILE} "${TOOL_ARGS[@]}" ${MAKE_ARGS} olddefconfig)


@@ -19,6 +19,7 @@ android/abi_gki_aarch64_fips140
 android/abi_gki_aarch64_galaxy
 android/abi_gki_aarch64_generic
 android/abi_gki_aarch64_hikey960
+android/abi_gki_aarch64_honor
 android/abi_gki_aarch64_imx
 android/abi_gki_aarch64_lenovo
 android/abi_gki_aarch64_mtk
@@ -31,6 +32,7 @@ android/abi_gki_aarch64_vivo
 android/abi_gki_aarch64_xiaomi
 android/abi_gki_aarch64_asus
 android/abi_gki_aarch64_transsion
+android/abi_gki_aarch64_tuxera
 "
 
 FILES="${FILES}


@@ -121,6 +121,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_start);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_finish);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rtmutex_wait_start);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rtmutex_wait_finish);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_opt_spin_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_opt_spin_finish);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_can_spin_on_owner);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_read_wait_start);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_read_wait_finish);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_write_wait_start);
@@ -130,6 +133,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_set_reader_owned);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_mark_wake_readers);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_up_read_end);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_up_write_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_opt_spin_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_opt_spin_finish);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_can_spin_on_owner);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_show_task);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shmem_alloc_page);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_idle_enter);


@@ -18,13 +18,20 @@ static DEFINE_IDA(rng_index_ida);
 struct virtrng_info {
 	struct hwrng hwrng;
 	struct virtqueue *vq;
-	struct completion have_data;
 	char name[25];
-	unsigned int data_avail;
 	int index;
 	bool busy;
 	bool hwrng_register_done;
 	bool hwrng_removed;
+	/* data transfer */
+	struct completion have_data;
+	unsigned int data_avail;
+	/* minimal size returned by rng_buffer_size() */
+#if SMP_CACHE_BYTES < 32
+	u8 data[32];
+#else
+	u8 data[SMP_CACHE_BYTES];
+#endif
 };
 
 static void random_recv_done(struct virtqueue *vq)
@@ -39,14 +46,14 @@ static void random_recv_done(struct virtqueue *vq)
 }
 
 /* The host will fill any buffer we give it with sweet, sweet randomness. */
-static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size)
+static void register_buffer(struct virtrng_info *vi)
 {
 	struct scatterlist sg;
 
-	sg_init_one(&sg, buf, size);
+	sg_init_one(&sg, vi->data, sizeof(vi->data));
 
 	/* There should always be room for one buffer. */
-	virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL);
+	virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL);
 
 	virtqueue_kick(vi->vq);
 }
@@ -55,6 +62,8 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
 {
 	int ret;
 	struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
+	unsigned int chunk;
+	size_t read;
 
 	if (vi->hwrng_removed)
 		return -ENODEV;
@@ -62,19 +71,33 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
 	if (!vi->busy) {
 		vi->busy = true;
 		reinit_completion(&vi->have_data);
-		register_buffer(vi, buf, size);
+		register_buffer(vi);
 	}
 
 	if (!wait)
 		return 0;
 
-	ret = wait_for_completion_killable(&vi->have_data);
-	if (ret < 0)
-		return ret;
+	read = 0;
+	while (size != 0) {
+		ret = wait_for_completion_killable(&vi->have_data);
+		if (ret < 0)
+			return ret;
+
+		chunk = min_t(unsigned int, size, vi->data_avail);
+		memcpy(buf + read, vi->data, chunk);
+		read += chunk;
+		size -= chunk;
+		vi->data_avail = 0;
+
+		if (size != 0) {
+			reinit_completion(&vi->have_data);
+			register_buffer(vi);
+		}
+	}
 
 	vi->busy = false;
 
-	return vi->data_avail;
+	return read;
 }
 
 static void virtio_cleanup(struct hwrng *rng)
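The rewritten virtio_read() above stops handing the caller's buffer to the host: it repeatedly waits for the device to fill the driver's small internal buffer and copies it out in chunks until the request is satisfied. A rough user-space sketch of that loop, with a hypothetical refill() standing in for the completion/virtqueue handshake, is:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define INTERNAL_BUF 32			/* stand-in for vi->data */

static unsigned char internal[INTERNAL_BUF];
static unsigned int avail;		/* stand-in for vi->data_avail */

/* Pretend the host refilled the internal buffer. */
static void refill(void)
{
	memset(internal, 0xab, sizeof(internal));
	avail = sizeof(internal);
}

static size_t read_chunked(unsigned char *buf, size_t size)
{
	size_t done = 0;

	refill();			/* the first buffer is already queued */
	while (size != 0) {
		unsigned int chunk = size < avail ? (unsigned int)size : avail;

		memcpy(buf + done, internal, chunk);
		done += chunk;
		size -= chunk;
		avail = 0;
		if (size != 0)
			refill();	/* queue and wait for the next buffer */
	}
	return done;
}

int main(void)
{
	unsigned char out[100];

	printf("copied %zu bytes\n", read_chunked(out, sizeof(out)));
	return 0;
}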


@@ -68,10 +68,12 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
 	if (ret)
 		return -1;
 
-	trace_android_vh_cpuidle_psci_enter(dev, s2idle);
-
 	/* Do runtime PM to manage a hierarchical CPU toplogy. */
 	rcu_irq_enter_irqson();
+
+	trace_android_vh_cpuidle_psci_enter(dev, s2idle);
+
 	if (s2idle)
 		dev_pm_genpd_suspend(pd_dev);
 	else
@@ -89,10 +91,11 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
 		dev_pm_genpd_resume(pd_dev);
 	else
 		pm_runtime_get_sync(pd_dev);
 
-	rcu_irq_exit_irqson();
-
 	trace_android_vh_cpuidle_psci_exit(dev, s2idle);
+
+	rcu_irq_exit_irqson();
+
 	cpu_pm_exit();
 
 	/* Clear the domain state to start fresh when back from idle. */


@@ -144,7 +144,6 @@ struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask, unsigned int or
 	pool->gfp_mask = gfp_mask | __GFP_COMP;
 	pool->order = order;
 	mutex_init(&pool->mutex); /* No longer used! */
-	mutex_lock(&pool->mutex); /* Make sure anyone who attempts to acquire this hangs */
 
 	mutex_lock(&pool_list_lock);
 	list_add(&pool->list, &pool_list);


@@ -74,28 +74,36 @@ static void seqbuf_seek(struct seqbuf *seqbuf, ssize_t offset)
 static const char *get_filename(struct tegra_bpmp *bpmp,
 				const struct file *file, char *buf, int size)
 {
-	char root_path_buf[512];
-	const char *root_path;
-	const char *filename;
+	const char *root_path, *filename = NULL;
+	char *root_path_buf;
 	size_t root_len;
 
+	root_path_buf = kzalloc(512, GFP_KERNEL);
+	if (!root_path_buf)
+		goto out;
+
 	root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
 				sizeof(root_path_buf));
 	if (IS_ERR(root_path))
-		return NULL;
+		goto out;
 
 	root_len = strlen(root_path);
 
 	filename = dentry_path(file->f_path.dentry, buf, size);
-	if (IS_ERR(filename))
-		return NULL;
+	if (IS_ERR(filename)) {
+		filename = NULL;
+		goto out;
+	}
 
-	if (strlen(filename) < root_len ||
-	    strncmp(filename, root_path, root_len))
-		return NULL;
+	if (strlen(filename) < root_len || strncmp(filename, root_path, root_len)) {
+		filename = NULL;
+		goto out;
+	}
 
 	filename += root_len;
 
+out:
+	kfree(root_path_buf);
+
 	return filename;
 }
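The get_filename() rework above moves a 512-byte scratch buffer off the stack onto the heap and funnels every failure through a single out label so the buffer is always freed. A loose user-space illustration of that goto-out cleanup shape (hypothetical helper, not the driver code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Every early exit jumps to "out" so the one free() covers all paths. */
static char *name_under_root(const char *root, const char *path)
{
	char *root_buf = calloc(1, 512);
	char *result = NULL;
	size_t root_len;

	if (!root_buf)
		goto out;

	snprintf(root_buf, 512, "%s", root);	/* stands in for dentry_path() */
	root_len = strlen(root_buf);

	if (strlen(path) < root_len || strncmp(path, root_buf, root_len))
		goto out;			/* not under root: result stays NULL */

	result = strdup(path + root_len);
out:
	free(root_buf);
	return result;
}

int main(void)
{
	char *n = name_under_root("/debug", "/debug/clk/status");

	printf("%s\n", n ? n : "(null)");
	free(n);
	return 0;
}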


@@ -201,13 +201,23 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
 	const struct iommu_ops *ops = dev->bus->iommu_ops;
 	struct iommu_device *iommu_dev;
 	struct iommu_group *group;
+	static DEFINE_MUTEX(iommu_probe_device_lock);
 	int ret;
 
 	if (!ops)
 		return -ENODEV;
 
-	if (!dev_iommu_get(dev))
-		return -ENOMEM;
+	/*
+	 * Serialise to avoid races between IOMMU drivers registering in
+	 * parallel and/or the "replay" calls from ACPI/OF code via client
+	 * driver probe. Once the latter have been cleaned up we should
+	 * probably be able to use device_lock() here to minimise the scope,
+	 * but for now enforcing a simple global ordering is fine.
+	 */
+	mutex_lock(&iommu_probe_device_lock);
+	if (!dev_iommu_get(dev)) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
 
 	if (!try_module_get(ops->owner)) {
 		ret = -EINVAL;
@@ -227,11 +237,14 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
 		ret = PTR_ERR(group);
 		goto out_release;
 	}
-	iommu_group_put(group);
 
+	mutex_lock(&group->mutex);
 	if (group_list && !group->default_domain && list_empty(&group->entry))
 		list_add_tail(&group->entry, group_list);
+	mutex_unlock(&group->mutex);
+	iommu_group_put(group);
 
+	mutex_unlock(&iommu_probe_device_lock);
 	iommu_device_link(iommu_dev, dev);
 
 	return 0;
@@ -245,6 +258,9 @@ out_module_put:
 err_free:
 	dev_iommu_free(dev);
 
+err_unlock:
+	mutex_unlock(&iommu_probe_device_lock);
+
 	return ret;
 }
@@ -1766,11 +1782,11 @@ int bus_iommu_probe(struct bus_type *bus)
 		return ret;
 
 	list_for_each_entry_safe(group, next, &group_list, entry) {
+		mutex_lock(&group->mutex);
+
 		/* Remove item from the list */
 		list_del_init(&group->entry);
 
-		mutex_lock(&group->mutex);
-
 		/* Try to allocate default domain */
 		probe_alloc_default_domain(bus, group);
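The probe-path hunks above serialise __iommu_probe_device() behind a function-local static mutex and add an err_unlock exit so the lock is released on every path. A minimal model of that pattern using POSIX threads (hypothetical probe body, not the IOMMU core) looks like this; compile with -pthread.

#include <pthread.h>
#include <stdio.h>

static int registered;

/* Only one caller at a time runs the probe body; both the success and
 * the failure path drop the lock, like the err_unlock label above. */
static int probe_one(int fail)
{
	static pthread_mutex_t probe_lock = PTHREAD_MUTEX_INITIALIZER;
	int ret = 0;

	pthread_mutex_lock(&probe_lock);
	if (fail) {
		ret = -1;
		goto err_unlock;
	}
	registered++;			/* the serialised critical section */
err_unlock:
	pthread_mutex_unlock(&probe_lock);
	return ret;
}

int main(void)
{
	printf("ok=%d fail=%d registered=%d\n",
	       probe_one(0), probe_one(1), registered);
	return 0;
}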


@@ -218,7 +218,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	curr = __get_cached_rbnode(iovad, limit_pfn);
 	curr_iova = rb_entry(curr, struct iova, node);
-	low_pfn_new = curr_iova->pfn_hi + 1;
+	low_pfn_new = curr_iova->pfn_hi;
 
 retry:
 	do {
@@ -232,7 +232,7 @@ retry:
 	if (high_pfn < size || new_pfn < low_pfn) {
 		if (low_pfn == iovad->start_pfn && low_pfn_new < limit_pfn) {
 			high_pfn = limit_pfn;
-			low_pfn = low_pfn_new;
+			low_pfn = low_pfn_new + 1;
 			curr = &iovad->anchor.node;
 			curr_iova = rb_entry(curr, struct iova, node);
 			goto retry;
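The iova hunk above keeps the cached node's pfn_hi as an inclusive bound and only applies the +1 on the retry path; the usual motivation for this kind of change is that adding 1 eagerly can wrap when pfn_hi is already the maximum representable PFN. A tiny arithmetic illustration with a deliberately small unsigned type (not allocator code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend PFNs are 8 bits wide so the wrap is easy to see. */
	uint8_t pfn_hi = 0xff;			/* highest possible PFN */

	uint8_t eager = pfn_hi + 1;		/* old style: wraps to 0 at once */
	uint8_t kept  = pfn_hi;			/* new style: keep inclusive bound */

	printf("stored eagerly: %u\n", eager);	/* 0 -> bogus retry limit */
	printf("kept inclusive, +1 deferred: %u\n", kept);
	return 0;
}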


@@ -2007,7 +2007,7 @@ int rproc_set_firmware(struct rproc *rproc, const char *fw_name)
 		goto out;
 	}
 
-	kfree(rproc->firmware);
+	kfree_const(rproc->firmware);
 	rproc->firmware = p;
 
 out:


@@ -1408,7 +1408,7 @@ static void bfa_fcs_lport_fdmi_rpa_response(void *fcsarg,
 			u32 resid_len,
 			struct fchs_s *rsp_fchs);
 static void	bfa_fcs_lport_fdmi_timeout(void *arg);
-static u16	bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
+static int	bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
 						  u8 *pyld);
 static u16	bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
 						  u8 *pyld);
@@ -1887,6 +1887,8 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 		bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi,
 					  (u8 *) ((struct ct_hdr_s *) pyld
 						  + 1));
+	if (attr_len < 0)
+		return;
 
 	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 			  FC_CLASS_3, (len + attr_len), &fchs,
@@ -1896,17 +1898,20 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 	bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
 }
 
-static u16
+static int
 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 {
 	struct bfa_fcs_lport_s *port = fdmi->ms->port;
-	struct bfa_fcs_fdmi_hba_attr_s hba_attr;
-	struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr;
+	struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr;
 	struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld;
 	struct fdmi_attr_s *attr;
+	int len;
 	u8 *curr_ptr;
-	u16 len, count;
-	u16 templen;
+	u16 templen, count;
+
+	fcs_hba_attr = kzalloc(sizeof(*fcs_hba_attr), GFP_KERNEL);
+	if (!fcs_hba_attr)
+		return -ENOMEM;
 
 	/*
 	 * get hba attributes
@@ -2148,6 +2153,9 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
 	len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
 
 	rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
+
+	kfree(fcs_hba_attr);
+
 	return len;
 }


@@ -7651,7 +7651,7 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
 	peer_pa_tactivate_us = peer_pa_tactivate *
 			     gran_to_us_table[peer_granularity - 1];
 
-	if (pa_tactivate_us > peer_pa_tactivate_us) {
+	if (pa_tactivate_us >= peer_pa_tactivate_us) {
 		u32 new_peer_pa_tactivate;
 
 		new_peer_pa_tactivate = pa_tactivate_us /


@@ -1076,6 +1076,11 @@ static int dwc3_core_init(struct dwc3 *dwc)
 		if (dwc->parkmode_disable_ss_quirk)
 			reg |= DWC3_GUCTL1_PARKMODE_DISABLE_SS;
 
+		if (DWC3_VER_IS_WITHIN(DWC3, 290A, ANY) &&
+		    (dwc->maximum_speed == USB_SPEED_HIGH ||
+		     dwc->maximum_speed == USB_SPEED_FULL))
+			reg |= DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK;
+
 		dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
 	}
@@ -1765,9 +1770,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
 	case DWC3_GCTL_PRTCAP_DEVICE:
 		if (pm_runtime_suspended(dwc->dev))
 			break;
-		spin_lock_irqsave(&dwc->lock, flags);
 		dwc3_gadget_suspend(dwc);
-		spin_unlock_irqrestore(&dwc->lock, flags);
 		synchronize_irq(dwc->irq_gadget);
 		dwc3_core_exit(dwc);
 		break;
@@ -1828,9 +1831,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
 			return ret;
 
 		dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
-		spin_lock_irqsave(&dwc->lock, flags);
 		dwc3_gadget_resume(dwc);
-		spin_unlock_irqrestore(&dwc->lock, flags);
 		break;
 	case DWC3_GCTL_PRTCAP_HOST:
 		if (!PMSG_IS_AUTO(msg)) {


@@ -259,6 +259,7 @@
 /* Global User Control 1 Register */
 #define DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT	BIT(31)
 #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS	BIT(28)
+#define DWC3_GUCTL1_DEV_FORCE_20_CLK_FOR_30_CLK	BIT(26)
 #define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW		BIT(24)
 #define DWC3_GUCTL1_PARKMODE_DISABLE_SS		BIT(17)
@@ -1564,6 +1565,7 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
 		u32 param);
 void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt);
 void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc);
+void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status);
 #else
 static inline int dwc3_gadget_init(struct dwc3 *dwc)
 { return 0; }


@@ -197,7 +197,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
 	int ret;
 
 	spin_lock_irqsave(&dwc->lock, flags);
-	if (!dep->endpoint.desc || !dwc->pullups_connected) {
+	if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
 		dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
 				dep->name);
 		ret = -ESHUTDOWN;
@@ -293,7 +293,10 @@ void dwc3_ep0_out_start(struct dwc3 *dwc)
 			continue;
 
 		dwc3_ep->flags &= ~DWC3_EP_DELAY_STOP;
-		dwc3_stop_active_transfer(dwc3_ep, true, true);
+		if (dwc->connected)
+			dwc3_stop_active_transfer(dwc3_ep, true, true);
+		else
+			dwc3_remove_requests(dwc, dwc3_ep, -ESHUTDOWN);
 	}
 }
@@ -814,8 +817,9 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
 	struct usb_ctrlrequest *ctrl = (void *) dwc->ep0_trb;
 	int ret = -EINVAL;
 	u32 len;
+	struct dwc3_vendor *vdwc = container_of(dwc, struct dwc3_vendor, dwc);
 
-	if (!dwc->gadget_driver || !dwc->connected)
+	if (!dwc->gadget_driver || !vdwc->softconnect || !dwc->connected)
 		goto out;
 
 	trace_dwc3_ctrl_req(ctrl);
@@ -1115,8 +1119,12 @@ void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
 static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
 		const struct dwc3_event_depevt *event)
 {
+	struct dwc3_vendor *vdwc = container_of(dwc, struct dwc3_vendor, dwc);
+
 	switch (event->status) {
 	case DEPEVT_STATUS_CONTROL_DATA:
+		if (!vdwc->softconnect || !dwc->connected)
+			return;
+
 		/*
 		 * We already have a DATA transfer in the controller's cache,
 		 * if we receive a XferNotReady(DATA) we will ignore it, unless


@@ -367,7 +367,9 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
dwc3_writel(dep->regs, DWC3_DEPCMD, cmd); dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
if (!(cmd & DWC3_DEPCMD_CMDACT)) { if (!(cmd & DWC3_DEPCMD_CMDACT) ||
(DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER &&
!(cmd & DWC3_DEPCMD_CMDIOC))) {
ret = 0; ret = 0;
goto skip_status; goto skip_status;
} }
@@ -967,12 +969,16 @@ out:
return 0; return 0;
} }
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status) void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep, int status)
{ {
struct dwc3_request *req; struct dwc3_request *req;
dwc3_stop_active_transfer(dep, true, false); dwc3_stop_active_transfer(dep, true, false);
/* If endxfer is delayed, avoid unmapping requests */
if (dep->flags & DWC3_EP_DELAY_STOP)
return;
/* - giveback all requests to gadget driver */ /* - giveback all requests to gadget driver */
while (!list_empty(&dep->started_list)) { while (!list_empty(&dep->started_list)) {
req = next_request(&dep->started_list); req = next_request(&dep->started_list);
@@ -2430,7 +2436,7 @@ static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{ {
u32 reg; u32 reg;
u32 timeout = 500; u32 timeout = 2000;
if (pm_runtime_suspended(dwc->dev)) if (pm_runtime_suspended(dwc->dev))
return 0; return 0;
@@ -2463,6 +2469,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
dwc3_gadget_dctl_write_safe(dwc, reg); dwc3_gadget_dctl_write_safe(dwc, reg);
do { do {
usleep_range(1000, 2000);
reg = dwc3_readl(dwc->regs, DWC3_DSTS); reg = dwc3_readl(dwc->regs, DWC3_DSTS);
reg &= DWC3_DSTS_DEVCTRLHLT; reg &= DWC3_DSTS_DEVCTRLHLT;
} while (--timeout && !(!is_on ^ !reg)); } while (--timeout && !(!is_on ^ !reg));
@@ -2491,6 +2498,9 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
if (dwc->ep0state != EP0_SETUP_PHASE) { if (dwc->ep0state != EP0_SETUP_PHASE) {
int ret; int ret;
if (dwc->delayed_status)
dwc3_ep0_send_delayed_status(dwc);
reinit_completion(&dwc->ep0_in_setup); reinit_completion(&dwc->ep0_in_setup);
spin_unlock_irqrestore(&dwc->lock, flags); spin_unlock_irqrestore(&dwc->lock, flags);
@@ -2559,6 +2569,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
return 0; return 0;
} }
synchronize_irq(dwc->irq_gadget);
if (!is_on) { if (!is_on) {
ret = dwc3_gadget_soft_disconnect(dwc); ret = dwc3_gadget_soft_disconnect(dwc);
} else { } else {
@@ -2702,6 +2714,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0]; dep = dwc->eps[0];
dep->flags = 0;
ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
if (ret) { if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name); dev_err(dwc->dev, "failed to enable %s\n", dep->name);
@@ -2709,6 +2722,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
} }
dep = dwc->eps[1]; dep = dwc->eps[1];
dep->flags = 0;
ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
if (ret) { if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name); dev_err(dwc->dev, "failed to enable %s\n", dep->name);
@@ -3596,11 +3610,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
dep = dwc->eps[epnum]; dep = dwc->eps[epnum];
if (!(dep->flags & DWC3_EP_ENABLED)) { if (!(dep->flags & DWC3_EP_ENABLED)) {
if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED))
return; return;
/* Handle only EPCMDCMPLT when EP disabled */ /* Handle only EPCMDCMPLT when EP disabled */
if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) if ((event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT) &&
!(epnum <= 1 && event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE))
return; return;
} }
@@ -3695,7 +3710,7 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
* timeout. Delay issuing the End Transfer command until the Setup TRB is * timeout. Delay issuing the End Transfer command until the Setup TRB is
* prepared. * prepared.
*/ */
if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status) { if (dwc->ep0state != EP0_SETUP_PHASE) {
dep->flags |= DWC3_EP_DELAY_STOP; dep->flags |= DWC3_EP_DELAY_STOP;
return; return;
} }
@@ -3877,6 +3892,10 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
u32 reg; u32 reg;
u8 lanes = 1; u8 lanes = 1;
u8 speed; u8 speed;
struct dwc3_vendor *vdwc = container_of(dwc, struct dwc3_vendor, dwc);
if (!vdwc->softconnect)
return;
reg = dwc3_readl(dwc->regs, DWC3_DSTS); reg = dwc3_readl(dwc->regs, DWC3_DSTS);
speed = reg & DWC3_DSTS_CONNECTSPD; speed = reg & DWC3_DSTS_CONNECTSPD;
@@ -4191,15 +4210,8 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
break; break;
case DWC3_DEVICE_EVENT_SUSPEND: case DWC3_DEVICE_EVENT_SUSPEND:
/* It changed to be suspend event for version 2.30a and above */ /* It changed to be suspend event for version 2.30a and above */
if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) { if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
/* dwc3_gadget_suspend_interrupt(dwc, event->event_info);
* Ignore suspend event until the gadget enters into
* USB_STATE_CONFIGURED state.
*/
if (dwc->gadget->state >= USB_STATE_CONFIGURED)
dwc3_gadget_suspend_interrupt(dwc,
event->event_info);
}
break; break;
case DWC3_DEVICE_EVENT_SOF: case DWC3_DEVICE_EVENT_SOF:
case DWC3_DEVICE_EVENT_ERRATIC_ERROR: case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
@@ -4523,12 +4535,17 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
int dwc3_gadget_suspend(struct dwc3 *dwc) int dwc3_gadget_suspend(struct dwc3 *dwc)
{ {
unsigned long flags;
if (!dwc->gadget_driver) if (!dwc->gadget_driver)
return 0; return 0;
dwc3_gadget_run_stop(dwc, false, false); dwc3_gadget_run_stop(dwc, false, false);
spin_lock_irqsave(&dwc->lock, flags);
dwc3_disconnect_gadget(dwc); dwc3_disconnect_gadget(dwc);
__dwc3_gadget_stop(dwc); __dwc3_gadget_stop(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return 0; return 0;
} }


@@ -2447,6 +2447,10 @@ void composite_resume(struct usb_gadget *gadget)
 			usb_gadget_clear_selfpowered(gadget);
 
 		usb_gadget_vbus_draw(gadget, maxpower);
+	} else {
+		maxpower = CONFIG_USB_GADGET_VBUS_DRAW;
+		maxpower = min(maxpower, 100U);
+		usb_gadget_vbus_draw(gadget, maxpower);
 	}
 
 	cdev->suspended = 0;


@@ -461,6 +461,12 @@ static int config_usb_cfg_link(
 	 * from another gadget or a random directory.
 	 * Also a function instance can only be linked once.
 	 */
 
+	if (gi->composite.gadget_driver.udc_name) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	list_for_each_entry(a_fi, &gi->available_func, cfs_list) {
 		if (a_fi == fi)
 			break;


@@ -294,15 +294,10 @@ void ext4_release_system_zone(struct super_block *sb)
 	call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
 }
 
-/*
- * Returns 1 if the passed-in block region (start_blk,
- * start_blk+count) is valid; 0 if some part of the block region
- * overlaps with some other filesystem metadata blocks.
- */
-int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
-			   unsigned int count)
+int ext4_sb_block_valid(struct super_block *sb, struct inode *inode,
+			ext4_fsblk_t start_blk, unsigned int count)
 {
-	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_system_blocks *system_blks;
 	struct ext4_system_zone *entry;
 	struct rb_node *n;
@@ -331,7 +326,9 @@ int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
 		else if (start_blk >= (entry->start_blk + entry->count))
 			n = n->rb_right;
 		else {
-			ret = (entry->ino == inode->i_ino);
+			ret = 0;
+			if (inode)
+				ret = (entry->ino == inode->i_ino);
 			break;
 		}
 	}
@@ -340,6 +337,17 @@ out_rcu:
 	return ret;
 }
 
+/*
+ * Returns 1 if the passed-in block region (start_blk,
+ * start_blk+count) is valid; 0 if some part of the block region
+ * overlaps with some other filesystem metadata blocks.
+ */
+int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk,
+			   unsigned int count)
+{
+	return ext4_sb_block_valid(inode->i_sb, inode, start_blk, count);
+}
+
 int ext4_check_blockref(const char *function, unsigned int line,
 			struct inode *inode, __le32 *p, unsigned int max)
 {


@@ -3575,6 +3575,9 @@ extern int ext4_inode_block_valid(struct inode *inode,
 					 unsigned int count);
 extern int ext4_check_blockref(const char *, unsigned int,
 			       struct inode *, __le32 *, unsigned int);
+extern int ext4_sb_block_valid(struct super_block *sb, struct inode *inode,
+				ext4_fsblk_t start_blk, unsigned int count);
 
 /* extents.c */
 struct ext4_ext_path;


@@ -486,6 +486,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
 		keys[0].fmr_physical = bofs;
 	if (keys[1].fmr_physical >= eofs)
 		keys[1].fmr_physical = eofs - 1;
+	if (keys[1].fmr_physical < keys[0].fmr_physical)
+		return 0;
 	start_fsb = keys[0].fmr_physical;
 	end_fsb = keys[1].fmr_physical;
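The fsmap hunk above clamps the high key to the last valid block and then returns early when the clamped range has become empty (high below low). A self-contained sketch of that clamp-then-validate step:

#include <stdint.h>
#include <stdio.h>

/* Clamp [lo, hi] to the device range [first, last] and report whether
 * anything is left to walk; mirrors the keys[0]/keys[1] handling. */
static int clamp_range(uint64_t *lo, uint64_t *hi, uint64_t first, uint64_t last)
{
	if (*lo < first)
		*lo = first;
	if (*hi > last)
		*hi = last;
	return *hi >= *lo;
}

int main(void)
{
	uint64_t lo = 0, hi = 0;	/* a query entirely below the data area */

	if (!clamp_range(&lo, &hi, 1, 8191))
		puts("nothing to report");	/* the early "return 0" case */
	else
		printf("walk blocks %llu..%llu\n",
		       (unsigned long long)lo, (unsigned long long)hi);
	return 0;
}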


@@ -5303,7 +5303,8 @@ static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
} }
/** /**
* ext4_free_blocks() -- Free given blocks and update quota * ext4_mb_clear_bb() -- helper function for freeing blocks.
* Used by ext4_free_blocks()
* @handle: handle for this transaction * @handle: handle for this transaction
* @inode: inode * @inode: inode
* @bh: optional buffer of the block to be freed * @bh: optional buffer of the block to be freed
@@ -5311,9 +5312,9 @@ static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block,
* @count: number of blocks to be freed * @count: number of blocks to be freed
* @flags: flags used by ext4_free_blocks * @flags: flags used by ext4_free_blocks
*/ */
void ext4_free_blocks(handle_t *handle, struct inode *inode, static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
struct buffer_head *bh, ext4_fsblk_t block, ext4_fsblk_t block, unsigned long count,
unsigned long count, int flags) int flags)
{ {
struct buffer_head *bitmap_bh = NULL; struct buffer_head *bitmap_bh = NULL;
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
@@ -5330,79 +5331,14 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
sbi = EXT4_SB(sb); sbi = EXT4_SB(sb);
if (sbi->s_mount_state & EXT4_FC_REPLAY) {
ext4_free_blocks_simple(inode, block, count);
return;
}
might_sleep();
if (bh) {
if (block)
BUG_ON(block != bh->b_blocknr);
else
block = bh->b_blocknr;
}
if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
!ext4_inode_block_valid(inode, block, count)) { !ext4_inode_block_valid(inode, block, count)) {
ext4_error(sb, "Freeing blocks not in datazone - " ext4_error(sb, "Freeing blocks in system zone - "
"block = %llu, count = %lu", block, count); "Block = %llu, count = %lu", block, count);
/* err = 0. ext4_std_error should be a no op */
goto error_return; goto error_return;
} }
flags |= EXT4_FREE_BLOCKS_VALIDATED;
ext4_debug("freeing block %llu\n", block);
trace_ext4_free_blocks(inode, block, count, flags);
if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
BUG_ON(count > 1);
ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
inode, bh, block);
}
/*
* If the extent to be freed does not begin on a cluster
* boundary, we need to deal with partial clusters at the
* beginning and end of the extent. Normally we will free
* blocks at the beginning or the end unless we are explicitly
* requested to avoid doing so.
*/
overflow = EXT4_PBLK_COFF(sbi, block);
if (overflow) {
if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
overflow = sbi->s_cluster_ratio - overflow;
block += overflow;
if (count > overflow)
count -= overflow;
else
return;
} else {
block -= overflow;
count += overflow;
}
}
overflow = EXT4_LBLK_COFF(sbi, count);
if (overflow) {
if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
if (count > overflow)
count -= overflow;
else
return;
} else
count += sbi->s_cluster_ratio - overflow;
}
if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
int i;
int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
for (i = 0; i < count; i++) {
cond_resched();
if (is_metadata)
bh = sb_find_get_block(inode->i_sb, block + i);
ext4_forget(handle, is_metadata, inode, bh, block + i);
}
}
do_more: do_more:
overflow = 0; overflow = 0;
@@ -5420,6 +5356,8 @@ do_more:
overflow = EXT4_C2B(sbi, bit) + count - overflow = EXT4_C2B(sbi, bit) + count -
EXT4_BLOCKS_PER_GROUP(sb); EXT4_BLOCKS_PER_GROUP(sb);
count -= overflow; count -= overflow;
/* The range changed so it's no longer validated */
flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
} }
count_clusters = EXT4_NUM_B2C(sbi, count); count_clusters = EXT4_NUM_B2C(sbi, count);
bitmap_bh = ext4_read_block_bitmap(sb, block_group); bitmap_bh = ext4_read_block_bitmap(sb, block_group);
@@ -5434,13 +5372,8 @@ do_more:
goto error_return; goto error_return;
} }
if (in_range(ext4_block_bitmap(sb, gdp), block, count) || if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
in_range(ext4_inode_bitmap(sb, gdp), block, count) || !ext4_inode_block_valid(inode, block, count)) {
in_range(block, ext4_inode_table(sb, gdp),
sbi->s_itb_per_group) ||
in_range(block + count - 1, ext4_inode_table(sb, gdp),
sbi->s_itb_per_group)) {
ext4_error(sb, "Freeing blocks in system zone - " ext4_error(sb, "Freeing blocks in system zone - "
"Block = %llu, count = %lu", block, count); "Block = %llu, count = %lu", block, count);
/* err = 0. ext4_std_error should be a no op */ /* err = 0. ext4_std_error should be a no op */
@@ -5510,7 +5443,7 @@ do_more:
NULL); NULL);
if (err && err != -EOPNOTSUPP) if (err && err != -EOPNOTSUPP)
ext4_msg(sb, KERN_WARNING, "discard request in" ext4_msg(sb, KERN_WARNING, "discard request in"
" group:%d block:%d count:%lu failed" " group:%u block:%d count:%lu failed"
" with %d", block_group, bit, count, " with %d", block_group, bit, count,
err); err);
} else } else
@@ -5562,6 +5495,8 @@ do_more:
block += count; block += count;
count = overflow; count = overflow;
put_bh(bitmap_bh); put_bh(bitmap_bh);
/* The range changed so it's no longer validated */
flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
goto do_more; goto do_more;
} }
error_return: error_return:
@@ -5570,6 +5505,108 @@ error_return:
return; return;
} }
/**
* ext4_free_blocks() -- Free given blocks and update quota
* @handle: handle for this transaction
* @inode: inode
* @bh: optional buffer of the block to be freed
* @block: starting physical block to be freed
* @count: number of blocks to be freed
* @flags: flags used by ext4_free_blocks
*/
void ext4_free_blocks(handle_t *handle, struct inode *inode,
struct buffer_head *bh, ext4_fsblk_t block,
unsigned long count, int flags)
{
struct super_block *sb = inode->i_sb;
unsigned int overflow;
struct ext4_sb_info *sbi;
sbi = EXT4_SB(sb);
if (sbi->s_mount_state & EXT4_FC_REPLAY) {
ext4_free_blocks_simple(inode, block, count);
return;
}
might_sleep();
if (bh) {
if (block)
BUG_ON(block != bh->b_blocknr);
else
block = bh->b_blocknr;
}
if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
!ext4_inode_block_valid(inode, block, count)) {
ext4_error(sb, "Freeing blocks not in datazone - "
"block = %llu, count = %lu", block, count);
return;
}
flags |= EXT4_FREE_BLOCKS_VALIDATED;
ext4_debug("freeing block %llu\n", block);
trace_ext4_free_blocks(inode, block, count, flags);
if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
BUG_ON(count > 1);
ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
inode, bh, block);
}
/*
* If the extent to be freed does not begin on a cluster
* boundary, we need to deal with partial clusters at the
* beginning and end of the extent. Normally we will free
* blocks at the beginning or the end unless we are explicitly
* requested to avoid doing so.
*/
overflow = EXT4_PBLK_COFF(sbi, block);
if (overflow) {
if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
overflow = sbi->s_cluster_ratio - overflow;
block += overflow;
if (count > overflow)
count -= overflow;
else
return;
} else {
block -= overflow;
count += overflow;
}
/* The range changed so it's no longer validated */
flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
}
overflow = EXT4_LBLK_COFF(sbi, count);
if (overflow) {
if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
if (count > overflow)
count -= overflow;
else
return;
} else
count += sbi->s_cluster_ratio - overflow;
/* The range changed so it's no longer validated */
flags &= ~EXT4_FREE_BLOCKS_VALIDATED;
}
if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) {
int i;
int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA;
for (i = 0; i < count; i++) {
cond_resched();
if (is_metadata)
bh = sb_find_get_block(inode->i_sb, block + i);
ext4_forget(handle, is_metadata, inode, bh, block + i);
}
}
ext4_mb_clear_bb(handle, inode, block, count, flags);
return;
}
/** /**
* ext4_group_add_blocks() -- Add given blocks to an existing group * ext4_group_add_blocks() -- Add given blocks to an existing group
* @handle: handle to this transaction * @handle: handle to this transaction
@@ -5626,11 +5663,7 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
goto error_return; goto error_return;
} }
if (in_range(ext4_block_bitmap(sb, desc), block, count) || if (!ext4_sb_block_valid(sb, NULL, block, count)) {
in_range(ext4_inode_bitmap(sb, desc), block, count) ||
in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
in_range(block + count - 1, ext4_inode_table(sb, desc),
sbi->s_itb_per_group)) {
ext4_error(sb, "Adding blocks in system zones - " ext4_error(sb, "Adding blocks in system zones - "
"Block = %llu, count = %lu", "Block = %llu, count = %lu",
block, count); block, count);


@@ -737,14 +737,19 @@ out:
return ret; return ret;
} }
void f2fs_decompress_cluster(struct decompress_io_ctx *dic) static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
bool pre_alloc);
static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
bool bypass_destroy_callback, bool pre_alloc);
void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
{ {
struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode); struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
struct f2fs_inode_info *fi = F2FS_I(dic->inode); struct f2fs_inode_info *fi = F2FS_I(dic->inode);
const struct f2fs_compress_ops *cops = const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm]; f2fs_cops[fi->i_compress_algorithm];
bool bypass_callback = false;
int ret; int ret;
int i;
trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx, trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
dic->cluster_size, fi->i_compress_algorithm); dic->cluster_size, fi->i_compress_algorithm);
@@ -754,41 +759,10 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
goto out_end_io; goto out_end_io;
} }
dic->tpages = page_array_alloc(dic->inode, dic->cluster_size); ret = f2fs_prepare_decomp_mem(dic, false);
if (!dic->tpages) { if (ret) {
ret = -ENOMEM; bypass_callback = true;
goto out_end_io; goto out_release;
}
for (i = 0; i < dic->cluster_size; i++) {
if (dic->rpages[i]) {
dic->tpages[i] = dic->rpages[i];
continue;
}
dic->tpages[i] = f2fs_compress_alloc_page();
if (!dic->tpages[i]) {
ret = -ENOMEM;
goto out_end_io;
}
}
if (cops->init_decompress_ctx) {
ret = cops->init_decompress_ctx(dic);
if (ret)
goto out_end_io;
}
dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
if (!dic->rbuf) {
ret = -ENOMEM;
goto out_destroy_decompress_ctx;
}
dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
if (!dic->cbuf) {
ret = -ENOMEM;
goto out_vunmap_rbuf;
} }
dic->clen = le32_to_cpu(dic->cbuf->clen); dic->clen = le32_to_cpu(dic->cbuf->clen);
@@ -796,7 +770,7 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) { if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
ret = -EFSCORRUPTED; ret = -EFSCORRUPTED;
goto out_vunmap_cbuf; goto out_release;
} }
ret = cops->decompress_pages(dic); ret = cops->decompress_pages(dic);
@@ -817,17 +791,13 @@ void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
} }
} }
out_vunmap_cbuf: out_release:
vm_unmap_ram(dic->cbuf, dic->nr_cpages); f2fs_release_decomp_mem(dic, bypass_callback, false);
out_vunmap_rbuf:
vm_unmap_ram(dic->rbuf, dic->cluster_size);
out_destroy_decompress_ctx:
if (cops->destroy_decompress_ctx)
cops->destroy_decompress_ctx(dic);
out_end_io: out_end_io:
 trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
 dic->clen, ret);
-f2fs_decompress_end_io(dic, ret);
+f2fs_decompress_end_io(dic, ret, in_task);
 }
 /*
@@ -837,7 +807,7 @@ out_end_io:
 * (or in the case of a failure, cleans up without actually decompressing).
 */
 void f2fs_end_read_compressed_page(struct page *page, bool failed,
-block_t blkaddr)
+block_t blkaddr, bool in_task)
 {
 struct decompress_io_ctx *dic =
 (struct decompress_io_ctx *)page_private(page);
@@ -847,12 +817,12 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
 if (failed)
 WRITE_ONCE(dic->failed, true);
-else if (blkaddr)
+else if (blkaddr && in_task)
 f2fs_cache_compressed_page(sbi, page,
 dic->inode->i_ino, blkaddr);
 if (atomic_dec_and_test(&dic->remaining_pages))
-f2fs_decompress_cluster(dic);
+f2fs_decompress_cluster(dic, in_task);
 }
 static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
@@ -1492,13 +1462,82 @@ destroy_out:
 return err;
 }
-static void f2fs_free_dic(struct decompress_io_ctx *dic);
+static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
+bool pre_alloc)
+{
+return pre_alloc ^ f2fs_low_mem_mode(sbi);
+}
+static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
+bool pre_alloc)
+{
+const struct f2fs_compress_ops *cops =
+f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
+int i;
+if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
+return 0;
+dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
+if (!dic->tpages)
+return -ENOMEM;
+for (i = 0; i < dic->cluster_size; i++) {
+if (dic->rpages[i]) {
+dic->tpages[i] = dic->rpages[i];
+continue;
+}
+dic->tpages[i] = f2fs_compress_alloc_page();
+if (!dic->tpages[i])
+return -ENOMEM;
+}
+dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
+if (!dic->rbuf)
+return -ENOMEM;
+dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
+if (!dic->cbuf)
+return -ENOMEM;
+if (cops->init_decompress_ctx) {
+int ret = cops->init_decompress_ctx(dic);
+if (ret)
+return ret;
+}
+return 0;
+}
+static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
+bool bypass_destroy_callback, bool pre_alloc)
+{
+const struct f2fs_compress_ops *cops =
+f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
+if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
+return;
+if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
+cops->destroy_decompress_ctx(dic);
+if (dic->cbuf)
+vm_unmap_ram(dic->cbuf, dic->nr_cpages);
+if (dic->rbuf)
+vm_unmap_ram(dic->rbuf, dic->cluster_size);
+}
+static void f2fs_free_dic(struct decompress_io_ctx *dic,
+bool bypass_destroy_callback);
 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
 {
 struct decompress_io_ctx *dic;
 pgoff_t start_idx = start_idx_of_cluster(cc);
-int i;
+int i, ret;
 dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
 if (!dic)
@@ -1526,32 +1565,43 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
 dic->nr_rpages = cc->cluster_size;
 dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
-if (!dic->cpages)
+if (!dic->cpages) {
+ret = -ENOMEM;
 goto out_free;
+}
 for (i = 0; i < dic->nr_cpages; i++) {
 struct page *page;
 page = f2fs_compress_alloc_page();
-if (!page)
+if (!page) {
+ret = -ENOMEM;
 goto out_free;
+}
 f2fs_set_compressed_page(page, cc->inode,
 start_idx + i + 1, dic);
 dic->cpages[i] = page;
 }
+ret = f2fs_prepare_decomp_mem(dic, true);
+if (ret)
+goto out_free;
 return dic;
 out_free:
-f2fs_free_dic(dic);
-return ERR_PTR(-ENOMEM);
+f2fs_free_dic(dic, true);
+return ERR_PTR(ret);
 }
-static void f2fs_free_dic(struct decompress_io_ctx *dic)
+static void f2fs_free_dic(struct decompress_io_ctx *dic,
+bool bypass_destroy_callback)
 {
 int i;
+f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
 if (dic->tpages) {
 for (i = 0; i < dic->cluster_size; i++) {
 if (dic->rpages[i])
@@ -1576,17 +1626,33 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic)
 kmem_cache_free(dic_entry_slab, dic);
 }
-static void f2fs_put_dic(struct decompress_io_ctx *dic)
+static void f2fs_late_free_dic(struct work_struct *work)
 {
-if (refcount_dec_and_test(&dic->refcnt))
-f2fs_free_dic(dic);
+struct decompress_io_ctx *dic =
+container_of(work, struct decompress_io_ctx, free_work);
+f2fs_free_dic(dic, false);
+}
+static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
+{
+if (refcount_dec_and_test(&dic->refcnt)) {
+if (in_task) {
+f2fs_free_dic(dic, false);
+} else {
+INIT_WORK(&dic->free_work, f2fs_late_free_dic);
+queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
+&dic->free_work);
+}
+}
 }
 /*
 * Update and unlock the cluster's pagecache pages, and release the reference to
 * the decompress_io_ctx that was being held for I/O completion.
 */
-static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
+static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
+bool in_task)
 {
 int i;
@@ -1607,7 +1673,7 @@ static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
 unlock_page(rpage);
 }
-f2fs_put_dic(dic);
+f2fs_put_dic(dic, in_task);
 }
 static void f2fs_verify_cluster(struct work_struct *work)
@@ -1624,14 +1690,15 @@ static void f2fs_verify_cluster(struct work_struct *work)
 SetPageError(rpage);
 }
-__f2fs_decompress_end_io(dic, false);
+__f2fs_decompress_end_io(dic, false, true);
 }
 /*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
-void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
+void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
+bool in_task)
 {
 if (!failed && dic->need_verity) {
 /*
@@ -1643,7 +1710,7 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
 INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
 fsverity_enqueue_verify_work(&dic->verity_work);
 } else {
-__f2fs_decompress_end_io(dic, failed);
+__f2fs_decompress_end_io(dic, failed, in_task);
 }
 }
@@ -1652,12 +1719,12 @@ void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
 *
 * This is called when the page is no longer needed and can be freed.
 */
-void f2fs_put_page_dic(struct page *page)
+void f2fs_put_page_dic(struct page *page, bool in_task)
 {
 struct decompress_io_ctx *dic =
 (struct decompress_io_ctx *)page_private(page);
-f2fs_put_dic(dic);
+f2fs_put_dic(dic, in_task);
 }
 /*


@@ -119,7 +119,7 @@ struct bio_post_read_ctx {
 unsigned int enabled_steps;
 };
-static void f2fs_finish_read_bio(struct bio *bio)
+static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
 {
 struct bio_vec *bv;
 struct bvec_iter_all iter_all;
@@ -133,8 +133,9 @@ static void f2fs_finish_read_bio(struct bio *bio)
 if (f2fs_is_compressed_page(page)) {
 if (bio->bi_status)
-f2fs_end_read_compressed_page(page, true, 0);
-f2fs_put_page_dic(page);
+f2fs_end_read_compressed_page(page, true, 0,
+in_task);
+f2fs_put_page_dic(page, in_task);
 continue;
 }
@@ -191,7 +192,7 @@ static void f2fs_verify_bio(struct work_struct *work)
 fsverity_verify_bio(bio);
 }
-f2fs_finish_read_bio(bio);
+f2fs_finish_read_bio(bio, true);
 }
 /*
@@ -203,7 +204,7 @@ static void f2fs_verify_bio(struct work_struct *work)
 * can involve reading verity metadata pages from the file, and these verity
 * metadata pages may be encrypted and/or compressed.
 */
-static void f2fs_verify_and_finish_bio(struct bio *bio)
+static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
 {
 struct bio_post_read_ctx *ctx = bio->bi_private;
@@ -211,7 +212,7 @@ static void f2fs_verify_and_finish_bio(struct bio *bio)
 INIT_WORK(&ctx->work, f2fs_verify_bio);
 fsverity_enqueue_verify_work(&ctx->work);
 } else {
-f2fs_finish_read_bio(bio);
+f2fs_finish_read_bio(bio, in_task);
 }
 }
@@ -224,7 +225,8 @@ static void f2fs_verify_and_finish_bio(struct bio *bio)
 * that the bio includes at least one compressed page. The actual decompression
 * is done on a per-cluster basis, not a per-bio basis.
 */
-static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
+static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
+bool in_task)
 {
 struct bio_vec *bv;
 struct bvec_iter_all iter_all;
@@ -237,7 +239,7 @@ static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
 /* PG_error was set if decryption failed. */
 if (f2fs_is_compressed_page(page))
 f2fs_end_read_compressed_page(page, PageError(page),
-blkaddr);
+blkaddr, in_task);
 else
 all_compressed = false;
@@ -262,15 +264,16 @@ static void f2fs_post_read_work(struct work_struct *work)
 fscrypt_decrypt_bio(ctx->bio);
 if (ctx->enabled_steps & STEP_DECOMPRESS)
-f2fs_handle_step_decompress(ctx);
-f2fs_verify_and_finish_bio(ctx->bio);
+f2fs_handle_step_decompress(ctx, true);
+f2fs_verify_and_finish_bio(ctx->bio, true);
 }
 static void f2fs_read_end_io(struct bio *bio)
 {
 struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
 struct bio_post_read_ctx *ctx = bio->bi_private;
+bool intask = in_task();
 if (time_to_inject(sbi, FAULT_READ_IO)) {
 f2fs_show_injection_info(sbi, FAULT_READ_IO);
@@ -278,16 +281,29 @@ static void f2fs_read_end_io(struct bio *bio)
 }
 if (bio->bi_status) {
-f2fs_finish_read_bio(bio);
+f2fs_finish_read_bio(bio, intask);
 return;
 }
-if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
-INIT_WORK(&ctx->work, f2fs_post_read_work);
-queue_work(ctx->sbi->post_read_wq, &ctx->work);
-} else {
-f2fs_verify_and_finish_bio(bio);
+if (ctx) {
+unsigned int enabled_steps = ctx->enabled_steps &
+(STEP_DECRYPT | STEP_DECOMPRESS);
+/*
+* If we have only decompression step between decompression and
+* decrypt, we don't need post processing for this.
+*/
+if (enabled_steps == STEP_DECOMPRESS &&
+!f2fs_low_mem_mode(sbi)) {
+f2fs_handle_step_decompress(ctx, intask);
+} else if (enabled_steps) {
+INIT_WORK(&ctx->work, f2fs_post_read_work);
+queue_work(ctx->sbi->post_read_wq, &ctx->work);
+return;
+}
 }
+f2fs_verify_and_finish_bio(bio, intask);
 }
 static void f2fs_write_end_io(struct bio *bio)
@@ -2249,7 +2265,7 @@ skip_reading_dnode:
 if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
 if (atomic_dec_and_test(&dic->remaining_pages))
-f2fs_decompress_cluster(dic);
+f2fs_decompress_cluster(dic, true);
 continue;
 }
@@ -2267,7 +2283,7 @@ submit_and_realloc:
 page->index, for_write);
 if (IS_ERR(bio)) {
 ret = PTR_ERR(bio);
-f2fs_decompress_end_io(dic, ret);
+f2fs_decompress_end_io(dic, ret, true);
 f2fs_put_dnode(&dn);
 *bio_ret = NULL;
 return ret;


@@ -870,14 +870,23 @@ unlock_out:
 }
 #endif
-static unsigned long long __calculate_block_age(unsigned long long new,
+static unsigned long long __calculate_block_age(struct f2fs_sb_info *sbi,
+unsigned long long new,
 unsigned long long old)
 {
-unsigned long long diff;
+unsigned int rem_old, rem_new;
+unsigned long long res;
+unsigned int weight = sbi->last_age_weight;
-diff = (new >= old) ? new - (new - old) : new + (old - new);
+res = div_u64_rem(new, 100, &rem_new) * (100 - weight)
++ div_u64_rem(old, 100, &rem_old) * weight;
-return div_u64(diff * LAST_AGE_WEIGHT, 100);
+if (rem_new)
+res += rem_new * (100 - weight) / 100;
+if (rem_old)
+res += rem_old * weight / 100;
+return res;
 }
 /* This returns a new age and allocated blocks in ei */
@@ -909,7 +918,7 @@ static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
 cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks;
 if (tei.age)
-ei->age = __calculate_block_age(cur_age, tei.age);
+ei->age = __calculate_block_age(sbi, cur_age, tei.age);
 else
 ei->age = cur_age;
 ei->last_blocks = cur_blocks;
@@ -1226,6 +1235,7 @@ void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
 atomic64_set(&sbi->allocated_data_blocks, 0);
 sbi->hot_data_age_threshold = DEF_HOT_DATA_AGE_THRESHOLD;
 sbi->warm_data_age_threshold = DEF_WARM_DATA_AGE_THRESHOLD;
+sbi->last_age_weight = LAST_AGE_WEIGHT;
 }
 int __init f2fs_create_extent_cache(void)
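For reference, the rewritten __calculate_block_age() above blends the current and previous block ages with a runtime-tunable percentage weight (sbi->last_age_weight) instead of the fixed LAST_AGE_WEIGHT constant, splitting each value with div_u64_rem() so the intermediate 64-bit multiplications cannot overflow. Ignoring that overflow protection, the blend is approximately (up to integer rounding) the simpler sketch below; the helper name is illustrative and not part of the patch:

	/* weight is a percentage in [0, 100]; weight = 30 keeps 70% of the new age */
	static unsigned long long blended_age(unsigned long long new,
			unsigned long long old, unsigned int weight)
	{
		return (new * (100 - weight) + old * weight) / 100;
	}

With weight = 30, new = 1000 and old = 2000, both forms give 1300.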


@@ -152,6 +152,7 @@ struct f2fs_mount_info {
 int fsync_mode; /* fsync policy */
 int fs_mode; /* fs mode: LFS or ADAPTIVE */
 int bggc_mode; /* bggc mode: off, on or sync */
+int memory_mode; /* memory mode */
 struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
 block_t unusable_cap_perc; /* percentage for cap */
 block_t unusable_cap; /* Amount of space allowed to be
@@ -1331,6 +1332,11 @@ enum {
 */
 };
+enum {
+MEMORY_MODE_NORMAL, /* memory mode for normal devices */
+MEMORY_MODE_LOW, /* memory mode for low memry devices */
+};
 static inline int f2fs_test_bit(unsigned int nr, char *addr);
 static inline void f2fs_set_bit(unsigned int nr, char *addr);
 static inline void f2fs_clear_bit(unsigned int nr, char *addr);
@@ -1550,6 +1556,7 @@ struct decompress_io_ctx {
 void *private; /* payload buffer for specified decompression algorithm */
 void *private2; /* extra payload buffer */
 struct work_struct verity_work; /* work to verify the decompressed pages */
+struct work_struct free_work; /* work for late free this structure itself */
 };
 #define NULL_CLUSTER ((unsigned int)(~0))
@@ -1620,6 +1627,7 @@ struct f2fs_sb_info {
 /* The threshold used for hot and warm data seperation*/
 unsigned int hot_data_age_threshold;
 unsigned int warm_data_age_threshold;
+unsigned int last_age_weight;
 /* basic filesystem units */
 unsigned int log_sectors_per_block; /* log2 sectors per block */
@@ -4181,9 +4189,9 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
 bool f2fs_is_compress_backend_ready(struct inode *inode);
 int f2fs_init_compress_mempool(void);
 void f2fs_destroy_compress_mempool(void);
-void f2fs_decompress_cluster(struct decompress_io_ctx *dic);
+void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task);
 void f2fs_end_read_compressed_page(struct page *page, bool failed,
-block_t blkaddr);
+block_t blkaddr, bool in_task);
 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
@@ -4199,8 +4207,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 unsigned nr_pages, sector_t *last_block_in_bio,
 bool is_readahead, bool for_write);
 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
-void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
-void f2fs_put_page_dic(struct page *page);
+void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
+bool in_task);
+void f2fs_put_page_dic(struct page *page, bool in_task);
 unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
 int f2fs_init_compress_ctx(struct compress_ctx *cc);
 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
@@ -4246,13 +4255,14 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
 }
 static inline int f2fs_init_compress_mempool(void) { return 0; }
 static inline void f2fs_destroy_compress_mempool(void) { }
-static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic) { }
+static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic,
+bool in_task) { }
 static inline void f2fs_end_read_compressed_page(struct page *page,
-bool failed, block_t blkaddr)
+bool failed, block_t blkaddr, bool in_task)
 {
 WARN_ON_ONCE(1);
 }
-static inline void f2fs_put_page_dic(struct page *page)
+static inline void f2fs_put_page_dic(struct page *page, bool in_task)
 {
 WARN_ON_ONCE(1);
 }
@@ -4403,6 +4413,11 @@ static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
 return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
 }
+static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
+{
+return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW;
+}
 static inline bool f2fs_may_compress(struct inode *inode)
 {
 if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||


@@ -4121,8 +4121,8 @@ static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
 goto out;
 }
-if (f2fs_is_mmap_file(inode)) {
-ret = -EBUSY;
+if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ret = -EINVAL;
 goto out;
 }
@@ -4193,8 +4193,8 @@ static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
 goto out;
 }
-if (f2fs_is_mmap_file(inode)) {
-ret = -EBUSY;
+if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ret = -EINVAL;
 goto out;
 }


@@ -154,6 +154,7 @@ enum {
 Opt_atgc,
 Opt_gc_merge,
 Opt_nogc_merge,
+Opt_memory_mode,
 Opt_age_extent_cache,
 Opt_err,
 };
@@ -230,6 +231,7 @@ static match_table_t f2fs_tokens = {
 {Opt_atgc, "atgc"},
 {Opt_gc_merge, "gc_merge"},
 {Opt_nogc_merge, "nogc_merge"},
+{Opt_memory_mode, "memory=%s"},
 {Opt_age_extent_cache, "age_extent_cache"},
 {Opt_err, NULL},
 };
@@ -1153,6 +1155,22 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
 case Opt_age_extent_cache:
 set_opt(sbi, AGE_EXTENT_CACHE);
 break;
+case Opt_memory_mode:
+name = match_strdup(&args[0]);
+if (!name)
+return -ENOMEM;
+if (!strcmp(name, "normal")) {
+F2FS_OPTION(sbi).memory_mode =
+MEMORY_MODE_NORMAL;
+} else if (!strcmp(name, "low")) {
+F2FS_OPTION(sbi).memory_mode =
+MEMORY_MODE_LOW;
+} else {
+kfree(name);
+return -EINVAL;
+}
+kfree(name);
+break;
 default:
 f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
 p);
@@ -1901,6 +1919,12 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 if (test_opt(sbi, ATGC))
 seq_puts(seq, ",atgc");
+if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL)
+seq_printf(seq, ",memory=%s", "normal");
+else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
+seq_printf(seq, ",memory=%s", "low");
 return 0;
 }
@@ -1923,6 +1947,7 @@ static void default_options(struct f2fs_sb_info *sbi)
 F2FS_OPTION(sbi).compress_ext_cnt = 0;
 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
+F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
 sbi->sb->s_flags &= ~SB_INLINECRYPT;
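As a usage note, once this is in place a memory-constrained device can opt in at mount time with something along the lines of mount -t f2fs -o memory=low <device> <mountpoint> (device and mount point are placeholders); omitting the option, or passing memory=normal, keeps the default MEMORY_MODE_NORMAL behaviour set in default_options() above.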


@@ -567,6 +567,15 @@ out:
 return count;
 }
+if (!strcmp(a->attr.name, "last_age_weight")) {
+if (t > 100)
+return -EINVAL;
+if (t == *ui)
+return count;
+*ui = (unsigned int)t;
+return count;
+}
 *ui = (unsigned int)t;
 return count;
@@ -799,6 +808,7 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_reclaimed_segments, gc_reclaimed_segs);
 /* For block age extent cache */
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, hot_data_age_threshold, hot_data_age_threshold);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, warm_data_age_threshold, warm_data_age_threshold);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, last_age_weight, last_age_weight);
 #define ATTR_LIST(name) (&f2fs_attr_##name.attr)
 static struct attribute *f2fs_attrs[] = {
@@ -877,6 +887,7 @@ static struct attribute *f2fs_attrs[] = {
 ATTR_LIST(gc_reclaimed_segments),
 ATTR_LIST(hot_data_age_threshold),
 ATTR_LIST(warm_data_age_threshold),
+ATTR_LIST(last_age_weight),
 NULL,
 };
 ATTRIBUTE_GROUPS(f2fs);


@@ -3,7 +3,6 @@
 * Copyright 2019 Google LLC
 */
 #include <linux/crc32.h>
-#include <linux/delay.h>
 #include <linux/file.h>
 #include <linux/fsverity.h>
 #include <linux/gfp.h>
@@ -1104,25 +1103,10 @@ static void notify_pending_reads(struct mount_info *mi,
 wake_up_all(&mi->mi_blocks_written_notif_wq);
 }
-static int usleep_interruptible(u32 us)
-{
-/* See:
-* https://www.kernel.org/doc/Documentation/timers/timers-howto.txt
-* for explanation
-*/
-if (us < 10) {
-udelay(us);
-return 0;
-} else if (us < 20000) {
-usleep_range(us, us + us / 10);
-return 0;
-} else
-return msleep_interruptible(us / 1000);
-}
 static int wait_for_data_block(struct data_file *df, int block_index,
 struct data_file_block *res_block,
-struct incfs_read_data_file_timeouts *timeouts)
+struct incfs_read_data_file_timeouts *timeouts,
+unsigned int *delayed_min_us)
 {
 struct data_file_block block = {};
 struct data_file_segment *segment = NULL;
@@ -1130,7 +1114,7 @@ static int wait_for_data_block(struct data_file *df, int block_index,
 struct mount_info *mi = NULL;
 int error;
 int wait_res = 0;
-unsigned int delayed_pending_us = 0, delayed_min_us = 0;
+unsigned int delayed_pending_us = 0;
 bool delayed_pending = false;
 if (!df || !res_block)
@@ -1161,8 +1145,7 @@ static int wait_for_data_block(struct data_file *df, int block_index,
 if (is_data_block_present(&block)) {
 *res_block = block;
 if (timeouts && timeouts->min_time_us) {
-delayed_min_us = timeouts->min_time_us;
-error = usleep_interruptible(delayed_min_us);
+*delayed_min_us = timeouts->min_time_us;
 goto out;
 }
 return 0;
@@ -1209,13 +1192,9 @@ static int wait_for_data_block(struct data_file *df, int block_index,
 delayed_pending = true;
 delayed_pending_us = timeouts->max_pending_time_us -
 jiffies_to_usecs(wait_res);
-if (timeouts->min_pending_time_us > delayed_pending_us) {
-delayed_min_us = timeouts->min_pending_time_us -
+if (timeouts->min_pending_time_us > delayed_pending_us)
+*delayed_min_us = timeouts->min_pending_time_us -
 delayed_pending_us;
-error = usleep_interruptible(delayed_min_us);
-if (error)
-return error;
-}
 error = down_read_killable(&segment->rwsem);
 if (error)
@@ -1250,9 +1229,9 @@ out:
 delayed_pending_us;
 }
-if (delayed_min_us) {
+if (delayed_min_us && *delayed_min_us) {
 mi->mi_reads_delayed_min++;
-mi->mi_reads_delayed_min_us += delayed_min_us;
+mi->mi_reads_delayed_min_us += *delayed_min_us;
 }
 return 0;
@@ -1282,7 +1261,8 @@ static int incfs_update_sysfs_error(struct file *file, int index, int result,
 ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f,
 int index, struct mem_range tmp,
-struct incfs_read_data_file_timeouts *timeouts)
+struct incfs_read_data_file_timeouts *timeouts,
+unsigned int *delayed_min_us)
 {
 loff_t pos;
 ssize_t result;
@@ -1301,7 +1281,8 @@ ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f,
 mi = df->df_mount_info;
 bfc = df->df_backing_file_context;
-result = wait_for_data_block(df, index, &block, timeouts);
+result = wait_for_data_block(df, index, &block, timeouts,
+delayed_min_us);
 if (result < 0)
 goto out;
@@ -1379,7 +1360,8 @@ ssize_t incfs_read_merkle_tree_blocks(struct mem_range dst,
 }
 int incfs_process_new_data_block(struct data_file *df,
-struct incfs_fill_block *block, u8 *data)
+struct incfs_fill_block *block, u8 *data,
+bool *complete)
 {
 struct mount_info *mi = NULL;
 struct backing_file_context *bfc = NULL;
@@ -1418,27 +1400,42 @@ int incfs_process_new_data_block(struct data_file *df,
 if (error)
 return error;
-if (is_data_block_present(&existing_block)) {
+if (is_data_block_present(&existing_block))
 /* Block is already present, nothing to do here */
 return 0;
-}
 error = down_write_killable(&segment->rwsem);
 if (error)
 return error;
-error = mutex_lock_interruptible(&bfc->bc_mutex);
-if (!error) {
-error = incfs_write_data_block_to_backing_file(
-bfc, range(data, block->data_len), block->block_index,
-df->df_blockmap_off, flags);
-mutex_unlock(&bfc->bc_mutex);
-}
-if (!error) {
-notify_pending_reads(mi, segment, block->block_index);
-atomic_inc(&df->df_data_blocks_written);
-}
+/* Recheck inside write lock */
+error = get_data_file_block(df, block->block_index, &existing_block);
+if (error)
+goto out_up_write;
+if (is_data_block_present(&existing_block))
+goto out_up_write;
+error = mutex_lock_interruptible(&bfc->bc_mutex);
+if (error)
+goto out_up_write;
+error = incfs_write_data_block_to_backing_file(bfc,
+range(data, block->data_len), block->block_index,
+df->df_blockmap_off, flags);
+if (error)
+goto out_mutex_unlock;
+if (atomic_inc_return(&df->df_data_blocks_written)
+>= df->df_data_block_count)
+*complete = true;
+out_mutex_unlock:
+mutex_unlock(&bfc->bc_mutex);
+if (!error)
+notify_pending_reads(mi, segment, block->block_index);
+out_up_write:
 up_write(&segment->rwsem);
 if (error)


@@ -429,7 +429,8 @@ struct incfs_read_data_file_timeouts {
 ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f,
 int index, struct mem_range tmp,
-struct incfs_read_data_file_timeouts *timeouts);
+struct incfs_read_data_file_timeouts *timeouts,
+unsigned int *delayed_min_us);
 ssize_t incfs_read_merkle_tree_blocks(struct mem_range dst,
 struct data_file *df, size_t offset);
@@ -441,7 +442,8 @@ int incfs_get_filled_blocks(struct data_file *df,
 int incfs_read_file_signature(struct data_file *df, struct mem_range dst);
 int incfs_process_new_data_block(struct data_file *df,
-struct incfs_fill_block *block, u8 *data);
+struct incfs_fill_block *block, u8 *data,
+bool *complete);
 int incfs_process_new_hash_block(struct data_file *df,
 struct incfs_fill_block *block, u8 *data);


@@ -33,11 +33,13 @@ static struct kobj_attribute name##_attr = __ATTR_RO(name)
 DECLARE_FEATURE_FLAG(corefs);
 DECLARE_FEATURE_FLAG(zstd);
 DECLARE_FEATURE_FLAG(v2);
+DECLARE_FEATURE_FLAG(bugfix_throttling);
 static struct attribute *attributes[] = {
 &corefs_attr.attr,
 &zstd_attr.attr,
 &v2_attr.attr,
+&bugfix_throttling_attr.attr,
 NULL,
 };


@@ -323,7 +323,7 @@ static int incfs_build_merkle_tree(struct file *f, struct data_file *df,
 if (lvl == 0)
 result = incfs_read_data_file_block(partial_buf,
-f, i, tmp, NULL);
+f, i, tmp, NULL, NULL);
 else {
 hash_level_offset = hash_offset +
 hash_tree->hash_level_suboffset[lvl - 1];


@@ -5,6 +5,7 @@
 #include <linux/blkdev.h>
 #include <linux/compat.h>
+#include <linux/delay.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/fs_stack.h>
@@ -477,7 +478,8 @@ static struct dentry *open_or_create_special_dir(struct dentry *backing_dir,
 static int read_single_page_timeouts(struct data_file *df, struct file *f,
 int block_index, struct mem_range range,
-struct mem_range tmp)
+struct mem_range tmp,
+unsigned int *delayed_min_us)
 {
 struct mount_info *mi = df->df_mount_info;
 struct incfs_read_data_file_timeouts timeouts = {
@@ -509,7 +511,23 @@ static int read_single_page_timeouts(struct data_file *df, struct file *f,
 }
 return incfs_read_data_file_block(range, f, block_index, tmp,
-&timeouts);
+&timeouts, delayed_min_us);
+}
+static int usleep_interruptible(u32 us)
+{
+/* See:
+* https://www.kernel.org/doc/Documentation/timers/timers-howto.txt
+* for explanation
+*/
+if (us < 10) {
+udelay(us);
+return 0;
+} else if (us < 20000) {
+usleep_range(us, us + us / 10);
+return 0;
+} else
+return msleep_interruptible(us / 1000);
 }
 static int read_single_page(struct file *f, struct page *page)
@@ -522,6 +540,7 @@ static int read_single_page(struct file *f, struct page *page)
 int result = 0;
 void *page_start;
 int block_index;
+unsigned int delayed_min_us = 0;
 if (!df) {
 SetPageError(page);
@@ -547,7 +566,8 @@ static int read_single_page(struct file *f, struct page *page)
 bytes_to_read = min_t(loff_t, size - offset, PAGE_SIZE);
 read_result = read_single_page_timeouts(df, f, block_index,
-range(page_start, bytes_to_read), tmp);
+range(page_start, bytes_to_read), tmp,
+&delayed_min_us);
 free_pages((unsigned long)tmp.data, get_order(tmp.len));
 } else {
@@ -569,6 +589,8 @@ err:
 flush_dcache_page(page);
 kunmap(page);
 unlock_page(page);
+if (delayed_min_us)
+usleep_interruptible(delayed_min_us);
 return result;
 }
@@ -662,8 +684,7 @@ out:
 dput(file);
 }
-static void maybe_delete_incomplete_file(struct file *f,
-struct data_file *df)
+static void handle_file_completed(struct file *f, struct data_file *df)
 {
 struct backing_file_context *bfc;
 struct mount_info *mi = df->df_mount_info;
@@ -672,9 +693,6 @@ static void maybe_delete_incomplete_file(struct file *f,
 const struct cred *old_cred = override_creds(mi->mi_owner);
 int error;
-if (atomic_read(&df->df_data_blocks_written) < df->df_data_block_count)
-goto out;
 /* Truncate file to remove any preallocated space */
 bfc = df->df_backing_file_context;
 if (bfc) {
@@ -733,6 +751,7 @@ static long ioctl_fill_blocks(struct file *f, void __user *arg)
 u8 *data_buf = NULL;
 ssize_t error = 0;
 int i = 0;
+bool complete = false;
 if (!df)
 return -EBADF;
@@ -774,7 +793,7 @@ static long ioctl_fill_blocks(struct file *f, void __user *arg)
 data_buf);
 } else {
 error = incfs_process_new_data_block(df, &fill_block,
-data_buf);
+data_buf, &complete);
 }
 if (error)
 break;
@@ -783,7 +802,8 @@ static long ioctl_fill_blocks(struct file *f, void __user *arg)
 if (data_buf)
 free_pages((unsigned long)data_buf, get_order(data_buf_size));
-maybe_delete_incomplete_file(f, df);
+if (complete)
+handle_file_completed(f, df);
 /*
 * Only report the error if no records were processed, otherwise


@@ -73,6 +73,12 @@ static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
 return -ENOSYS;
 }
+static inline int eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n,
+unsigned mask)
+{
+return -ENOSYS;
+}
 static inline void eventfd_ctx_put(struct eventfd_ctx *ctx)
 {


@@ -41,6 +41,9 @@ static inline void dump_page_pinner(struct page *page)
 static inline void page_pinner_put_page(struct page *page)
 {
+if (!static_branch_unlikely(&page_pinner_inited))
+return;
 if (!static_branch_unlikely(&failure_tracking))
 return;
@@ -49,6 +52,9 @@ static inline void page_pinner_put_page(struct page *page)
 static inline void page_pinner_failure_detect(struct page *page)
 {
+if (!static_branch_unlikely(&page_pinner_inited))
+return;
 if (!static_branch_unlikely(&failure_tracking))
 return;


@@ -83,7 +83,7 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 void rcu_init(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
 void rcu_sched_clock_irq(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);


@@ -60,7 +60,7 @@ static inline void rcu_irq_exit_check_preempt(void) { }
 void exit_rcu(void);
 void rcu_scheduler_starting(void);
-extern int rcu_scheduler_active __read_mostly;
+extern int rcu_scheduler_active;
 void rcu_end_inkernel_boot(void);
 bool rcu_inkernel_boot_has_ended(void);
 bool rcu_is_watching(void);


@@ -245,4 +245,7 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 int register_vmap_purge_notifier(struct notifier_block *nb);
 int unregister_vmap_purge_notifier(struct notifier_block *nb);
+/* Allow disabling lazy TLB flushing */
+extern bool lazy_vunmap_enable;
 #endif /* _LINUX_VMALLOC_H */

include/net/TEST_MAPPING (new file)

@@ -0,0 +1,7 @@
+{
+"presubmit": [
+{
+"name": "CtsNetTestCases"
+}
+]
+}


@@ -237,8 +237,6 @@ extern struct list_head net_namespace_list;
 struct net *get_net_ns_by_pid(pid_t pid);
 struct net *get_net_ns_by_fd(int fd);
-u64 __net_gen_cookie(struct net *net);
 #ifdef CONFIG_SYSCTL
 void ipx_register_sysctl(void);
 void ipx_unregister_sysctl(void);


@@ -32,6 +32,15 @@ DECLARE_HOOK(android_vh_mutex_wait_start,
 DECLARE_HOOK(android_vh_mutex_wait_finish,
 TP_PROTO(struct mutex *lock),
 TP_ARGS(lock));
+DECLARE_HOOK(android_vh_mutex_opt_spin_start,
+TP_PROTO(struct mutex *lock, bool *time_out, int *cnt),
+TP_ARGS(lock, time_out, cnt));
+DECLARE_HOOK(android_vh_mutex_opt_spin_finish,
+TP_PROTO(struct mutex *lock, bool taken),
+TP_ARGS(lock, taken));
+DECLARE_HOOK(android_vh_mutex_can_spin_on_owner,
+TP_PROTO(struct mutex *lock, int *retval),
+TP_ARGS(lock, retval));
 DECLARE_HOOK(android_vh_rtmutex_wait_start,
 TP_PROTO(struct rt_mutex *lock),
@@ -52,6 +61,15 @@ DECLARE_HOOK(android_vh_rwsem_write_wait_start,
 DECLARE_HOOK(android_vh_rwsem_write_wait_finish,
 TP_PROTO(struct rw_semaphore *sem),
 TP_ARGS(sem));
+DECLARE_HOOK(android_vh_rwsem_opt_spin_start,
+TP_PROTO(struct rw_semaphore *sem, bool *time_out, int *cnt, bool chk_only),
+TP_ARGS(sem, time_out, cnt, chk_only));
+DECLARE_HOOK(android_vh_rwsem_opt_spin_finish,
+TP_PROTO(struct rw_semaphore *sem, bool taken, bool wlock),
+TP_ARGS(sem, taken, wlock));
+DECLARE_HOOK(android_vh_rwsem_can_spin_on_owner,
+TP_PROTO(struct rw_semaphore *sem, bool *ret, bool wlock),
+TP_ARGS(sem, ret, wlock));
 DECLARE_HOOK(android_vh_sched_show_task,
 TP_PROTO(struct task_struct *task),


@@ -119,6 +119,8 @@
 #define SO_DETACH_REUSEPORT_BPF 68
+#define SO_NETNS_COOKIE 71
 #if !defined(__KERNEL__)
 #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))


@@ -1159,7 +1159,7 @@ int remove_cpu(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(remove_cpu);
-extern bool dl_cpu_busy(unsigned int cpu);
+extern int dl_cpu_busy(int cpu, struct task_struct *p);
 int __pause_drain_rq(struct cpumask *cpus)
 {
@@ -1234,7 +1234,7 @@ int pause_cpus(struct cpumask *cpus)
 cpumask_and(cpus, cpus, cpu_active_mask);
 for_each_cpu(cpu, cpus) {
-if (!cpu_online(cpu) || dl_cpu_busy(cpu) ||
+if (!cpu_online(cpu) || dl_cpu_busy(cpu, NULL) ||
 get_cpu_device(cpu)->offline_disabled == true) {
 err = -EBUSY;
 goto err_cpu_maps_update;


@@ -568,9 +568,16 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
 {
 bool ret = true;
+int cnt = 0;
+bool time_out = false;
 rcu_read_lock();
 while (__mutex_owner(lock) == owner) {
+trace_android_vh_mutex_opt_spin_start(lock, &time_out, &cnt);
+if (time_out) {
+ret = false;
+break;
+}
 /*
 * Ensure we emit the owner->on_cpu, dereference _after_
 * checking lock->owner still matches owner. If that fails,
@@ -621,6 +628,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 if (owner)
 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
 rcu_read_unlock();
+trace_android_vh_mutex_can_spin_on_owner(lock, &retval);
 /*
 * If lock->owner is not set, the mutex has been released. Return true
@@ -702,6 +710,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 if (!waiter)
 osq_unlock(&lock->osq);
+trace_android_vh_mutex_opt_spin_finish(lock, true);
 return true;
@@ -710,6 +719,7 @@ fail_unlock:
 osq_unlock(&lock->osq);
 fail:
+trace_android_vh_mutex_opt_spin_finish(lock, false);
 /*
 * If we fell out of the spin path because of need_resched(),
 * reschedule now, before we try-lock the mutex. This avoids getting


@@ -673,6 +673,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
 ret = false;
 rcu_read_unlock();
 preempt_enable();
+trace_android_vh_rwsem_can_spin_on_owner(sem, &ret, nonspinnable == RWSEM_WR_NONSPINNABLE);
 lockevent_cond_inc(rwsem_opt_fail, !ret);
 return ret;
@@ -715,6 +716,8 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
 struct task_struct *new, *owner;
 unsigned long flags, new_flags;
 enum owner_state state;
+int cnt = 0;
+bool time_out = false;
 owner = rwsem_owner_flags(sem, &flags);
 state = rwsem_owner_state(owner, flags, nonspinnable);
@@ -723,6 +726,9 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
 rcu_read_lock();
 for (;;) {
+trace_android_vh_rwsem_opt_spin_start(sem, &time_out, &cnt, true);
+if (time_out)
+break;
 /*
 * When a waiting writer set the handoff flag, it may spin
 * on the owner as well. Once that writer acquires the lock,
@@ -786,6 +792,8 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
 int prev_owner_state = OWNER_NULL;
 int loop = 0;
 u64 rspin_threshold = 0;
+int cnt = 0;
+bool time_out = false;
 unsigned long nonspinnable = wlock ? RWSEM_WR_NONSPINNABLE
 : RWSEM_RD_NONSPINNABLE;
@@ -804,6 +812,10 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
 for (;;) {
 enum owner_state owner_state;
+trace_android_vh_rwsem_opt_spin_start(sem, &time_out, &cnt, false);
+if (time_out)
+break;
 owner_state = rwsem_spin_on_owner(sem, nonspinnable);
 if (!(owner_state & OWNER_SPINNABLE))
 break;
@@ -898,6 +910,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock)
 cpu_relax();
 }
 osq_unlock(&sem->osq);
+trace_android_vh_rwsem_opt_spin_finish(sem, taken, wlock);
 done:
 preempt_enable();
 lockevent_cond_inc(rwsem_opt_fail, !taken);


@@ -733,7 +733,7 @@ static void show_rcu_tasks_rude_gp_kthread(void)
 #endif /* #ifndef CONFIG_TINY_RCU */
 #else /* #ifdef CONFIG_TASKS_RUDE_RCU */
-static void show_rcu_tasks_rude_gp_kthread(void) {}
+static inline void show_rcu_tasks_rude_gp_kthread(void) {}
 #endif /* #else #ifdef CONFIG_TASKS_RUDE_RCU */
 ////////////////////////////////////////////////////////////////////////


@@ -75,7 +75,6 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
 loads[1] = (avenrun[1] + offset) << shift;
 loads[2] = (avenrun[2] + offset) << shift;
 }
-EXPORT_SYMBOL_GPL(get_avenrun);
 long calc_load_fold_active(struct rq *this_rq, long adjust)
 {


@@ -2736,11 +2736,14 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
 page = find_get_page(mapping, offset);
-if (unlikely(!page) || unlikely(PageReadahead(page)))
+if (unlikely(!page))
 return VM_FAULT_RETRY;
+if (unlikely(PageReadahead(page)))
+goto page_put;
 if (!trylock_page(page))
-return VM_FAULT_RETRY;
+goto page_put;
 if (unlikely(compound_head(page)->mapping != mapping))
 goto page_unlock;
@@ -2772,6 +2775,8 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 return VM_FAULT_LOCKED;
 page_unlock:
 unlock_page(page);
+page_put:
+put_page(page);
 return VM_FAULT_RETRY;
 }


@@ -57,6 +57,7 @@ static struct longterm_pinner acf_pinner = {
 static bool page_pinner_enabled;
 DEFINE_STATIC_KEY_FALSE(page_pinner_inited);
+EXPORT_SYMBOL(page_pinner_inited);
 DEFINE_STATIC_KEY_TRUE(failure_tracking);
 EXPORT_SYMBOL(failure_tracking);
@@ -236,7 +237,7 @@ print_page_pinner(bool longterm, char __user *buf, size_t count, struct captured
 ret = snprintf(kbuf, count, "Page pinned for %lld us\n",
 record->elapsed);
 } else {
-s64 ts_usec = record->ts_usec;
+u64 ts_usec = record->ts_usec;
 unsigned long rem_usec = do_div(ts_usec, 1000000);
 ret = snprintf(kbuf, count,
@@ -291,7 +292,7 @@ void __dump_page_pinner(struct page *page)
 unsigned long pfn;
 int count;
 unsigned long rem_usec;
-s64 ts_usec;
+u64 ts_usec;
 if (unlikely(!page_ext)) {
 pr_alert("There is not page extension available.\n");


@@ -1277,6 +1277,7 @@ int unregister_vmap_purge_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
+bool lazy_vunmap_enable __read_mostly = true;
 /*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
@@ -1297,6 +1298,9 @@ static unsigned long lazy_max_pages(void)
 {
 unsigned int log;
+if (!lazy_vunmap_enable)
+return 0;
 log = fls(num_online_cpus());
 return log * (32UL * 1024 * 1024 / PAGE_SIZE);

net/TEST_MAPPING (new file)

@@ -0,0 +1,7 @@
+{
+"presubmit": [
+{
+"name": "CtsNetTestCases"
+}
+]
+}


@@ -4636,11 +4636,9 @@ static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
 static u64 __bpf_get_netns_cookie(struct sock *sk)
 {
-#ifdef CONFIG_NET_NS
-return __net_gen_cookie(sk ? sk->sk_net.net : &init_net);
-#else
-return 0;
-#endif
+const struct net *net = sk ? sock_net(sk) : &init_net;
+return atomic64_read(&net->net_cookie);
 }
 BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx)


@@ -72,18 +72,6 @@ static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
 DEFINE_COOKIE(net_cookie);
-u64 __net_gen_cookie(struct net *net)
-{
-while (1) {
-u64 res = atomic64_read(&net->net_cookie);
-if (res)
-return res;
-res = gen_cookie_next(&net_cookie);
-atomic64_cmpxchg(&net->net_cookie, 0, res);
-}
-}
 static struct net_generic *net_alloc_generic(void)
 {
 struct net_generic *ng;
@@ -341,6 +329,9 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 refcount_set(&net->count, 1);
 refcount_set(&net->passive, 1);
 get_random_bytes(&net->hash_mix, sizeof(u32));
+preempt_disable();
+atomic64_set(&net->net_cookie, gen_cookie_next(&net_cookie));
+preempt_enable();
 net->dev_base_seq = 1;
 net->user_ns = user_ns;
 idr_init(&net->netns_ids);
@@ -1128,10 +1119,6 @@ static int __init net_ns_init(void)
 rcu_assign_pointer(init_net.gen, ng);
-preempt_disable();
-__net_gen_cookie(&init_net);
-preempt_enable();
 down_write(&pernet_ops_rwsem);
 if (setup_net(&init_net, &init_user_ns))
 panic("Could not setup the initial network namespace");


@@ -1613,6 +1613,13 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 v.val = sk->sk_bound_dev_if;
 break;
+case SO_NETNS_COOKIE:
+lv = sizeof(u64);
+if (len != lv)
+return -EINVAL;
+v.val64 = atomic64_read(&sock_net(sk)->net_cookie);
+break;
 default:
 /* We implement the SO_SNDLOWAT etc to not be settable
 * (1003.1g 7).
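On the userspace side this new case is reached through a plain getsockopt() call; a minimal sketch of a consumer is shown below, assuming fd is any open socket and that SO_NETNS_COOKIE is available from the uapi headers (on older headers it would have to be defined manually to 71, as added above):

	#include <stdint.h>
	#include <sys/socket.h>

	/* Returns 0 on success and stores the namespace cookie; fd is any open socket. */
	static int get_netns_cookie(int fd, uint64_t *cookie)
	{
		socklen_t optlen = sizeof(*cookie); /* must be exactly 8 bytes, see the len check above */

		return getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, cookie, &optlen);
	}

The value returned is the same per-namespace cookie the BPF helper now reads with atomic64_read(&net->net_cookie) in the filter.c hunk above.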


@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_QRTR) := qrtr.o ns.o
+obj-$(CONFIG_QRTR) += qrtr.o
+qrtr-y := af_qrtr.o ns.o
 obj-$(CONFIG_QRTR_SMD) += qrtr-smd.o
 qrtr-smd-y := smd.o