
This is the merge of the upstream LTS release of 5.10.168 into the android12-5.10 branch.

It contains the following commits:

*b34e092097 Revert "nvmem: core: Fix a conflict between MTD and NVMEM on wp-gpios property"
*570621d64f Merge 5.10.168 into android12-5.10-lts
|\
| *707c48210a Linux 5.10.168
| *0a626e27f9 Fix page corruption caused by racy check in __free_pages
| *0ef2490a87 arm64: dts: meson-axg: Make mmc host controller interrupts level-sensitive
| *5bfc8f0961 arm64: dts: meson-g12-common: Make mmc host controller interrupts level-sensitive
| *809f4acb7f arm64: dts: meson-gx: Make mmc host controller interrupts level-sensitive
| *8eee3521bc riscv: Fixup race condition on PG_dcache_clean in flush_icache_pte
| *6ff8b48253 ceph: flush cap releases when the session is flushed
| *4f518a4a79 usb: typec: altmodes/displayport: Fix probe pin assign check
| *f25fa93e52 usb: core: add quirk for Alcor Link AK9563 smartcard reader
| *dd965ad39d btrfs: free device in btrfs_close_devices for a single device filesystem
| *1be271c52b net: USB: Fix wrong-direction WARNING in plusb.c
| *2b693fe3f7 cifs: Fix use-after-free in rdata->read_into_pages()
| *bbc8509044 pinctrl: intel: Restore the pins that used to be in Direct IRQ mode
| *4863f46dda spi: dw: Fix wrong FIFO level setting for long xfers
| *6e2a0521e4 pinctrl: single: fix potential NULL dereference
| *61f8a493c0 pinctrl: aspeed: Fix confusing types in return value
| *ef3edede7b ALSA: pci: lx6464es: fix a debug loop
| *3914b71dad selftests: forwarding: lib: quote the sysctl values
| *c53f34ec3f rds: rds_rm_zerocopy_callback() use list_first_entry()
| *3eb04ef278 net/mlx5: fw_tracer, Zero consumer index when reloading the tracer
| *fac1fb8008 net/mlx5: fw_tracer, Clear load bit when freeing string DBs buffers
| *703c3efa4b net/mlx5e: IPoIB, Show unknown speed instead of error
| *896bd85688 net: mscc: ocelot: fix VCAP filters not matching on MAC with "protocol 802.1Q"
| *1ad4112c9f ice: Do not use WQ_MEM_RECLAIM flag for workqueue
| *34a5af788e uapi: add missing ip/ipv6 header dependencies for linux/stddef.h
| *4259a40827 ionic: clean interrupt before enabling queue to avoid credit race
| *07097ad30b net: phy: meson-gxl: use MMD access dummy stubs for GXL, internal PHY
| *cafa2ad4f1 bonding: fix error checking in bond_debug_reregister()
| *30fdf66035 xfrm: fix bug with DSCP copy to v6 from v4 tunnel
| *491b7a5fc8 RDMA/usnic: use iommu_map_atomic() under spin_lock()
| *b1afb666c3 IB/IPoIB: Fix legacy IPoIB due to wrong number of queues
| *a893cc6448 xfrm/compat: prevent potential spectre v1 gadget in xfrm_xlate32_attr()
| *79b595d959 IB/hfi1: Restore allocated resources on failed copyout
| *3797e94c19 xfrm: compat: change expression for switch in xfrm_xlate64
| *bc9771cd63 can: j1939: do not wait 250 ms if the same addr was already claimed
| *edaf5c7183 of/address: Return an error when no valid dma-ranges are found
| *b7d5fa8052 tracing: Fix poll() and select() do not work on per_cpu trace_pipe and trace_pipe_raw
| *35452bf986 ALSA: hda/realtek: Fix the speaker output on Samsung Galaxy Book2 Pro 360
| *e1646e2be9 ALSA: emux: Avoid potential array out-of-bound in snd_emux_xg_control()
| *1c65762399 ALSA: hda/realtek: Add Positivo N14KP6-TG
| *f1fd16cd97 btrfs: zlib: zero-initialize zlib workspace
| *a1406d5aa3 btrfs: limit device extents to the device size
| *dbe5a11954 migrate: hugetlb: check for hugetlb shared PMD in node migration
| *97a5104d64 mm/migration: return errno when isolate_huge_page failed
| *91ad3104b2 iio:adc:twl6030: Enable measurement of VAC
| *e4c3ea9b60 bpf: Do not reject when the stack read size is different from the tracked scalar size
| *34ec4c7831 nvmem: core: Fix a conflict between MTD and NVMEM on wp-gpios property
| *b2e4128795 wifi: brcmfmac: Check the count value of channel spec to prevent out-of-bounds reads
| *914e38f02a f2fs: fix to do sanity check on i_extra_isize in is_alive()
| *3931014367 fbdev: smscufx: fix error handling code in ufx_usb_probe
| *6c8a2c67a9 serial: 8250_dma: Fix DMA Rx rearm race
| *967e726e57 serial: 8250_dma: Fix DMA Rx completion race
| *1fd7a6a579 nvmem: core: fix cell removal on error
| *bb875f0a34 nvmem: core: initialise nvmem->id early
| *b591abac78 drm/i915: Fix potential bit_17 double-free
| *5c4d4a83bf Squashfs: fix handling and sanity checking of xattr_ids count
| *30187be290 mm/swapfile: add cond_resched() in get_swap_pages()
| *639b40007a fpga: stratix10-soc: Fix return value check in s10_ops_write_init()
| *0139d61d28 x86/debug: Fix stack recursion caused by wrongly ordered DR7 accesses
| *556959327b mm: hugetlb: proc: check for hugetlb shared PMD in /proc/PID/smaps
| *50d31309c9 riscv: disable generation of unwind tables
| *71a4f39f99 parisc: Wire up PTRACE_GETREGS/PTRACE_SETREGS for compat case
| *2982b473d7 parisc: Fix return code of pdc_iodc_print()
| *170e1cc3c0 nvmem: qcom-spmi-sdam: fix module autoloading
| *f11330b7ba iio: imu: fxos8700: fix MAGN sensor scale and unit
| *5b30998c7f iio: imu: fxos8700: remove definition FXOS8700_CTRL_ODR_MIN
| *42e34a0839 iio: imu: fxos8700: fix failed initialization ODR mode assignment
| *ab976ecd04 iio: imu: fxos8700: fix incorrect ODR mode readback
| *9d6502ed63 iio: imu: fxos8700: fix swapped ACCEL and MAGN channels readback
| *aff4add609 iio: imu: fxos8700: fix map label of channel type to MAGN sensor
| *9545ce720a iio: imu: fxos8700: fix IMU data bits returned to user space
| *6d43eddc56 iio: imu: fxos8700: fix incomplete ACCEL and MAGN channels readback
| *f7d996c953 iio: imu: fxos8700: fix ACCEL measurement range selection
| *11ac43f763 iio:adc:twl6030: Enable measurements of VUSB, VBAT and others
| *5602f6a244 iio: adc: berlin2-adc: Add missing of_node_put() in error path
| *33483172b3 iio: hid: fix the retval in accel_3d_capture_sample
| *55cf65461b efi: Accept version 2 of memory attributes table
| *8fb515c8b1 ALSA: hda/realtek: Add Acer Predator PH315-54
| *192fd121d0 watchdog: diag288_wdt: fix __diag288() inline assembly
| *5bcdcf437d watchdog: diag288_wdt: do not use stack buffers for hardware data
| *0c76eddc1f net: qrtr: free memory on error path in radix_tree_insert()
| *28d190882b fbcon: Check font dimension limits
| *658e0d99db Input: i8042 - add Clevo PCX0DX to i8042 quirk table
| *a82d493604 Input: i8042 - add TUXEDO devices to i8042 quirk tables
| *04d99a0a96 Input: i8042 - merge quirk tables
| *ab85074c30 Input: i8042 - move __initconst to fix code styling warning
| *55515d7d87 vc_screen: move load of struct vc_data pointer in vcs_read() to avoid UAF
| *434a36ed64 usb: gadget: f_fs: Fix unbalanced spinlock in __ffs_ep0_queue_wait
| *6e5565aa00 usb: dwc3: qcom: enable vbus override when in OTG dr-mode
| *1ca8629505 usb: dwc3: dwc3-qcom: Fix typo in the dwc3 vbus override API
| *30d0e2cf99 iio: adc: stm32-dfsdm: fill module aliases
| *4bbc34401d net/x25: Fix to not accept on connected socket
| *2b1e8e20b9 platform/x86: dell-wmi: Add a keymap for KEY_MUTE in type 0x0010 table
| *ac4d9c86e9 i2c: rk3x: fix a bunch of kernel-doc warnings
| *9758ffe1c0 scsi: iscsi_tcp: Fix UAF during login when accessing the shost ipaddress
| *1b28bf868f scsi: target: core: Fix warning on RT kernels
| *4e66ba3cfb i2c: mxs: suppress probe-deferral error message
| *d09b0bf9ff qede: execute xdp_do_flush() before napi_complete_done()
| *8aba483f70 qede: add netpoll support for qede driver
| *87d4ff1873 efi: fix potential NULL deref in efi_mem_reserve_persistent
| *70154489f5 net: openvswitch: fix flow memory leak in ovs_flow_cmd_new
| *4fb430c698 virtio-net: Keep stop() to follow mirror sequence of open()
| *812236bb6a selftests: net: udpgso_bench_tx: Cater for pending datagrams zerocopy benchmarking
| *9e7e2887cc selftests: net: udpgso_bench: Fix racing bug between the rx/tx programs
| *4babbd1f59 selftests: net: udpgso_bench_rx/tx: Stop when wrong CLI args are provided
| *a4a493e599 selftests: net: udpgso_bench_rx: Fix 'used uninitialized' compiler warning
| *4d9c962716 ata: libata: Fix sata_down_spd_limit() when no link speed is reported
| *b6d4407211 can: j1939: fix errant WARN_ON_ONCE in j1939_session_deactivate
| *6362b86170 igc: return an error if the mac type is unknown in igc_ptp_systim_to_hwtstamp()
| *98c93a0160 net: phy: meson-gxl: Add generic dummy stubs for MMD register access
| *de2785aa34 squashfs: harden sanity check in squashfs_read_xattr_id_table
| *f53c6e7e77 netfilter: br_netfilter: disable sabotage_in hook after first suppression
| *dd6991251a netrom: Fix use-after-free caused by accept on already connected socket
| *362a2f5531 net: phy: dp83822: Fix null pointer access on DP83825/DP83826 devices
| *6824169e74 sfc: correctly advertise tunneled IPv6 segmentation
| *539fc3ef51 virtio-net: execute xdp_do_flush() before napi_complete_done()
| *63d1c4edbf fix "direction" argument of iov_iter_kvec()
| *0c67fb7775 fix iov_iter_bvec() "direction" argument
| *b5437e0ba9 READ is "data destination", not source...
| *fefb47a833 WRITE is "data source", not destination...
| *f9815b303e vhost/net: Clear the pending messages when the backend is removed
| *de990d1571 scsi: Revert "scsi: core: map PQ=1, PDT=other values to SCSI_SCAN_TARGET_PRESENT"
| *d08a9b0ad2 drm/vc4: hdmi: make CEC adapter name unique
| *8c6b46d426 arm64: dts: imx8mm: Fix pad control for UART1_DTE_RX
| *9bd6074e18 bpf, sockmap: Check for any of tcp_bpf_prots when cloning a listener
| *36dbb8daf0 bpf: Fix to preserve reg parent/live fields when copying range info
| *8de8c4a25e bpf: Support <8-byte scalar spill and refill
| *2b557fa635 ALSA: hda/via: Avoid potential array out-of-bound in add_secret_dac_path()
| *1b1f56cc0e bpf: Fix a possible task gone issue with bpf_send_signal[_thread]() helpers
| *2d0f276d50 powerpc/imc-pmu: Revert nest_init_lock to being a mutex
| *9ff2bebc2c bpf: Fix incorrect state pruning for <8B spill/fill
| *60c27e0e37 bus: sunxi-rsb: Fix error handling in sunxi_rsb_init()
| *d5a2dcee53 firewire: fix memory leak for payload of request subaction to IEC 61883-1 FCP region
* |b405332f4b Merge 5.10.167 into android12-5.10-lts
|\|
| *a5acb54d40 Linux 5.10.167
| *6446369fb9 net: fix NULL pointer in skb_segment_list
| *0f9db1209f Bluetooth: fix null ptr deref on hci_sync_conn_complete_evt
| *d744c03c04 ACPI: processor idle: Practically limit "Dummy wait" workaround to old Intel systems
| *bd0050b7ff dmaengine: imx-sdma: Fix a possible memory leak in sdma_transfer_init
| *19c9a2ba46 blk-cgroup: fix missing pd_online_fn() while activating policy
| *a1c0263f1e bpf: Skip task with pid=1 in send_signal_common()
| *f185468631 arm64: dts: imx8mq-thor96: fix no-mmc property for SDHCI
| *a26cef0041 ARM: dts: vf610: Fix pca9548 i2c-mux node names
| *67a8beb854 ARM: dts: imx: Fix pca9547 i2c-mux node name
* |0ddb73d446 Merge 5.10.166 into android12-5.10-lts
|\|
| *8d823aaa22 Linux 5.10.166
| *19f1f99be3 clk: Fix pointer casting to prevent oops in devm_clk_release()
| *f84c9b72fb perf/x86/amd: fix potential integer overflow on shift of a int
| *743435cd17 netfilter: conntrack: unify established states for SCTP paths
| *e284c273db x86/i8259: Mark legacy PIC interrupts with IRQ_LEVEL
| *2eca102b35 block: fix and cleanup bio_check_ro
| *7fe4fab870 Revert "selftests/ftrace: Update synthetic event syntax errors"
| *032a7d5ff5 nfsd: Ensure knfsd shuts down when the "nfsd" pseudofs is unmounted
| *8fe3e574b3 nouveau: explicitly wait on the fence in nouveau_bo_move_m2mf
| *9f3dd454fe Revert "Input: synaptics - switch touchpad on HP Laptop 15-da3001TU to RMI mode"
| *230be65a18 tools: gpio: fix -c option of gpio-event-mon
| *7ff8128bb1 net: mdio-mux-meson-g12a: force internal PHY off on mux switch
| *62a0806eb4 net/tg3: resolve deadlock in tg3_reset_task() during EEH
| *e9c1b1e1a0 thermal: intel: int340x: Add locking to int340x_thermal_get_trip_type()
| *3af20f6321 net: ravb: Fix possible hang if RIS2_QFF1 happen
| *6ef652f35d sctp: fail if no bound addresses can be used for a given scope
| *cf9a2ce038 net/sched: sch_taprio: do not schedule in taprio_reset()
| *7de16d75b2 netrom: Fix use-after-free of a listening socket.
| *498584ccf4 netfilter: conntrack: fix vtag checks for ABORT/SHUTDOWN_COMPLETE
| *7f9828fb1f ipv4: prevent potential spectre v1 gadget in fib_metrics_match()
| *34c6142f0d ipv4: prevent potential spectre v1 gadget in ip_metrics_convert()
| *870a565bd6 netlink: annotate data races around sk_state
| *8583f52c23 netlink: annotate data races around dst_portid and dst_group
| *eccb532ada netlink: annotate data races around nlk->portid
| *0308b7dfea netfilter: nft_set_rbtree: skip elements in transaction from garbage collection
| *4aacf3d784 netfilter: nft_set_rbtree: Switch to node list walk for overlap detection
| *d4c008f3b7 net: fix UaF in netns ops registration error path
| *539ca5dcbc netlink: prevent potential spectre v1 gadgets
| *ed173f77fd i2c: designware: use casting of u64 in clock multiplication to avoid overflow
| *8949ef3a7a i2c: designware: Use DIV_ROUND_CLOSEST() macro
| *8ebc2efcb6 units: Add SI metric prefix definitions
| *974aaf1180 units: Add Watt units
| *76d9ebb7f0 EDAC/qcom: Do not pass llcc_driv_data as edac_device_ctl_info's pvt_info
| *511f6c7c40 EDAC/device: Respect any driver-supplied workqueue polling value
| *0cb922cef7 ARM: 9280/1: mm: fix warning on phys_addr_t to void pointer assignment
| *98d85586aa thermal: intel: int340x: Protect trip temperature from concurrent updates
| *76c5640737 KVM: x86/vmx: Do not skip segment attributes if unusable bit is set
| *e037baee16 cifs: Fix oops due to uncleared server->smbd_conn in reconnect
| *c42a6e6870 ftrace/scripts: Update the instructions for ftrace-bisect.sh
| *886aa44923 trace_events_hist: add check for return value of 'create_hist_field'
| *de3930a488 tracing: Make sure trace_printk() can output as soon as it can be used
| *083b3dda86 module: Don't wait for GOING modules
| *ce3aa76946 scsi: hpsa: Fix allocation size for scsi_host_alloc()
| *6da7055826 xhci: Set HCD flag to defer primary roothub registration
| *1d580d3e13 Bluetooth: hci_sync: cancel cmd_timer if hci_open failed
| *b98a8b731b exit: Use READ_ONCE() for all oops/warn limit reads
| *53f177b504 docs: Fix path paste-o for /sys/kernel/warn_count
| *b0bd5dcfa6 panic: Expose "warn_count" to sysfs
| *8c99d4c4c1 panic: Introduce warn_limit
| *55eba18262 panic: Consolidate open-coded panic_on_warn checks
| *530cdae5c2 exit: Allow oops_limit to be disabled
| *7cffbcd68f exit: Expose "oops_count" to sysfs
| *de586785b9 exit: Put an upper limit on how often we can oops
| *191a3b17dd panic: Separate sysctl logic from CONFIG_SMP
| *1b9a33a94b ia64: make IA64_MCA_RECOVERY bool instead of tristate
| *6d971830da csky: Fix function name in csky_alignment() and die()
| *648d8b8c49 h8300: Fix build errors from do_exit() to make_task_dead() transition
| *63d77c5596 hexagon: Fix function name in die()
| *b2c178f311 objtool: Add a missing comma to avoid string concatenation
| *d9c740c765 exit: Add and use make_task_dead.
| *715a63588f kasan: no need to unset panic_on_warn in end_report()
| *b857b42a8c ubsan: no need to unset panic_on_warn in ubsan_epilogue()
| *590ba6fee0 panic: unset panic_on_warn inside panic()
| *e97ec099d7 kernel/panic: move panic sysctls to its own file
| *e6226917f4 sysctl: add a new register_sysctl_init() interface
| *c4097e844a fs: reiserfs: remove useless new_opts in reiserfs_remount
| *1f6768143b x86: ACPI: cstate: Optimize C3 entry on AMD CPUs
| *5fb884d748 netfilter: conntrack: do not renew entry stuck in tcp SYN_SENT state
| *a7345145e7 Revert "selftests/bpf: check null propagation only neither reg is PTR_TO_BTF_ID"
| *20a02bc845 lockref: stop doing cpu_relax in the cmpxchg loop
| *f8ddf7dbf5 platform/x86: asus-nb-wmi: Add alternate mapping for KEY_SCREENLOCK
| *9968f9a862 platform/x86: touchscreen_dmi: Add info for the CSL Panther Tab HD
| *52249c2168 scsi: hisi_sas: Set a port invalid only if there are no devices attached when refreshing port id
| *71bd134c4e KVM: s390: interrupt: use READ_ONCE() before cmpxchg()
| *300da569a1 spi: spidev: remove debug messages that access spidev->spi without locking
| *a84def9b10 ASoC: fsl-asoc-card: Fix naming of AC'97 CODEC widgets
| *d9a0752a6a ASoC: fsl_ssi: Rename AC'97 streams to avoid collisions with AC'97 CODEC
| *00f2301611 cpufreq: armada-37xx: stop using 0 as NULL pointer
| *2ca345d19c s390/debug: add _ASM_S390_ prefix to header guard
| *ae108a5fc9 drm: Add orientation quirk for Lenovo ideapad D330-10IGL
| *96f4899a38 ASoC: fsl_micfil: Correct the number of steps on SX controls
| *3b154d5204 kcsan: test: don't put the expect array on the stack
| *b75e9fc402 cpufreq: Add Tegra234 to cpufreq-dt-platdev blocklist
| *6bc564f3fe scsi: iscsi: Fix multiple iSCSI session unbind events sent to userspace
| *d79e700680 tcp: fix rate_app_limited to default to 1
| *a84240df70 net: dsa: microchip: ksz9477: port map correction in ALU table entry register
| *704a423c93 driver core: Fix test_async_probe_init saves device in wrong array
| *216f35db6e w1: fix WARNING after calling w1_process()
| *8e5be0ae55 w1: fix deadloop in __w1_remove_master_device()
| *ddf16dae65 tcp: avoid the lookup process failing to get sk in ehash table
| *5f10f7efe0 nvme-pci: fix timeout request state check
| *98519ed691 dmaengine: xilinx_dma: call of_node_put() when breaking out of for_each_child_of_node()
| *28fc6095da HID: betop: check shape of output reports
| *16791d5a7a l2tp: prevent lockdep issue in l2tp_tunnel_register()
| *f96b2f6908 net: macb: fix PTP TX timestamp failure due to packet padding
| *42ecd72f02 dmaengine: Fix double increment of client_count in dma_chan_get()
| *1e97e2e08e drm/panfrost: fix GENERIC_ATOMIC64 dependency
| *31f63c62a8 net: mlx5: eliminate anonymous module_init & module_exit
| *4b3b5cc1a7 usb: gadget: f_fs: Ensure ep0req is dequeued before free_request
| *6dd9ea0553 usb: gadget: f_fs: Prevent race during ffs_ep0_queue_wait
| *55be77aa89 HID: revert CHERRY_MOUSE_000C quirk
| *34f1194993 net: stmmac: fix invalid call to mdiobus_get_phy()
| *20fd459876 HID: check empty report_list in bigben_probe()
| *5dc3469a11 HID: check empty report_list in hid_validate_values()
| *4bc5f1f6bc net: mdio: validate parameter addr in mdiobus_get_phy()
| *67866b1e0a net: usb: sr9700: Handle negative len
| *2d77e5c0ad l2tp: close all race conditions in l2tp_tunnel_register()
| *76c640d6a1 l2tp: convert l2tp_tunnel_list to idr
| *5b209b8c99 l2tp: Don't sleep and disable BH under writer-side sk_callback_lock
| *e34a965f77 l2tp: Serialize access to sk_user_data with sk_callback_lock
| *c60fe70078 net/sched: sch_taprio: fix possible use-after-free
| *802fd7623e wifi: rndis_wlan: Prevent buffer overflow in rndis_query_oid
| *1af8071bd0 gpio: mxc: Always set GPIOs used as interrupt source to INPUT mode
| *613020d048 net: wan: Add checks for NULL for utdm in undo_uhdlc_init and unmap_si_regs
| *ad1baab3a5 net: nfc: Fix use-after-free in local_cleanup()
| *2a0156a4aa phy: rockchip-inno-usb2: Fix missing clk_disable_unprepare() in rockchip_usb2phy_power_on()
| *da75dec7c6 bpf: Fix pointer-leak due to insufficient speculative store bypass mitigation
| *f351af45e2 amd-xgbe: Delay AN timeout during KR training
| *a65a8727a2 amd-xgbe: TX Flow Ctrl Registers are h/w ver dependent
| *aa8b584cec ARM: dts: at91: sam9x60: fix the ddr clock for sam9x60
| *fa566549a1 phy: ti: fix Kconfig warning and operator precedence
| *b18490138d PM: AVS: qcom-cpr: Fix an error handling path in cpr_probe()
| *39ab0fc498 affs: initialize fsdata in affs_truncate()
| *f6fa12fbb1 IB/hfi1: Remove user expected buffer invalidate race
| *6ce4382bd1 IB/hfi1: Immediately remove invalid memory from hardware
| *6dd8136fd1 IB/hfi1: Fix expected receive setup error exit issues
| *ee474dd66e IB/hfi1: Reserve user expected TIDs
| *73e5666bf3 IB/hfi1: Reject a zero-length user expected buffer
| *d66c1d4178 RDMA/core: Fix ib block iterator counter overflow
| *eab7a92037 tomoyo: fix broken dependency on *.conf.default
| *6813d8ba7d firmware: arm_scmi: Harden shared memory access in fetch_notification
| *e85df1db28 firmware: arm_scmi: Harden shared memory access in fetch_response
| *329fbd2603 EDAC/highbank: Fix memory leak in highbank_mc_probe()
| *7b4516ba56 HID: intel_ish-hid: Add check for ishtp_dma_tx_map
| *d775671dcc ARM: imx: add missing of_node_put()
| *5c1dcedd52 arm64: dts: imx8mm-beacon: Fix ecspi2 pinmux
| *cccb0aea9c ARM: dts: imx6qdl-gw560x: Remove incorrect 'uart-has-rtscts'
| *6805e392f5 ARM: dts: imx7d-pico: Use 'clock-frequency'
| *2a3c3a01e2 ARM: dts: imx6ul-pico-dwarf: Use 'clock-frequency'
| *e57ea0c6ba memory: mvebu-devbus: Fix missing clk_disable_unprepare in mvebu_devbus_probe()
| *53f55d6e07 memory: atmel-sdramc: Fix missing clk_disable_unprepare in atmel_ramc_probe()
| *935ec78de5 clk: Provide new devm_clk helpers for prepared and enabled clocks
| *0b8b21c0b3 clk: generalize devm_clk_get() a bit
* |e5ea3c44c8 Revert "xhci: Add update_hub_device override for PCI xHCI hosts"
* |a73c1dbdd5 Revert "xhci: Detect lpm incapable xHC USB3 roothub ports from ACPI tables"
* |fa89210a0e Revert "xhci: Add a flag to disable USB3 lpm on a xhci root port level."
* |78da590924 Merge 5.10.165 into android12-5.10-lts
|\|
| *179624a57b Linux 5.10.165
| *e699cce29a io_uring/rw: remove leftover debug statement
| *3d5f181bda io_uring/rw: ensure kiocb_end_write() is always called
| *c1a279d79e io_uring: fix double poll leak on repolling
| *ddaaadf22b io_uring: Clean up a false-positive warning from GCC 9.3.0
| *8bc72b4952 mm/khugepaged: fix collapse_pte_mapped_thp() to allow anon_vma
| *217721b763 Bluetooth: hci_qca: Fixed issue during suspend
| *c208f1e84a Bluetooth: hci_qca: check for SSR triggered flag while suspend
| *ef11bc4bb9 Bluetooth: hci_qca: Wait for SSR completion during suspend
| *c392c350a0 soc: qcom: apr: Make qcom,protection-domain optional again
| *71e5cd1018 Revert "wifi: mac80211: fix memory leak in ieee80211_if_add()"
| *be1067427a net/mlx5: fix missing mutex_unlock in mlx5_fw_fatal_reporter_err_work()
| *f6c201b438 net/ulp: use consistent error code when blocking ULP
| *fc2491562a io_uring/net: fix fast_iov assignment in io_setup_async_msg()
| *89a77271d2 io_uring: io_kiocb_update_pos() should not touch file for non -1 offset
| *c6e3c12ff9 tracing: Use alignof__(struct {type b;}) instead of offsetof()
| *03ba86bb38 x86/fpu: Use _Alignof to avoid undefined behavior in TYPE_ALIGN
| *2f45b20c39 Revert "drm/amdgpu: make display pinning more flexible (v2)"
| *d6544bccc1 efi: rt-wrapper: Add missing include
| *4012603cbd arm64: efi: Execute runtime services from a dedicated stack
| *bf1d287c14 drm/amd/display: Fix COLOR_SPACE_YCBCR2020_TYPE matrix
| *75105d943d drm/amd/display: Calculate output_color_space after pixel encoding adjustment
| *a3ef532483 drm/amd/display: Fix set scaling doesn's work
| *59590f50ec drm/i915: re-disable RC6p on Sandy Bridge
| *d960fff8e2 mei: me: add meteor lake point M DID
| *ae2a9dcc8c gsmi: fix null-deref in gsmi_get_variable
| *a75e80d118 serial: atmel: fix incorrect baudrate setup
| *5a7a040795 dmaengine: tegra210-adma: fix global intr clear
| *c9da2cb968 serial: pch_uart: Pass correct sg to dma_unmap_sg()
| *e924f79e67 dt-bindings: phy: g12a-usb3-pcie-phy: fix compatible string documentation
| *31132df12a dt-bindings: phy: g12a-usb2-phy: fix compatible string documentation
| *a9f2658a01 usb-storage: apply IGNORE_UAS only for HIKSEMI MD202 on RTL9210
| *e92c700591 usb: gadget: f_ncm: fix potential NULL ptr deref in ncm_bitrate()
| *06600ae7e0 usb: gadget: g_webcam: Send color matching descriptor per frame
| *6107a8f15c usb: typec: altmodes/displayport: Fix pin assignment calculation
| *d26f38d16f usb: typec: altmodes/displayport: Add pin assignment helper
| *9c58f1e9e6 usb: host: ehci-fsl: Fix module alias
| *3dc896db02 USB: serial: cp210x: add SCALANCE LPE-9000 device id
| *856e4b5e53 USB: gadgetfs: Fix race between mounting and unmounting
| *894681682d tty: serial: qcom-geni-serial: fix slab-out-of-bounds on RX FIFO buffer
| *c4ab24e333 thunderbolt: Use correct function to calculate maximum USB3 link rate
| *531268a875 cifs: do not include page data when checking signature
| *3bd4337485 btrfs: fix race between quota rescan and disable leading to NULL pointer deref
| *6ee8feca91 mmc: sdhci-esdhc-imx: correct the tuning start tap and step setting
| *79819909c2 mmc: sunxi-mmc: Fix clock refcount imbalance during unbind
| *2eed23765b comedi: adv_pci1760: Fix PWM instruction handling
| *7efeed828c usb: core: hub: disable autosuspend for TI TUSB8041
| *b171d0d2cf misc: fastrpc: Fix use-after-free race condition for maps
| *193cd85314 misc: fastrpc: Don't remove map on creater_process and device_release
| *e0db5d44bc USB: misc: iowarrior: fix up header size for USB_DEVICE_ID_CODEMERCS_IOW100
| *20d0dedc7a staging: vchiq_arm: fix enum vchiq_status return types
| *a06e9ec5ab USB: serial: option: add Quectel EM05CN modem
| *2f44c60bb8 USB: serial: option: add Quectel EM05CN (SG) modem
| *fcd49b2309 USB: serial: option: add Quectel EC200U modem
| *21c5b61812 USB: serial: option: add Quectel EM05-G (RS) modem
| *46b898f934 USB: serial: option: add Quectel EM05-G (CS) modem
| *3774654f7a USB: serial: option: add Quectel EM05-G (GR) modem
| *9f8e45720e prlimit: do_prlimit needs to have a speculation check
| *96562a23cf xhci: Detect lpm incapable xHC USB3 roothub ports from ACPI tables
| *2551f8cbf2 usb: acpi: add helper to check port lpm capability using acpi _DSM
| *4d70a8a9ab xhci: Add a flag to disable USB3 lpm on a xhci root port level.
| *83e3a5be74 xhci: Add update_hub_device override for PCI xHCI hosts
| *081105213f xhci: Fix null pointer dereference when host dies
| *66fc160085 usb: xhci: Check endpoint is valid before dereferencing it
| *8ca60d59b9 xhci-pci: set the dma max_seg_size
| *ea2e6286e3 io_uring/rw: defer fsnotify calls to task context
| *e90cfb9699 io_uring: do not recalculate ppos unnecessarily
| *ea528ecac3 io_uring: update kiocb->ki_pos at execution time
| *076f872314 io_uring: remove duplicated calls to io_kiocb_ppos
| *e9c6556708 io_uring: ensure that cached task references are always put on exit
| *e0140e9da3 io_uring: fix CQ waiting timeout handling
| *de77faee28 io_uring: lock overflowing for IOPOLL
| *78e8151f04 io_uring: check for valid register opcode earlier
| *aa4c9b3e45 io_uring: fix async accept on O_NONBLOCK sockets
| *4bc17e6381 io_uring: allow re-poll if we made progress
| *f901b4bfd0 io_uring: support MSG_WAITALL for IORING_OP_SEND(MSG)
| *96ccba4a1a io_uring: add flag for disabling provided buffer recycling
| *aadd9b0930 io_uring: ensure recv and recvmsg handle MSG_WAITALL correctly
| *abdc16c836 io_uring: improve send/recv error handling
| *2fd232bbd6 io_uring: don't gate task_work run on TIF_NOTIFY_SIGNAL
| *e84ec6e25d Bluetooth: hci_qca: Fix driver shutdown on closed serdev
| *1ab0098333 Bluetooth: hci_qca: Wait for timeout during suspend
| *413638f615 drm/i915/gt: Reset twice
| *cab2123567 ALSA: hda/realtek - Turn on power early
| *5822baf950 efi: fix userspace infinite retry read efivars after EFI runtime services page fault
| *712bd74ecc nilfs2: fix general protection fault in nilfs_btree_insert()
| *03bf73e09a zonefs: Detect append writes at invalid locations
| *20d0a6d17e Add exception protection processing for vd in axi_chan_handle_err function
| *187523fa7c wifi: mac80211: sdata can be NULL during AMPDU start
| *2d1fd99e8e wifi: brcmfmac: fix regression for Broadcom PCIe wifi devices
| *72009139a6 f2fs: let's avoid panic if extent_tree is not created
| *bf6c7f1801 x86/asm: Fix an assembler warning with current binutils
| *18bd1c9c02 btrfs: always report error in run_one_delayed_ref()
| *936b8b15a2 RDMA/srp: Move large values to a new enum for gcc13
| *0040e48492 net/ethtool/ioctl: return -EOPNOTSUPP if we have no phy stats
| *f7845de23f tools/virtio: initialize spinlocks in vring_test.c
| *3093027183 selftests/bpf: check null propagation only neither reg is PTR_TO_BTF_ID
| *c7c36bb6ea pNFS/filelayout: Fix coalescing test for single DS
| *2cbd815970 btrfs: fix trace event name typo for FLUSH_DELAYED_REFS
* |1e32d1c96a Revert "xhci: Prevent infinite loop in transaction errors recovery for streams"
* |b0d4a37a43 Merge 5.10.164 into android12-5.10-lts
|\|
| *3a9f1b907b Linux 5.10.164
| *74985c5757 Revert "usb: ulpi: defer ulpi_register on ulpi_read_id timeout"
| *a88a0d16e1 io_uring/io-wq: only free worker if it was allocated for creation
| *b912ed1363 io_uring/io-wq: free worker if task_work creation is canceled
| *68bcd06385 drm/virtio: Fix GEM handle creation UAF
| *4ca71bc0e1 efi: fix NULL-deref in init error path
| *057f5ddfbc arm64: cmpxchg_double*: hazard against entire exchange variable
| *9a5fd0844e arm64: atomics: remove LL/SC trampolines
| *28840e46ea arm64: atomics: format whitespace consistently
| *5dac4c7212 x86/resctrl: Fix task CLOSID/RMID update race
| *446c7251f0 x86/resctrl: Use task_curr() instead of task_struct->on_cpu to prevent unnecessary IPI
| *196c6f0c3e KVM: x86: Do not return host topology information from KVM_GET_SUPPORTED_CPUID
| *0027164b24 Documentation: KVM: add API issues section
| *caaea2ab6b iommu/mediatek-v1: Fix an error handling path in mtk_iommu_v1_probe()
| *cf38e76241 iommu/mediatek-v1: Add error handle for mtk_iommu_probe
| *60806adc9b mm: Always release pages to the buddy allocator in memblock_free_late().
| *092f0c2d1f net/mlx5e: Don't support encap rules with gbp option
| *b3d47227f0 net/mlx5: Fix ptp max frequency adjustment range
| *453277feb4 net/sched: act_mpls: Fix warning during failed attribute validation
| *0ca78c9965 nfc: pn533: Wait for out_urb's completion in pn533_usb_send_frame()
| *92b30a27e4 hvc/xen: lock console list traversal
| *14e72a56e1 octeontx2-af: Fix LMAC config in cgx_lmac_rx_tx_enable
| *8e2bfcfaab octeontx2-af: Map NIX block from CGX connection
| *d9be5b57ab octeontx2-af: Update get/set resource count functions
| *0d0675bc33 tipc: fix unexpected link reset due to discovery messages
| *d83cac6c00 ASoC: wm8904: fix wrong outputs volume after power reactivation
| *d4aa749e04 regulator: da9211: Use irq handler when ready
| *3ca8ef4d91 EDAC/device: Fix period calculation in edac_device_reset_delay_period()
| *28b9a0e216 x86/boot: Avoid using Intel mnemonics in AT&T syntax asm
| *8cbeb60320 powerpc/imc-pmu: Fix use of mutex in IRQs disabled section
| *4e6a70fd84 netfilter: ipset: Fix overflow before widen in the bitmap_ip_create() function.
| *a3a1114aa6 xfrm: fix rcu lock in xfrm_notify_userpolicy()
| *091f85db4c ext4: fix uninititialized value in 'ext4_evict_inode'
| *98407a4ae3 usb: ulpi: defer ulpi_register on ulpi_read_id timeout
| *3d13818a99 xhci: Prevent infinite loop in transaction errors recovery for streams
| *2f90fcedc5 xhci: move and rename xhci_cleanup_halted_endpoint()
| *cad965cedb xhci: store TD status in the td struct instead of passing it along
| *9b63a80c45 xhci: move xhci_td_cleanup so it can be called by more functions
| *44c635c60f xhci: Add xhci_reset_halted_ep() helper function
| *10287d18f5 xhci: adjust parameters passed to cleanup_halted_endpoint()
| *aaaa7cc4ab xhci: get isochronous ring directly from endpoint structure
| *a81ace0656 xhci: Avoid parsing transfer events several times
| *ba20d6056b clk: imx: imx8mp: add shared clk gate for usb suspend clk
| *2b331d2137 dt-bindings: clocks: imx8mp: Add ID for usb suspend clock
| *cb769960ef clk: imx8mp: add clkout1/2 support
| *85eaaa17c0 clk: imx8mp: Add DISP2 pixel clock
| *6b21077146 iommu/amd: Fix ill-formed ivrs_ioapic, ivrs_hpet and ivrs_acpihid options
| *5badda810f iommu/amd: Add PCI segment support for ivrs_[ioapic/hpet/acpihid] commands
| *ab9bb65b85 bus: mhi: host: Fix race between channel preparation and M0 event
| *6c9e2c11c3 ipv6: raw: Deduct extension header length in rawv6_push_pending_frames
| *112df4cd2b ixgbe: fix pci device refcount leak
| *f401062d8d platform/x86: sony-laptop: Don't turn off 0x153 keyboard backlight during probe
| *785607e5e6 drm/msm/dp: do not complete dp_aux_cmd_fifo_tx() if irq is not for aux transfer
| *8c71777b6a drm/msm/adreno: Make adreno quirks not overwrite each other
| *afb6063aa8 cifs: Fix uninitialized memory read for smb311 posix symlink create
| *51dbedee2f s390/percpu: add READ_ONCE() to arch_this_cpu_to_op_simple()
| *bddb355267 s390/cpum_sf: add READ_ONCE() semantics to compare and swap loops
| *2adc64f3e6 ASoC: qcom: lpass-cpu: Fix fallback SD line index handling
| *5ee3083307 s390/kexec: fix ipl report address for kdump
| *d1725dbf23 perf auxtrace: Fix address filter duplicate symbol selection
| *eaabceae1b docs: Fix the docs build with Sphinx 6.0
| *38c4a17c6b efi: tpm: Avoid READ_ONCE() for accessing the event log
| *c47883105c KVM: arm64: Fix S1PTW handling on RO memslots
| *443b390f2c ALSA: hda/realtek: Enable mute/micmute LEDs on HP Spectre x360 13-aw0xxx
| *550efeff98 netfilter: nft_payload: incorrect arithmetics when fetching VLAN header bits
* |2702f09758 Revert "ASoC/SoundWire: dai: expand 'stream' concept beyond SoundWire"
* |5417a09eec Revert "ASoC: Intel/SOF: use set_stream() instead of set_tdm_slots() for HDAudio"
* |c35badfe0d Revert "net: add atomic_long_t to net_device_stats fields"
* |f1242cd146 Revert "PM/devfreq: governor: Add a private governor_data for governor"
* |4922049993 Merge 5.10.163 into android12-5.10-lts
|\|
| *19ff2d645f Linux 5.10.163
| *de4a20e148 ALSA: hda - Enable headset mic on another Dell laptop with ALC3254
| *0ad275c139 ALSA: hda/hdmi: Add a HP device 0x8715 to force connect list
| *df02234e6b ALSA: pcm: Move rwsem lock inside snd_ctl_elem_read to prevent UAF
| *f8ed0a93b5 net/ulp: prevent ULP without clone op from entering the LISTEN status
| *9f7bc28a6b net: sched: disallow noqueue for qdisc classes
| *6eb02c596e mptcp: use proper req destructor for IPv6
| *f4c7afa951 mptcp: dedicated request sock for subflow in v6
| *31472f94c6 mptcp: remove MPTCP 'ifdef' in TCP SYN cookies
| *5aa15a8400 mptcp: mark ops structures as ro_after_init
| *f5ef26276b serial: fixup backport of "serial: Deassert Transmit Enable on probe in driver-specific way"
| *2ecf0819e4 fsl_lpuart: Don't enable interrupts too early
| *23ad034760 ext4: don't set up encryption key during jbd2 transaction
| *d9ff5ad203 ext4: disable fast-commit of encrypted dir operations
| *5b700b9c04 parisc: Align parisc MADV_XXX constants with all other architectures
| *07b3672c40 io_uring: Fix unsigned 'res' comparison with zero in io_fixup_rw_res()
| *b57d7b1dcd efi: random: combine bootloader provided RNG seed with RNG protocol output
| *da20f56a35 mbcache: Avoid nesting of cache->c_list_lock under bit locks
| *be01f35efa hfs/hfsplus: avoid WARN_ON() for sanity check, use proper error handling
| *1f881d9201 hfs/hfsplus: use WARN_ON for sanity check
| *434909edca selftests: set the BUILD variable to absolute path
| *a41d63f204 ext4: don't allow journal inode to have encrypt flag
| *af90f8b36d drm/i915/gvt: fix vgpu debugfs clean in remove
| *bb7c7b2c89 drm/i915/gvt: fix gvt debugfs destroy
| *bc847857fb riscv: uaccess: fix type of 0 variable on error in get_user()
| *f64e56fb28 fbdev: matroxfb: G200eW: Increase max memory from 1 MB to 16 MB
| *d0c46b55d6 nfsd: fix handling of readdir in v4root vs. mount upcall timeout
| *67e39c4f4c x86/bugs: Flush IBP in ib_prctl_set()
| *f13301a69a nvme: fix multipath crash caused by flush request when blktrace is enabled
| *3f257a98e5 ASoC: Intel: bytcr_rt5640: Add quirk for the Advantech MICA-071 tablet
| *6df376e245 udf: Fix extension of the last extent in the file
| *84b2cc7b36 caif: fix memory leak in cfctrl_linkup_request()
| *e5a0583744 drm/i915: unpin on error in intel_vgpu_shadow_mm_pin()
| *232ef345e5 usb: rndis_host: Secure rndis_query check against int overflow
| *2a9ee7c24b drivers/net/bonding/bond_3ad: return when there's no aggregator
| *bc6a0993bf perf tools: Fix resources leak in perf_data__open_dir()
| *ee756980e4 netfilter: ipset: Rework long task execution when adding/deleting entries
| *ba5d279097 netfilter: ipset: fix hash:net,port,net hang with /0 subnet
| *b2c917e510 net: sched: cbq: dont intepret cls results when asked to drop
| *5f65f48516 net: sched: atm: dont intepret cls results when asked to drop
| *f4a2ad1002 gpio: sifive: Fix refcount leak in sifive_gpio_probe
| *7ec369e215 ceph: switch to vfs_inode_has_locks() to fix file lock bug
| *407710427d filelock: new helper: vfs_inode_has_locks
| *9f0ff5de3e drm/meson: Reduce the FIFO lines held when AFBC is not used
| *ae2639cd2c RDMA/mlx5: Fix validation of max_rd_atomic caps for DC
| *106d0d33c9 net: phy: xgmiitorgmii: Fix refcount leak in xgmiitorgmii_probe
| *398e14bb73 net: amd-xgbe: add missed tasklet_kill
| *e3d90ca906 net/mlx5e: Fix hw mtu initializing at XDP SQ allocation
| *6d655a9d82 net/mlx5e: IPoIB, Don't allow CQE compression to be turned on by default
| *670b206173 net/mlx5: Avoid recovery in probe flows
| *66b92b80c9 net/mlx5: Add forgotten cleanup calls into mlx5_init_once() error path
| *b6c74d2376 vhost: fix range used in translate_desc()
| *264fb6dcbf vringh: fix range used in iotlb_translate()
| *eabb3ceb04 vhost/vsock: Fix error handling in vhost_vsock_init()
| *e0f5c962c0 nfc: Fix potential resource leaks
| *513787ff9a qlcnic: prevent ->dcb use-after-free on qlcnic_dcb_enable() failure
| *b314f6c351 net: sched: fix memory leak in tcindex_set_parms
| *4226ce95a9 net: hns3: add interrupts re-initialization while doing VF FLR
| *998ebbdc3b nfsd: shut down the NFSv4 state objects before the filecache
| *69d896b609 veth: Fix race with AF_XDP exposing old or uninitialized descriptors
| *5f41212dc2 vmxnet3: correctly report csum_level for encapsulated packet
| *0b70f6ea4d drm/panfrost: Fix GEM handle creation ref-counting
| *e68e088d0d bpf: pull before calling skb_postpull_rcsum()
| *cb0d627bc7 SUNRPC: ensure the matching upcall is in-flight upon downcall
| *1be16a0c2f ext4: fix deadlock due to mbcache entry corruption
| *0da99012d3 mbcache: automatically delete entries from cache on freeing
| *1a56cd972c ext4: fix race when reusing xattr blocks
| *4cc218e217 ext4: unindent codeblock in ext4_xattr_block_set()
| *0e6fbc566f ext4: remove EA inode entry from mbcache on inode eviction
| *27c0867397 mbcache: add functions to delete entry if unused
| *fb59d12ae7 mbcache: don't reclaim used entries
| *4c363e2961 ext4: use kmemdup() to replace kmalloc + memcpy
| *b8b7922374 ext4: fix leaking uninitialized memory in fast-commit journal
| *a5584ba9b3 ext4: fix various seppling typos
| *adfefe804b ext4: simplify ext4 error translation
| *95eaa8a953 ext4: move functions in super.c
| *769469f8f1 fs: ext4: initialize fsdata in pagecache_write()
| *b33e42d65e ext4: use memcpy_to_page() in pagecache_write()
| *60d4383c1b mm/highmem: Lift memcpy_[to|from]_page to core
| *f86d3338c8 ext4: correct inconsistent error msg in nojournal mode
| *99017eb3de ext4: goto right label 'failed_mount3a'
| *56d87959c6 riscv: stacktrace: Fixup ftrace_graph_ret_addr retp argument
| *ecb8e8b2e5 riscv/stacktrace: Fix stack output without ra on the stack top
| *b5c75efd0a ravb: Fix "failed to switch device to config mode" message during unbind
| *5451efb2ca staging: media: tegra-video: fix device_node use after free
| *f899baf6cb x86/kprobes: Fix optprobe optimization check with CONFIG_RETHUNK
| *5d112deb2a x86/kprobes: Convert to insn_decode()
| *a1766efc5b perf probe: Fix to get the DW_AT_decl_file and DW_AT_call_file as unsinged data
| *b5d0f7c240 perf probe: Use dwarf_attr_integrate as generic DWARF attr accessor
| *c0a3d21584 media: s5p-mfc: Fix in register read and write for H264
| *8ff64edf9d media: s5p-mfc: Clear workbit to handle error condition
| *dcd1a4ade5 media: s5p-mfc: Fix to handle reference queue during finishing
| *97e7896000 x86/MCE/AMD: Clear DFR errors found in THR handler
| *ec75bc4368 x86/mce: Get rid of msr_ops
| *58de7a95f0 btrfs: replace strncpy() with strscpy()
| *7a04f85009 perf/x86/intel/uncore: Clear attr_update properly
| *53d24a9592 perf/x86/intel/uncore: Generalize I/O stacks to PMON mapping procedure
| *9620f8a5c7 ARM: renumber bits related to _TIF_WORK_MASK
| *6302709784 drm/amdgpu: make display pinning more flexible (v2)
| *dfc01905b8 drm/amdgpu: handle polaris10/11 overlap asics (v2)
| *30e95fdc96 ext4: allocate extended attribute value in vmalloc area
| *8d3e87d43c ext4: avoid unaccounted block allocation when expanding inode
| *15d0cf84df ext4: initialize quota before expanding inode in setproject ioctl
| *9882601ee6 ext4: fix inode leak in ext4_xattr_inode_create() on an error path
| *407f47728c ext4: avoid BUG_ON when creating xattrs
| *00092b218d ext4: fix error code return to user-space in ext4_get_branch()
| *f06c980287 ext4: fix corruption when online resizing a 1K bigalloc fs
| *9404839e0c ext4: fix delayed allocation bug in ext4_clu_mapped for bigalloc + inline
| *84a2f2ed49 ext4: init quota for 'old.inode' in 'ext4_rename'
| *71e99ec131 ext4: fix bug_on in __es_tree_search caused by bad boot loader inode
| *9020f56a3c ext4: check and assert if marking an no_delete evicting inode dirty
| *86c2a2ec4b ext4: fix reserved cluster accounting in __es_remove_extent()
| *98004f926d ext4: fix bug_on in __es_tree_search caused by bad quota inode
| *20af66617e ext4: add helper to check quota inums
| *c0a738875c ext4: add EXT4_IGET_BAD flag to prevent unexpected bad inode
| *f9cd698080 ext4: fix undefined behavior in bit shift for ext4_check_flag_values
| *7223d5e75f ext4: fix use-after-free in ext4_orphan_cleanup
| *d6d18d6e2d ext4: add inode table check in __ext4_get_inode_loc to aovid possible infinite loop
| *bdc698ce91 ext4: silence the warning when evicting inode with dioread_nolock
| *68af1a4842 drm/ingenic: Fix missing platform_driver_unregister() call in ingenic_drm_init()
| *bf83a303f2 drm/i915/dsi: fix VBT send packet port selection for dual link DSI
| *439cbbc151 drm/vmwgfx: Validate the box size for the snooped cursor
| *0a0662d597 drm/connector: send hotplug uevent on connector cleanup
| *21a773ec89 device_cgroup: Roll back to original exceptions after copy failure
| *3505c187b8 parisc: led: Fix potential null-ptr-deref in start_task()
| *2c7c487cd8 remoteproc: core: Do pm_relax when in RPROC_OFFLINE state
| *e291dea722 iommu/amd: Fix ivrs_acpihid cmdline parsing code
| *28e71fd8d5 driver core: Fix bus_type.match() error handling in __driver_attach()
| *772dbbfc20 crypto: n2 - add missing hash statesize
| *7c44205748 PCI/sysfs: Fix double free in error path
| *99ef6cc791 PCI: Fix pci_device_is_present() for VFs by checking PF
| *f29d127b37 ipmi: fix use after free in _ipmi_destroy_user()
| *bfe1e039a0 ima: Fix a potential NULL pointer access in ima_restore_measurement_list
| *62307558e7 mtd: spi-nor: Check for zero erase size in spi_nor_find_best_erase_type()
| *4e17819cb3 ipmi: fix long wait in unload when IPMI disconnect
| *24bc27ea4e ASoC: jz4740-i2s: Handle independent FIFO flush bits
| *652f1d66a8 wifi: wilc1000: sdio: fix module autoloading
| *d9f6614a73 efi: Add iMac Pro 2017 to uefi skip cert quirk
| *ffcf71676d md/bitmap: Fix bitmap chunk size overflow issues
| *e94443252b rtc: ds1347: fix value written to century register
| *6155aed476 cifs: fix missing display of three mount options
| *8c82733e24 cifs: fix confusing debug message
| *3df07728ab media: dvb-core: Fix UAF due to refcount races at releasing
| *7dd5a68cdb media: dvb-core: Fix double free in dvb_register_device()
| *1032520b21 ARM: 9256/1: NWFPE: avoid compiler-generated __aeabi_uldivmod
| *1306614412 staging: media: tegra-video: fix chan->mipi value on error
| *52c0622e53 tracing: Fix infinite loop in tracing_read_pipe on overflowed print_trace_line
| *b838b1b9ca tracing/hist: Fix wrong return value in parse_action_params()
| *ff3dd2c1be x86/kprobes: Fix kprobes instruction boudary check with CONFIG_RETHUNK
| *362495bf45 ftrace/x86: Add back ftrace_expected for ftrace bug reports
| *b677629cae x86/microcode/intel: Do not retry microcode reloading on the APs
| *43dd254853 KVM: nVMX: Inject #GP, not #UD, if "generic" VMXON CR0/CR4 check fails
| *e61eacf993 perf/core: Call LSM hook after copying perf_event_attr
| *0cb31bd883 tracing/hist: Fix out-of-bound write on 'action_data.var_ref_idx'
| *18a489a3fd dm cache: set needs_check flag after aborting metadata
| *2b17026685 dm cache: Fix UAF in destroy()
| *342cfd8426 dm clone: Fix UAF in clone_dtr()
| *a506b5c927 dm integrity: Fix UAF in dm_integrity_dtr()
| *34fe9c2251 dm thin: Fix UAF in run_timer_softirq()
| *c84d1ca228 dm thin: resume even if in FAIL mode
| *94f01ecc2a dm thin: Use last transaction's pmd->root when commit failed
| *7e37578069 dm thin: Fix ABBA deadlock between shrink_slab and dm_pool_abort_metadata
| *b45e77b792 dm cache: Fix ABBA deadlock between shrink_slab and dm_cache_metadata_abort
| *d9fa243ab2 ALSA: hda/realtek: Apply dual codec fixup for Dell Latitude laptops
| *2437b06223 ALSA: patch_realtek: Fix Dell Inspiron Plus 16
| *e379b88a8f cpufreq: Init completion before kobject_init_and_add()
| *cea018aaf7 PM/devfreq: governor: Add a private governor_data for governor
| *d1d73c3034 selftests: Use optional USERCFLAGS and USERLDFLAGS
| *12576d2ebf arm64: dts: qcom: sdm850-lenovo-yoga-c630: correct I2C12 pins drive strength
| *8546f11c42 ARM: ux500: do not directly dereference __iomem
| *0061ab5153 btrfs: fix resolving backrefs for inline extent followed by prealloc
| *c0aa6e6ab0 mmc: sdhci-sprd: Disable CLK_AUTO when the clock is less than 400K
| *e918762f8a arm64: dts: qcom: sdm845-db845c: correct SPI2 pins drive strength
| *c023597bae jbd2: use the correct print format
| *8c444b3061 ktest.pl minconfig: Unset configs instead of just removing them
| *5148dfceab kest.pl: Fix grub2 menu handling for rebooting
| *780297af3c soc: qcom: Select REMAP_MMIO for LLCC driver
| *d5db9aaf1b media: stv0288: use explicitly signed char
| *25dbd87379 net/af_packet: make sure to pull mac header
| *c2137d565c net/af_packet: add VLAN support for AF_PACKET SOCK_RAW GSO
| *7c15d7ecce rcu: Prevent lockdep-RCU splats on lock acquisition/release
| *4c57f612f4 torture: Exclude "NOHZ tick-stop error" from fatal errors
| *289f512d08 wifi: rtlwifi: 8192de: correct checking of IQK reload
| *0f6d6974b0 wifi: rtlwifi: remove always-true condition pointed out by GCC 12
| *40b844796b net/mlx5e: Fix nullptr in mlx5e_tc_add_fdb_flow()
| *8b20aab8cf ASoC/SoundWire: dai: expand 'stream' concept beyond SoundWire
| *185c141946 ASoC: Intel/SOF: use set_stream() instead of set_tdm_slots() for HDAudio
| *7b3631a2e1 kcsan: Instrument memcpy/memset/memmove with newer Clang
| *2cd6026e25 SUNRPC: Don't leak netobj memory when gss_read_proxy_verf() fails
| *3b6c822238 tpm: tpm_tis: Add the missed acpi_put_table() to fix memory leak
| *0bd9b4be72 tpm: tpm_crb: Add the missed acpi_put_table() to fix memory leak
| *8ddc48068a tpm: acpi: Call acpi_put_table() to fix memory leak
| *b51d5fed9f mmc: vub300: fix warning - do not call blocking ops when !TASK_RUNNING
| *252a720882 f2fs: should put a page when checking the summary info
| *882734bbc5 mm, compaction: fix fast_isolate_around() to stay within boundaries
| *ae77930277 md: fix a crash in mempool_free
| *b591b2919d pnode: terminate at peers of source
| *66f359ad66 ALSA: line6: fix stack overflow in line6_midi_transmit
| *5e79f77ea4 ALSA: line6: correct midi status byte when receiving data from podxt
| *56abf8046b ovl: Use ovl mounter's fsuid and fsgid in ovl_link()
| *c3e8bbcbaa binfmt: Fix error return code in load_elf_fdpic_binary()
| *12407462d3 hfsplus: fix bug causing custom uid and gid being unable to be assigned with mount
| *44cf50587e pstore/zone: Use GFP_ATOMIC to allocate zone buffer
| *0d992c044c HID: plantronics: Additional PIDs for double volume key presses quirk
| *eaf0b78226 HID: multitouch: fix Asus ExpertBook P2 P2451FA trackpoint
| *6f7e2fcab7 powerpc/rtas: avoid scheduling in rtas_os_term()
| *f2167f10fc powerpc/rtas: avoid device tree lookups in rtas_os_term()
| *0af0e115ff objtool: Fix SEGFAULT
| *57ae492f62 nvmet: don't defer passthrough commands with trivial effects to the workqueue
| *4b3282a977 nvme: fix the NVME_CMD_EFFECTS_CSE_MASK definition
| *ab711f3eda ata: ahci: Fix PCS quirk application for suspend
| *cc512539c4 nvme-pci: fix page size checks
| *dfb6d54893 nvme-pci: fix mempool alloc size
| *f5d8738fbe nvme-pci: fix doorbell buffer value endianness
| *fe6ea044c4 cifs: fix oops during encryption
| *f9089b9554 usb: dwc3: qcom: Fix memory leak in dwc3_qcom_interconnect_init
| *ce2462bcf3 pwm: tegra: Fix 32 bit build
| *a8be7c2787 media: dvbdev: fix refcnt bug
| *153319671a media: dvbdev: fix build warning due to comments
| *740c537f52 ovl: fix use inode directly in rcu-walk mode
| *f24474d12e gcov: add support for checksum field
| *36be7afca1 regulator: core: fix deadlock on regulator enable
| *e12f4c3212 iio: adc128s052: add proper .data members in adc128_of_match table
| *9f604702b7 iio: adc: ad_sigma_delta: do not use internal iio_dev lock
| *582f5fc2c5 reiserfs: Add missing calls to reiserfs_security_free()
| *08371068ff HID: mcp2221: don't connect hidraw
| *7a203471b9 HID: wacom: Ensure bootloader PID is usable in hidraw mode
| *723ffde78a usb: dwc3: core: defer probe on ulpi_read_id timeout
| *d17c82aad6 usb: dwc3: Fix race between dwc3_set_mode and __dwc3_set_mode
| *2b725b6fbb ALSA: hda/hdmi: Add HP Device 0x8711 to force connect list
| *c863b67350 ALSA: hda/realtek: Add quirk for Lenovo TianYi510Pro-14IOB
| *d3767082eb ALSA: usb-audio: add the quirk for KT0206 device
| *7691fa4102 ima: Simplify ima_lsm_copy_rule
| *1d8dcc3dad pstore: Make sure CONFIG_PSTORE_PMSG selects CONFIG_RT_MUTEXES
| *07b0ce902e afs: Fix lost servers_outstanding count
| *1080729b9a perf debug: Set debug_peo_args and redirect_to_stderr variable to correct values in perf_quiet_option()
| *1c7b03d00c pstore: Switch pmsg_lock to an rt_mutex to avoid priority inversion
| *c3607ed7ed LoadPin: Ignore the "contents" argument of the LSM hooks
| *4138e1b775 ASoC: rt5670: Remove unbalanced pm_runtime_put()
| *fd49dc17c3 ASoC: rockchip: spdif: Add missing clk_disable_unprepare() in rk_spdif_runtime_resume()
| *c0ae46693b ASoC: wm8994: Fix potential deadlock
| *e4a8573b04 ASoC: rockchip: pdm: Add missing clk_disable_unprepare() in rockchip_pdm_runtime_resume()
| *06c9d468c0 ASoC: audio-graph-card: fix refcount leak of cpu_ep in __graph_for_each_link()
| *812a18e48e ASoC: mediatek: mt8173-rt5650-rt5514: fix refcount leak in mt8173_rt5650_rt5514_dev_probe()
| *c2eb1a3877 ASoC: Intel: Skylake: Fix driver hang during shutdown
| *72c0e552bc ALSA: hda: add snd_hdac_stop_streams() helper
| *d3a8925d6c ALSA/ASoC: hda: move/rename snd_hdac_ext_stop_streams to hdac_stream.c
| *2727dbfe8d hwmon: (jc42) Fix missing unlock on error in jc42_write()
| *a076490b02 orangefs: Fix kmemleak in orangefs_{kernel,client}_debug_init()
| *b8affa0c64 orangefs: Fix kmemleak in orangefs_prepare_debugfs_help_string()
| *6e3c4d3fa5 drm/sti: Fix return type of sti_{dvo,hda,hdmi}_connector_mode_valid()
| *f3d3f3564e drm/fsl-dcu: Fix return type of fsl_dcu_drm_connector_mode_valid()
| *dcd28191be hugetlbfs: fix null-ptr-deref in hugetlbfs_parse_param()
| *efd025f32f clk: st: Fix memory leak in st_of_quadfs_setup()
| *1c6447d0fc media: si470x: Fix use-after-free in si470x_int_in_callback()
| *a63a1ae134 mmc: renesas_sdhi: better reset from HS400 mode
| *58e21146c0 mmc: f-sdh30: Add quirks for broken timeout clock capability
| *4b737246ff regulator: core: fix use_count leakage when handling boot-on
| *17c2eb9ce8 libbpf: Avoid enum forward-declarations in public API in C++ mode
| *e8022da1fa blk-mq: fix possible memleak when register 'hctx' failed
| *7d7ab25ead media: dvb-usb: fix memory leak in dvb_usb_adapter_init()
| *2abd734338 media: dvbdev: adopts refcnt to avoid UAF
| *b42580c8d8 media: dvb-frontends: fix leak of memory fw
| *dd1e1bf916 ethtool: avoiding integer overflow in ethtool_phys_id()
| *329a766355 bpf: Prevent decl_tag from being referenced in func_proto arg
| *148dcbd3af ppp: associate skb with a device at tx
| *755eb08792 mrp: introduce active flags to prevent UAF when applicant uninit
| *037db10e3f net: add atomic_long_t to net_device_stats fields
| *e2d60023af drm/amd/display: fix array index out of bound error in bios parser
| *10d713532f md/raid1: stop mdx_raid1 thread when raid1 array run failed
| *100caacfa0 drivers/md/md-bitmap: check the return value of md_bitmap_get_counter()
| *7d86851c30 drm/sti: Use drm_mode_copy()
| *dd31d73040 drm/rockchip: Use drm_mode_copy()
| *4f238212c7 drm/msm: Use drm_mode_copy()
| *ebc3c77785 s390/lcs: Fix return type of lcs_start_xmit()
| *3ac0217ca9 s390/netiucv: Fix return type of netiucv_tx()
| *eeb75f80bc s390/ctcm: Fix return type of ctc{mp,}m_tx()
| *9606bbc271 drm/amdgpu: Fix type of second parameter in odn_edit_dpm_table() callback
| *a42a23bdae drm/amdgpu: Fix type of second parameter in trans_msg() callback
| *3cb18dea11 igb: Do not free q_vector unless new one was allocated
| *87792567d9 wifi: brcmfmac: Fix potential shift-out-of-bounds in brcmf_fw_alloc_request()
| *e7aa8a4709 hamradio: baycom_epp: Fix return type of baycom_send_packet()
| *5b0b6553bf net: ethernet: ti: Fix return type of netcp_ndo_start_xmit()
| *6d935a0265 bpf: make sure skb->len != 0 when redirecting to a tunneling device
| *ebc2fb6afc qed (gcc13): use u16 for fid to be big enough
| *648cdb8bf3 drm/amd/display: prevent memory leak
| *c69bc8e34d ipmi: fix memleak when unload ipmi driver
| *be4cd23cd3 ASoC: codecs: rt298: Add quirk for KBL-R RVP platform
| *8af5249271 wifi: ar5523: Fix use-after-free on ar5523_cmd() timed out
| *1824ccabee wifi: ath9k: verify the expected usb_endpoints are present
| *2e8bb402b0 brcmfmac: return error when getting invalid max_flowrings from dongle
| *6cd4865bb4 drm/etnaviv: add missing quirks for GC300
| *4fd3a11804 hfs: fix OOB Read in __hfs_brec_find
| *6edd0cdee5 acct: fix potential integer overflow in encode_comp_t()
| *ec93b5430e nilfs2: fix shift-out-of-bounds due to too large exponent of block size
| *d464b035c0 nilfs2: fix shift-out-of-bounds/overflow in nilfs_sb2_bad_offset()
| *b0b83d3f3f ACPICA: Fix error code path in acpi_ds_call_control_method()
| *911999b193 fs: jfs: fix shift-out-of-bounds in dbDiscardAG
| *40dba68d41 udf: Avoid double brelse() in udf_rename()
| *3e997e4ce8 fs: jfs: fix shift-out-of-bounds in dbAllocAG
| *dcbc51d31d binfmt_misc: fix shift-out-of-bounds in check_special_flags
| *22c1d8f24c x86/hyperv: Remove unregister syscore call from Hyper-V cleanup
| *9b267051c8 video: hyperv_fb: Avoid taking busy spinlock on panic path
| *0461a8c278 arm64: make is_ttbrX_addr() noinstr-safe
| *5a52380b81 rcu: Fix __this_cpu_read() lockdep warning in rcu_force_quiescent_state()
| *9062493811 net: stream: purge sk_error_queue in sk_stream_kill_queues()
| *7c3a20bfd2 myri10ge: Fix an error handling path in myri10ge_probe()
| *3c97373690 rxrpc: Fix missing unlock in rxrpc_do_sendmsg()
| *5c544c7c6a net_sched: reject TCF_EM_SIMPLE case for complex ematch module
| *a39b4de080 mailbox: zynq-ipi: fix error handling while device_register() fails
| *821be5a5ab skbuff: Account for tail adjustment during pull operations
| *6736b61ecf openvswitch: Fix flow lookup to use unmasked key
| *ea14220031 selftests: devlink: fix the fd redirect in dummy_reporter_test
| *57ce1a36c0 rtc: mxc_v2: Add missing clk_disable_unprepare()
| *1e2a27dab1 igc: Set Qbv start_time and end_time to end_time if not being configured in GCL
| *edb995b5ec igc: Lift TAPRIO schedule restriction
| *b48d3db891 igc: recalculate Qbv end_time by considering cycle time
| *3f2a944c23 igc: Add checking for basetime less than zero
| *a0e2295c2a igc: Use strict cycles for Qbv scheduling
| *413fe82420 igc: Enhance Qbv scheduling by using first flag bit
| *a8f9698a05 net: add a helper to avoid issues with HW TX timestamping and SO_TXTIME
| *ae5d96bae3 net: igc: use skb_csum_is_sctp instead of protocol check
| *4794d07fe6 net: add inline function skb_csum_is_sctp
| *67349025f0 net: switch to storing KCOV handle directly in sk_buff
| *3d5f83a62e r6040: Fix kmemleak in probe and remove
| *aea9e64dec nfc: pn533: Clear nfc_target before being used
| *6939f84e53 mISDN: hfcmulti: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave()
| *b58c871966 mISDN: hfcpci: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave()
| *30e0a066b6 mISDN: hfcsusb: don't call dev_kfree_skb/kfree_skb() under spin_lock_irqsave()
| *2a930b75bf net: macsec: fix net device access prior to holding a lock
| *c1207219a4 nfsd: under NFSv4.1, fix double svc_xprt_put on rpc_create failure
| *452e85cc3e NFSD: Remove spurious cb_setup_err tracepoint
| *09c007e239 rtc: pcf85063: fix pcf85063_clkout_control
| *e9453c13ae rtc: pic32: Move devm_rtc_allocate_device earlier in pic32_rtc_probe()
| *34836c5923 rtc: st-lpc: Add missing clk_disable_unprepare in st_rtc_probe()
| *8e65e70764 netfilter: flowtable: really fix NAT IPv6 offload
| *1f5571cb1d powerpc/pseries/eeh: use correct API for error log size
| *9dc96fee26 powerpc/eeh: Drop redundant spinlock initialization
| *12654b7d8d remoteproc: qcom_q6v5_pas: Fix missing of_node_put() in adsp_alloc_memory_region()
| *be5816b27b remoteproc: qcom_q6v5_pas: detach power domains on remove
| *582dd58a18 remoteproc: qcom_q6v5_pas: disable wakeup on probe fail or remove
| *e4539eb5c0 remoteproc: sysmon: fix memory leak in qcom_add_sysmon_subdev()
| *6a65f4644b pwm: sifive: Call pwm_sifive_update_clock() while mutex is held
| *57f215a231 iommu/sun50i: Remove IOMMU_DOMAIN_IDENTITY
| *dc02d8e59d selftests/powerpc: Fix resource leaks
| *464506de9b powerpc/hv-gpci: Fix hv_gpci event list
| *0abfe735be powerpc/83xx/mpc832x_rdb: call platform_device_put() in error case in of_fsl_spi_probe()
| *da1a33795d powerpc/perf: callchain validate kernel stack pointer bounds
| *8d2ff5b3e2 kbuild: refactor single builds of *.ko
| *e3bdda22b4 kbuild: unify modules(_install) for in-tree and external modules
| *e215512959 kbuild: remove unneeded mkdir for external modules_install
| *195cb98861 powerpc/xive: add missing iounmap() in error path in xive_spapr_populate_irq_data()
| *f0bd6504e5 powerpc/xmon: Fix -Wswitch-unreachable warning in bpt_cmds
| *76957b6aed powerpc/xmon: Enable breakpoints on 8xx
| *81c8bbf5b2 cxl: Fix refcount leak in cxl_calc_capp_routing
| *40b4be399e powerpc/52xx: Fix a resource leak in an error handling path
| *7fded04fbb macintosh/macio-adb: check the return value of ioremap()
| *2ac0a7059b macintosh: fix possible memory leak in macio_add_one_device()
| *0d240ac0e4 iommu/fsl_pamu: Fix resource leak in fsl_pamu_probe()
| *03f51c7299 iommu/amd: Fix pci device refcount leak in ppr_notifier()
| *3929576f10 rtc: pcf85063: Fix reading alarm
| *aaed333e22 rtc: snvs: Allow a time difference on clock register read
| *f0c36ea424 rtc: cmos: Disable ACPI RTC event on removal
| *ca8cb20c22 rtc: cmos: Rename ACPI-related functions
| *9f9923baa2 rtc: cmos: Eliminate forward declarations of some functions
| *462db582e8 rtc: cmos: Call rtc_wake_setup() from cmos_do_probe()
| *14ad1353c5 rtc: cmos: Call cmos_wake_setup() from cmos_do_probe()
| *45b96601a6 rtc: cmos: fix build on non-ACPI platforms
| *f2ece2c722 rtc: cmos: Fix wake alarm breakage
| *0bcfccb486 rtc: cmos: Fix event handler registration ordering issue
| *5814d77e2f rtc: rtc-cmos: Do not check ACPI_FADT_LOW_POWER_S0
| *490b233677 dmaengine: idxd: Fix crc_val field for completion record
| *a42e955475 pwm: tegra: Improve required rate calculation
| *ddd2bb08bd include/uapi/linux/swab: Fix potentially missing __always_inline
| *c2a9843342 phy: usb: s2 WoL wakeup_count not incremented for USB->Eth devices
| *ca31ad0932 iommu/sun50i: Fix flush size
| *147af0c1e7 iommu/sun50i: Fix R/W permission check
| *3a63c4ff57 iommu/sun50i: Consider all fault sources for reset
| *160b92ab4a iommu/sun50i: Fix reset release
| *aeef93416c RDMA/siw: Fix pointer cast warning
| *5beadb55f4 power: supply: fix null pointer dereferencing in power_supply_get_battery_info
| *72283ecfdc HSI: omap_ssi_core: Fix error handling in ssi_init()
| *73ca3b19d9 perf symbol: correction while adjusting symbol
| *c8e77bd749 perf trace: Handle failure when trace point folder is missed
| *bd29da5804 perf trace: Use macro RAW_SYSCALL_ARGS_NUM to replace number
| *6364577ae2 perf trace: Return error if a system call doesn't exist
| *1d6d90994a power: supply: fix residue sysfs file in error handle route of __power_supply_register()
| *ae2eb995ab HSI: omap_ssi_core: fix possible memory leak in ssi_probe()
| *6ba4b00f88
HSI: omap_ssi_core: fix unbalanced pm_runtime_disable() | *ee13e2aec3
fbdev: uvesafb: Fixes an error handling path in uvesafb_probe() | *164857bc02
fbdev: vermilion: decrease reference count in error path | *71bca42bc4
fbdev: via: Fix error in via_core_init() | *3922415e4c
fbdev: pm2fb: fix missing pci_disable_device() | *f279a7af79
fbdev: ssd1307fb: Drop optional dependency | *c56c1449cc
thermal/drivers/imx8mm_thermal: Validate temperature range | *86fa7bb4e2
samples: vfio-mdev: Fix missing pci_disable_device() in mdpy_fb_probe() | *962f869b36
tracing/hist: Fix issue of losting command info in error_log | *b7bf15aa19
usb: storage: Add check for kcalloc | *9ac541a089
i2c: ismt: Fix an out-of-bounds bug in ismt_access() | *61df25c41b
i2c: mux: reg: check return value after calling platform_get_resource() | *6d79546622
gpiolib: cdev: fix NULL-pointer dereferences | *aeee7ad089
gpiolib: Get rid of redundant 'else' | *37d3de40c1
vme: Fix error not catched in fake_init() | *b9fa01fb31
staging: rtl8192e: Fix potential use-after-free in rtllib_rx_Monitor() | *daa8045a99
staging: rtl8192u: Fix use after free in ieee80211_rx() | *46b3885c8c
i2c: pxa-pci: fix missing pci_disable_device() on error in ce4100_i2c_probe | *c46db6088b
chardev: fix error handling in cdev_device_add() | *7b289b791a
mcb: mcb-parse: fix error handing in chameleon_parse_gdd() | *0d1c2c8db2
drivers: mcb: fix resource leak in mcb_probe() | *e88b89a096
usb: gadget: f_hid: fix refcount leak on error path | *1b6a53e447
usb: gadget: f_hid: fix f_hidg lifetime vs cdev | *52302c30b2
usb: gadget: f_hid: optional SETUP/SET_REPORT mode | *c3767f8105
usb: roles: fix of node refcount leak in usb_role_switch_is_parent() | *07905a9249
counter: stm32-lptimer-cnt: fix the check on arr and cmp registers update | *bb5e9402b2
iio: adis: add '__adis_enable_irq()' implementation | *3feb8fd8bf
iio:imu:adis: Move exports into IIO_ADISLIB namespace | *83e321a2ec
iio: adis: stylistic changes | *d1b73eebc7
iio: adis: handle devices that cannot unmask the drdy pin | *8eb2a679c6
iio:imu:adis: Use IRQF_NO_AUTOEN instead of irq request then disable | *50aaa6b174
genirq: Add IRQF_NO_AUTOEN for request_irq/nmi() | *6b22e715bb
iio: temperature: ltc2983: make bulk write buffer DMA-safe | *0f63c0ddc2
cxl: fix possible null-ptr-deref in cxl_pci_init_afu|adapter() | *170e8c2d2b
cxl: fix possible null-ptr-deref in cxl_guest_init_afu|adapter() | *d34742245e
firmware: raspberrypi: fix possible memory leak in rpi_firmware_probe() | *0f67ed565f
misc: sgi-gru: fix use-after-free error in gru_set_context_option, gru_fault and gru_handle_user_call_os | *57c857353d
misc: tifm: fix possible memory leak in tifm_7xx1_switch_media() | *a40e1b0a92
ocxl: fix pci device refcount leak when calling get_function_0() | *7525741cb3
misc: ocxl: fix possible name leak in ocxl_file_register_afu() | *0b5a89e8bc
test_firmware: fix memory leak in test_firmware_init() | *d7c4331c07
serial: sunsab: Fix error handling in sunsab_init() | *a26b13d158
serial: altera_uart: fix locking in polling mode | *8ff88d007f
tty: serial: altera_uart_{r,t}x_chars() need only uart_port | *af320d1a3c
tty: serial: clean up stop-tx part in altera_uart_tx_chars() | *07f4ca68b0
serial: pch: Fix PCI device refcount leak in pch_request_dma() | *46d08b0e0b
serial: pl011: Do not clear RX FIFO & RX interrupt in unthrottle. | *d5b16eb076
serial: amba-pl011: avoid SBSA UART accessing DMACR register | *fab27438ab
usb: typec: tipd: Fix spurious fwnode_handle_put in error path | *d3b6c28a71
usb: typec: tcpci: fix of node refcount leak in tcpci_register_port() | *1ca02df871
usb: typec: Check for ops->exit instead of ops->enter in altmode_exit | *5d2b286eb0
staging: vme_user: Fix possible UAF in tsi148_dma_list_add | *775a6f8bed
usb: fotg210-udc: Fix ages old endianness issues | *2fcb7c7d52
uio: uio_dmem_genirq: Fix deadlock between irq config and handling | *9bf7a0b2b1
uio: uio_dmem_genirq: Fix missing unlock in irq configuration | *27b612bd09
vfio: platform: Do not pass return buffer to ACPI _RST method | *18a7200646
class: fix possible memory leak in __class_register() | *7e74868a38
serial: tegra: Read DMA status before terminating | *fce9890e1b
drivers: dio: fix possible memory leak in dio_init() | *d217141345
IB/IPoIB: Fix queue count inconsistency for PKEY child interfaces | *aa96aff394
hwrng: geode - Fix PCI device refcount leak | *5998e5c30e
hwrng: amd - Fix PCI device refcount leak | *38da26c855
crypto: img-hash - Fix variable dereferenced before check 'hdev->req' | *15ca148940
RDMA/hns: Fix page size cap from firmware | *83b2c33b53
RDMA/hns: Fix PBL page MTR find | *73ab1c956a
orangefs: Fix sysfs not cleanup when dev init failed | *0c53bb661f
RDMA/srp: Fix error return code in srp_parse_options() | *7cbf2fc276
RDMA/hfi1: Fix error return code in parse_platform_config() | *61c5b47c5b
riscv/mm: add arch hook arch_clear_hugepage_flags | *09814c669d
crypto: omap-sham - Use pm_runtime_resume_and_get() in omap_sham_probe() | *75c7b5d6b5
crypto: amlogic - Remove kcalloc without check | *357f3e1756
RDMA/nldev: Fix failure to send large messages | *25a8dabaab
f2fs: avoid victim selection from previous victim section | *d1b85d2883
RDMA/nldev: Add checks for nla_nest_start() in fill_stat_counter_qps() | *ad27f74e90
scsi: snic: Fix possible UAF in snic_tgt_create() | *22e8c7a56b
scsi: fcoe: Fix transport not deattached when fcoe_if_init() fails | *f4ba143b04
scsi: ipr: Fix WARNING in ipr_init() | *b520a32796
scsi: scsi_debug: Fix possible name leak in sdebug_add_host_helper() | *9d0ad1e2ba
scsi: fcoe: Fix possible name leak when device_register() fails | *2b142f6046
scsi: scsi_debug: Fix a warning in resp_report_zones() | *eaa71cdae8
scsi: scsi_debug: Fix a warning in resp_verify() | *ac5cfe8bbb
scsi: hpsa: Fix possible memory leak in hpsa_add_sas_device() | *f671a3f286
scsi: hpsa: Fix error handling in hpsa_add_sas_host() | *ce1a69cc85
scsi: mpt3sas: Fix possible resource leaks in mpt3sas_transport_port_add() | *7ccfc2bb58
padata: Fix list iterator in padata_do_serial() | *8e0681dd4e
padata: Always leave BHs disabled when running ->parallel() | *4a99e6a104
crypto: tcrypt - Fix multibuffer skcipher speed test mem leak | *c808edbf58
scsi: hpsa: Fix possible memory leak in hpsa_init_one() | *6bb5a62bfd
RDMA/rxe: Fix NULL-ptr-deref in rxe_qp_do_cleanup() when socket create failed | *164fa80330
RDMA/hns: fix memory leak in hns_roce_alloc_mr() | *3d47544ba0
crypto: ccree - Make cc_debugfs_global_fini() available for module init function | *34bab85c2e
RDMA/hfi: Decrease PCI device reference count in error path | *d8f2a0bc52
PCI: Check for alloc failure in pci_request_irq() | *8b5f1af335
RDMA/hns: Fix ext_sge num error when post send | *cc5e915358
RDMA/hns: Repacing 'dseg_len' by macros in fill_ext_sge_inl_data() | *ed97ade655
crypto: hisilicon/qm - add missing pci_dev_put() in q_num_set() | *2dfe1d221e
crypto: cryptd - Use request context instead of stack for sub-request | *1ab9e15b14
crypto: ccree - Remove debugfs when platform_driver_register failed | *33260f4c3e
scsi: scsi_debug: Fix a warning in resp_write_scat() | *917bf4c0a7
RDMA/siw: Set defined status for work completion with undefined status | *237a8936d6
RDMA/nldev: Return "-EAGAIN" if the cm_id isn't from expected port | *75af03fdf3
RDMA/siw: Fix immediate work request flush to completion queue | *ef8e236832
f2fs: fix normal discard process | *9a32aa87a2
apparmor: Fix memleak in alloc_ns() | *417ef568a7
crypto: rockchip - rework by using crypto_engine | *6cd8bbb089
crypto: rockchip - delete unneeded variable initialization | *de041a2e70
crypto: rockchip - remove non-aligned handling | *0971bc99d1
crypto: rockchip - better handle cipher key | *b0b9635f09
crypto: rockchip - add fallback for ahash | *fbd5f112dc
crypto: rockchip - add fallback for cipher | *86f1e7f46b
crypto: rockchip - do not store mode globally | *a13c0ff862
crypto: rockchip - do not do custom power management | *f1acf7e693
f2fs: Fix the race condition of resize flag between resizefs | *c42d8120bf
PCI: pci-epf-test: Register notifier if only core_init_notifier is enabled | *16db9aaa41
RDMA/core: Fix order of nldev_exit call | *9784b01eb4
PCI: dwc: Fix n_fts[] array overrun | *6962f682d0
apparmor: Use pointer to struct aa_label for lbs_cred | *f4c917a4b0
scsi: core: Fix a race between scsi_done() and scsi_timeout() | *3bebfa5f93
crypto: nitrox - avoid double free on error path in nitrox_sriov_init() | *ee3cffc38e
crypto: sun8i-ss - use dma_addr instead u32 | *bf4d7c66a1
apparmor: Fix abi check to include v8 abi | *78629ca972
apparmor: fix lockdep warning when removing a namespace | *935d86b290
apparmor: fix a memleak in multi_transaction_new() | *f694e627c6
stmmac: fix potential division by 0 | *815b961c71
Bluetooth: RFCOMM: don't call kfree_skb() under spin_lock_irqsave() | *4002180e07
Bluetooth: hci_core: don't call kfree_skb() under spin_lock_irqsave() | *82256faaeb
Bluetooth: hci_bcsp: don't call kfree_skb() under spin_lock_irqsave() | *33af776a8d
Bluetooth: hci_h5: don't call kfree_skb() under spin_lock_irqsave() | *5991402fe0
Bluetooth: hci_ll: don't call kfree_skb() under spin_lock_irqsave() | *0169acb41b
Bluetooth: hci_qca: don't call kfree_skb() under spin_lock_irqsave() | *f7dc27702b
Bluetooth: btusb: don't call kfree_skb() under spin_lock_irqsave() | *214346a517
sctp: sysctl: make extra pointers netns aware | *13286ad1c7
ntb_netdev: Use dev_kfree_skb_any() in interrupt context | *4df544f592
net: lan9303: Fix read error execution path | *39b48a92ed
can: tcan4x5x: Remove invalid write in clear_interrupts | *334c9fb892
net: amd-xgbe: Check only the minimum speed for active/passive cables | *03ea9ba5fd
net: amd-xgbe: Fix logic around active and passive cables | *8eb5f8ae51
net: amd: lance: don't call dev_kfree_skb() under spin_lock_irqsave() | *ee3b1364af
hamradio: don't call dev_kfree_skb() under spin_lock_irqsave() | *b242358a27
net: ethernet: dnet: don't call dev_kfree_skb() under spin_lock_irqsave() | *decede59ea
net: emaclite: don't call dev_kfree_skb() under spin_lock_irqsave() | *c43def060c
net: apple: bmac: don't call dev_kfree_skb() under spin_lock_irqsave() | *0e23250149
net: apple: mace: don't call dev_kfree_skb() under spin_lock_irqsave() | *91f09a776a
net/tunnel: wait until all sk_user_data reader finish before releasing the sock | *51e2d1b84a
net: farsync: Fix kmemleak when rmmods farsync | *0b3f452d0c
ethernet: s2io: don't call dev_kfree_skb() under spin_lock_irqsave() | *2b4af99b44
of: overlay: fix null pointer dereferencing in find_dup_cset_node_entry() and find_dup_cset_prop() | *14b349a15c
drivers: net: qlcnic: Fix potential memory leak in qlcnic_sriov_init() | *787d1bae7f
net: stmmac: selftests: fix potential memleak in stmmac_test_arpoffload() | *8ed9994457
net: defxx: Fix missing err handling in dfx_init() | *e2227eee7a
net: vmw_vsock: vmci: Check memcpy_from_msg() | *3e8fd1d0fa
clk: socfpga: Fix memory leak in socfpga_gate_init() | *4b672ee71c
clk: socfpga: use clk_hw_register for a5/c5 | *ae8190f19f
clk: socfpga: clk-pll: Remove unused variable 'rc' | *782d0444ea
blktrace: Fix output non-blktrace event when blk_classic option enabled | *2484f15964
wifi: brcmfmac: Fix error return code in brcmf_sdio_download_firmware() | *f89c0fbb8b
wifi: rtl8xxxu: Fix the channel width reporting | *d430037248
wifi: rtl8xxxu: Add __packed to struct rtl8723bu_c2h | *7f3b4fa482
spi: spi-gpio: Don't set MOSI as an input if not 3WIRE mode | *da13355bb9
clk: samsung: Fix memory leak in _samsung_clk_register_pll() | *d9b37ea886
media: coda: Add check for kmalloc | *35ddd00b36
media: coda: Add check for dcoda_iram_alloc | *6fdb8661b9
media: c8sectpfe: Add of_node_put() when breaking out of loop | *0b1e96d3fd
mmc: mmci: fix return value check of mmc_add_host() | *1922def5cb
mmc: wbsd: fix return value check of mmc_add_host() | *63400da6cd
mmc: via-sdmmc: fix return value check of mmc_add_host() | *64b2c44117
mmc: meson-gx: fix return value check of mmc_add_host() | *fb3d596267
mmc: omap_hsmmc: fix return value check of mmc_add_host() | *00ac0f5f95
mmc: atmel-mci: fix return value check of mmc_add_host() | *9bedf64dda
mmc: wmt-sdmmc: fix return value check of mmc_add_host() | *3049a3b927
mmc: vub300: fix return value check of mmc_add_host() | *aabbedcb6c
mmc: toshsd: fix return value check of mmc_add_host() | *7fa922c7a3
mmc: rtsx_usb_sdmmc: fix return value check of mmc_add_host() | *b896a9b7a0
mmc: pxamci: fix return value check of mmc_add_host() | *3904eb97bb
mmc: mxcmmc: fix return value check of mmc_add_host() | *7c3b301ca8
mmc: moxart: fix return value check of mmc_add_host() | *4a6e5d0222
mmc: alcor: fix return value check of mmc_add_host() | *81ea3d964f
NFSv4.x: Fail client initialisation if state manager thread can't run | *3fbc3c78fa
SUNRPC: Fix missing release socket in rpc_sockname() | *be7d90fc3a
xprtrdma: Fix regbuf data not freed in rpcrdma_req_create() | *0649129359
ALSA: mts64: fix possible null-ptr-defer in snd_mts64_interrupt | *7df1fbe49b
media: saa7164: fix missing pci_disable_device() | *46a9b31369
ALSA: pcm: Set missing stop_operating flag at undoing trigger start | *be719496ae
bpf, sockmap: fix race in sock_map_free() | *8c3ef38a0d
hwmon: (jc42) Restore the min/max/critical temperatures on resume | *e7720ef53b
hwmon: (jc42) Convert register access and caching to regmap/regcache | *6a03c31d08
regulator: core: fix resource leak in regulator_register() | *74ac7c9ee2
configfs: fix possible memory leak in configfs_create_dir() | *0cf92d2356
hsr: Synchronize sequence number updates. | *c671f2d10d
hsr: Synchronize sending frames to have always incremented outgoing seq nr. | *28921ec555
hsr: Disable netpoll. | *8cee8543f0
net: hsr: generate supervision frame without HSR/PRP tag | *38d13a2a9e
hsr: Add a rcu-read lock to hsr_forward_skb(). | *ee4425e81d
clk: qcom: clk-krait: fix wrong div2 functions | *6f25402d8a
regulator: core: fix module refcount leak in set_supply() | *f532db69ab
wifi: mt76: fix coverity overrun-call in mt76_get_txpower() | *4ecb7a6e61
wifi: cfg80211: Fix not unregister reg_pdev when load_builtin_regdb_keys() fails | *b2c0b94f48
wifi: mac80211: fix memory leak in ieee80211_if_add() | *b0163248db
spi: spidev: mask SPI_CS_HIGH in SPI_IOC_RD_MODE | *ab19f402a1
bonding: uninitialized variable in bond_miimon_inspect() | *c58df40e3e
bpf, sockmap: Fix data loss caused by using apply_bytes on ingress redirect | *28e4a763cd
bpf, sockmap: Fix repeated calls to sock_put() when msg has more_data | *429a2a4258
netfilter: conntrack: set icmpv6 redirects as RELATED | *cd0e9ee50c
ASoC: pcm512x: Fix PM disable depth imbalance in pcm512x_probe | *7c1ddf7c66
drm/amdgpu: Fix PCI device refcount leak in amdgpu_atrm_get_bios() | *3991d98a8a
drm/radeon: Fix PCI device refcount leak in radeon_atrm_get_bios() | *a012cdd4fd
drm/amd/pm/smu11: BACO is supported when it's in BACO state | *57491967ad
ASoC: mediatek: mt8173: Enable IRQ when pdata is ready | *52c9ad56c1
ASoC: mediatek: mt8173: Fix debugfs registration for components | *ae966649f6
wifi: iwlwifi: mvm: fix double free on tx path. | *ae66695aa1
ALSA: asihpi: fix missing pci_disable_device() | *5458bc0f9d
NFS: Fix an Oops in nfs_d_automount() | *bc60485b93
NFSv4: Fix a deadlock between nfs4_open_recover_helper() and delegreturn | *d16d7870fd
NFSv4.2: Fix initialisation of struct nfs4_label | *15feece7af
NFSv4.2: Fix a memory stomp in decode_attr_security_label | *58a1023eb5
NFSv4.2: Clear FATTR4_WORD2_SECURITY_LABEL when done decoding | *193691ff5b
ASoC: mediatek: mtk-btcvsd: Add checks for write and read of mtk_btcvsd_snd | *6013c3de95
ASoC: dt-bindings: wcd9335: fix reset line polarity in example | *cf2cbca714
drm/tegra: Add missing clk_disable_unprepare() in tegra_dc_probe() | *54ab127600
media: s5p-mfc: Add variant data for MFC v7 hardware for Exynos 3250 SoC | *559891d430
media: dvb-usb: az6027: fix null-ptr-deref in az6027_i2c_xfer() | *e34cf6cacc
media: dvb-core: Fix ignored return value in dvb_register_frontend() | *05be5d56f7
pinctrl: pinconf-generic: add missing of_node_put() | *9916497a12
clk: imx: replace osc_hdmi with dummy | *dabf7b675c
media: imon: fix a race condition in send_packet() | *14d85b600b
media: vimc: Fix wrong function called when vimc_init() fails | *4518d7cc38
ASoC: qcom: Add checks for devm_kcalloc | *b73fac67f3
drbd: fix an invalid memory access caused by incorrect use of list iterator | *1d0c2b762d
mtd: maps: pxa2xx-flash: fix memory leak in probe | *7d1e0d237c
bonding: fix link recovery in mode 2 when updelay is nonzero | *3725a8f26b
drm/amdgpu: fix pci device refcount leak | *f4d70c139d
clk: rockchip: Fix memory leak in rockchip_clk_register_pll() | *a065be0243
regulator: core: use kfree_const() to free space conditionally | *d7198b63cb
ALSA: seq: fix undefined behavior in bit shift for SNDRV_SEQ_FILTER_USE_EVENT | *88550b4446
ALSA: pcm: fix undefined behavior in bit shift for SNDRV_PCM_RATE_KNOT | *ad2d0a3dc2
HID: hid-sensor-custom: set fixed size for custom attributes | *0d6ae25da5
bpf: Move skb->len == 0 checks into __bpf_redirect | *9920e87a84
inet: add READ_ONCE(sk->sk_bound_dev_if) in inet_csk_bind_conflict() | *49aa080951
media: videobuf-dma-contig: use dma_mmap_coherent | *8470060019
media: platform: exynos4-is: Fix error handling in fimc_md_init() | *49060c0da5
media: solo6x10: fix possible memory leak in solo_sysfs_init() | *0369af6fe3
media: vidtv: Fix use-after-free in vidtv_bridge_dvb_init() | *3afd738e77
Input: elants_i2c - properly handle the reset GPIO when power is off | *0919982a17
mtd: lpddr2_nvm: Fix possible null-ptr-deref | *effbf63616
wifi: ath10k: Fix return value in ath10k_pci_init() | *adf03c3099
ima: Fix misuse of dereference of pointer in template_desc_init_fields() | *3bd737289c
integrity: Fix memory leakage in keyring allocation error path | *102df01caf
drm/fourcc: Fix vsub/hsub for Q410 and Q401 | *6f6a99fb62
drm/fourcc: Add packed 10bit YUV 4:2:0 format | *85273b4a70
amdgpu/pm: prevent array underflow in vega20_odn_edit_dpm_table() | *f48c474efe
regulator: core: fix unbalanced of node refcount in regulator_dev_lookup() | *21a1409e8c
ASoC: pxa: fix null-pointer dereference in filter() | *698bbaf0b4
drm/mediatek: Modify dpi power on/off sequence. | *b4b30f56ec
drm/radeon: Add the missed acpi_put_table() to fix memory leak | *cea79ae89b
rxrpc: Fix ack.bufferSize to be 0 when generating an ack | *00fce49d14
net, proc: Provide PROC_FS=n fallback for proc_create_net_single_write() | *3d5cab726e
media: camss: Clean up received buffers on failed start of streaming | *61c96d99d4
wifi: rsi: Fix handling of 802.3 EAPOL frames sent via control port | *624438195c
Input: joystick - fix Kconfig warning for JOYSTICK_ADC | *330bc5533e
mtd: Fix device name leak when register device failed in add_mtd_device() | *1a79539f4e
clk: qcom: gcc-sm8250: Use retention mode for USB GDSCs | *e1989d808b
bpf: propagate precision across all frames, not just the last one | *cdd73a5ed0
bpf: Check the other end of slot_type for STACK_SPILL | *42b2b7382a
bpf: propagate precision in ALU/ALU64 operations | *7fc38327fd
media: platform: exynos4-is: fix return value check in fimc_md_probe() | *f9d19f3a04
media: vivid: fix compose size exceed boundary | *72e8d9c731
bpf: Fix slot type check in check_stack_write_var_off | *d959ff7fa9
drm/msm/hdmi: drop unused GPIO support | *b12f354fe6
drm/msm/hdmi: switch to drm_bridge_connector | *c4b035b1f0
ima: Handle -ESTALE returned by ima_filter_rule_match() | *d5b227f0d2
ima: Fix fall-through warnings for Clang | *576828e59a
drm/panel/panel-sitronix-st7701: Remove panel on DSI attach failure | *f1aa976857
spi: Update reference to struct spi_controller | *dd958c7f3e
clk: renesas: r9a06g032: Repair grave increment error | *110bf15825
drm/rockchip: lvds: fix PM usage counter unbalance in poweron | *1874f9143f
can: kvaser_usb: Compare requested bittiming parameters with actual parameters in do_set_{,data}_bittiming | *669bdf121f
can: kvaser_usb: Add struct kvaser_usb_busparams | *a50ad6772f
can: kvaser_usb_leaf: Fix bogus restart events | *cd56718e7c
can: kvaser_usb_leaf: Fix wrong CAN state after stopping | *f83742285f
can: kvaser_usb_leaf: Fix improved state not being reported | *fbd155fe14
can: kvaser_usb_leaf: Set Warning state even without bus errors | *96af45b1b4
can: kvaser_usb: kvaser_usb_leaf: Handle CMD_ERROR_EVENT | *caea629409
can: kvaser_usb: kvaser_usb_leaf: Rename {leaf,usbcan}_cmd_error_event to {leaf,usbcan}_cmd_can_error_event | *eafcf1b599
can: kvaser_usb: kvaser_usb_leaf: Get capabilities from device | *cd50258e9c
can: kvaser_usb: do not increase tx statistics when sending error message frames | *580c79fd57
media: exynos4-is: don't rely on the v4l2_async_subdev internals | *c93cac58a7
media: exynos4-is: Use v4l2_async_notifier_add_fwnode_remote_subdev | *4882492ad3
venus: pm_helpers: Fix error check in vcodec_domains_get() | *86d531c1d7
media: i2c: ad5820: Fix error path | *83f7e3c988
media: coda: jpeg: Add check for kmalloc | *7e0ba56c7e
pata_ipx4xx_cf: Fix unsigned comparison with less than zero | *85b297d798
libbpf: Fix null-pointer dereference in find_prog_by_sec_insn() | *c61650b869
libbpf: Fix use-after-free in btf_dump_name_dups | *26ce3f0c8f
drm/bridge: adv7533: remove dynamic lane switching from adv7533 bridge | *9b6851c182
wifi: rtl8xxxu: Fix reading the vendor of combo chips | *98d9172822
wifi: ath9k: hif_usb: Fix use-after-free in ath9k_hif_usb_reg_in_cb() | *c3fb3e9a2c
wifi: ath9k: hif_usb: fix memory leak of urbs in ath9k_hif_usb_dealloc_tx_urbs() | *53915ecc43
rapidio: devices: fix missing put_device in mport_cdev_open | *cff9fefdfb
hfs: Fix OOB Write in hfs_asc2mac | *93cdd12636
relay: fix type mismatch when allocating memory in relay_create_buf() | *bbaa9ca063
eventfd: change int to __u64 in eventfd_signal() ifndef CONFIG_EVENTFD | *5ee850645e
rapidio: fix possible UAF when kfifo_alloc() fails | *ad4842634d
fs: sysv: Fix sysv_nblocks() returns wrong value | *6f8ef1de8c
MIPS: OCTEON: warn only once if deprecated link status is being used | *7b88747d6d
MIPS: BCM63xx: Add check for NULL for clk in clk_enable | *d4c38ee665
platform/x86: intel_scu_ipc: fix possible name leak in __intel_scu_ipc_register() | *17cd8c46cb
platform/x86: mxm-wmi: fix memleak in mxm_wmi_call_mx[ds|mx]() | *f983afc432
PM: runtime: Do not call __rpm_callback() from rpm_idle() | *2cbbd78e08
PM: runtime: Improve path in rpm_idle() when no callback | *46026bb057
xen/privcmd: Fix a possible warning in privcmd_ioctl_mmap_resource() | *70e7f308d7
x86/xen: Fix memory leak in xen_init_lock_cpu() | *fc134c355b
x86/xen: Fix memory leak in xen_smp_intr_init{_pv}() | *95dbcb7e1c
uprobes/x86: Allow to probe a NOP instruction with 0x66 prefix | *02617006b5
ACPICA: Fix use-after-free in acpi_ut_copy_ipackage_to_ipackage() | *7bc9c5ad52
clocksource/drivers/timer-ti-dm: Fix missing clk_disable_unprepare in dmtimer_systimer_init_clock() | *270700e7df
cpu/hotplug: Make target_store() a nop when target == state | *fc89b8853a
futex: Resend potentially swallowed owner death notification | *4750cac4df
futex: Move to kernel/futex/ | *d8e7a44f48
clocksource/drivers/sh_cmt: Access registers according to spec | *0853787db2
clocksource/drivers/sh_cmt: Make sure channel clock supply is enabled | *97d9eb45ff
rapidio: rio: fix possible name leak in rio_register_mport() | *88fa351b20
rapidio: fix possible name leaks when rio_add_device() fails | *2b7e59ed2e
ocfs2: fix memory leak in ocfs2_mount_volume() | *45dabd8fe8
ocfs2: rewrite error handling of ocfs2_fill_super | *e403024c83
ocfs2: ocfs2_mount_volume does cleanup job before return error | *81d26aa903
debugfs: fix error when writing negative value to atomic_t debugfs file | *f649e18c9c
docs: fault-injection: fix non-working usage of negative values | *869a37ad6f
lib/notifier-error-inject: fix error when writing -errno to debugfs file | *c39aa503f4
libfs: add DEFINE_SIMPLE_ATTRIBUTE_SIGNED for signed value | *0080461624
cpufreq: amd_freq_sensitivity: Add missing pci_dev_put() | *9346517ed2
genirq/irqdesc: Don't try to remove non-existing sysfs files | *d97e58f728
nfsd: don't call nfsd_file_put from client states seqfile display | *2db53c7059
EDAC/i10nm: fix refcount leak in pci_get_dev_wrapper() | *f870d5863e
irqchip: gic-pm: Use pm_runtime_resume_and_get() in gic_probe() | *5c0cacdd35
platform/chrome: cros_usbpd_notify: Fix error handling in cros_usbpd_notify_init() | *0afcb759f6
perf/x86/intel/uncore: Fix reference count leak in __uncore_imc_init_box() | *d2afced511
perf/x86/intel/uncore: Fix reference count leak in snr_uncore_mmio_map() | *c0539d5d47
perf/x86/intel/uncore: Fix reference count leak in hswep_has_limit_sbox() | *dac87e295c
PNP: fix name memory leak in pnp_alloc_dev() | *e1049bf0ca
selftests/efivarfs: Add checking of the test return value | *911773f08c
MIPS: vpe-cmp: fix possible memory leak while module exiting | *48d42f4464
MIPS: vpe-mt: fix possible memory leak while module exiting | *f5f2682d3a
ocfs2: fix memory leak in ocfs2_stack_glue_init() | *c9a9aa02f0
lib/fonts: fix undefined behavior in bit shift for get_default_font | *9f6ea28f29
proc: fixup uptime selftest | *d5bf025c5b
timerqueue: Use rb_entry_safe() in timerqueue_getnext() | *2f2ae35c00
platform/x86: huawei-wmi: fix return value calculation | *a1014fbc83
lib/debugobjects: fix stat count and optimize debug_objects_mem_init | *60a7a0aa9d
perf: Fix possible memleak in pmu_dev_alloc() | *294ed8bfc9
selftests/ftrace: event_triggers: wait longer for test_event_enable | *3ef12a4a8e
cpufreq: qcom-hw: Fix memory leak in qcom_cpufreq_hw_read_lut() | *aa5f2912bb
fs: don't audit the capability check in simple_xattr_list() | *9e760e0cf2
PM: hibernate: Fix mistake in kerneldoc comment | *ef875e1c07
alpha: fix syscall entry in !AUDUT_SYSCALL case | *1498d2723e
cpuidle: dt: Return the correct numbers of parsed idle states | *2ff4014417
sched/uclamp: Fix relationship between uclamp and migration margin | *ca9ef12bf7
sched/fair: Cleanup task_util and capacity type | *6389c163c9
tpm/tpm_crb: Fix error message in __crb_relinquish_locality() | *5b217f4e79
tpm/tpm_ftpm_tee: Fix error handling in ftpm_mod_init() | *295f59cd2c
pstore: Avoid kcore oops by vmap()ing with VM_IOREMAP | *480bc6a165
ARM: mmp: fix timer_read delay | *d1b3164d0e
pstore/ram: Fix error return code in ramoops_probe() | *4dad729f7c
arm64: dts: armada-3720-turris-mox: Add missing interrupt for RTC | *872865db3b
ARM: dts: turris-omnia: Add switch port 6 node | *c1322d5f69
ARM: dts: turris-omnia: Add ethernet aliases | *d050513e6f
ARM: dts: armada-39x: Fix assigned-addresses for every PCIe Root Port | *bac1a77b85
ARM: dts: armada-38x: Fix assigned-addresses for every PCIe Root Port | *ea907f3032
ARM: dts: armada-375: Fix assigned-addresses for every PCIe Root Port | *ea8e313bb9
ARM: dts: armada-xp: Fix assigned-addresses for every PCIe Root Port | *697b92a648
ARM: dts: armada-370: Fix assigned-addresses for every PCIe Root Port | *73ab831afd
ARM: dts: dove: Fix assigned-addresses for every PCIe Root Port | *c2cb1683d1
arm64: dts: mediatek: mt6797: Fix 26M oscillator unit name | *1261352836
arm64: dts: mediatek: pumpkin-common: Fix devicetree warnings | *853d57e961
arm64: dts: mt2712-evb: Fix usb vbus regulators unit names | *436ac713a4
arm64: dts: mt2712-evb: Fix vproc fixed regulators unit names | *148e773557
arm64: dts: mt2712e: Fix unit address for pinctrl node | *a938c2a774
arm64: dts: mt2712e: Fix unit_address_vs_reg warning for oscillators | *a455b0c509
arm64: dts: ti: k3-j721e-main: Drop dma-coherent in crypto node | *42d97a024e
arm64: dts: ti: k3-am65-main: Drop dma-coherent in crypto node | *359286f886
perf/smmuv3: Fix hotplug callback leak in arm_smmu_pmu_init() | *9afac95b87
perf: arm_dsu: Fix hotplug callback leak in dsu_pmu_init() | *5e88aec62e
soc: ti: smartreflex: Fix PM disable depth imbalance in omap_sr_probe | *6a9a31c578
soc: ti: knav_qmss_queue: Fix PM disable depth imbalance in knav_queue_probe | *e325b4ee41
soc: ti: knav_qmss_queue: Use pm_runtime_resume_and_get instead of pm_runtime_get_sync | *0542d56e63
arm: dts: spear600: Fix clcd interrupt | *a8d4fb0bf1
soc: qcom: apr: Add check for idr_alloc and of_property_read_string_index | *6213df4f5f
soc: qcom: apr: make code more reuseable | *45d180a9f6
soc: qcom: llcc: make irq truly optional | *8fb204a4b5
drivers: soc: ti: knav_qmss_queue: Mark knav_acc_firmwares as static | *6a2faf6fce
ARM: dts: stm32: Fix AV96 WLAN regulator gpio property | *6d1b6dc38f
ARM: dts: stm32: Drop stm32mp15xc.dtsi from Avenger96 | *933499bed7
objtool, kcsan: Add volatile read/write instrumentation to whitelist | *275a67e909
arm64: dts: qcom: msm8916: Drop MSS fallback compatible | *82baee2263
arm64: dts: qcom: sdm845-cheza: fix AP suspend pin bias | *82569f7e40
arm64: dts: qcom: sdm630: fix UART1 pin bias | *4cef81dec2
ARM: dts: qcom: apq8064: fix coresight compatible | *5465b9a813
arm64: dts: qcom: msm8996: fix GPU OPP table | *6cad948c9f
arm64: dts: qcom: ipq6018-cp01-c1: use BLSPI1 pins | *60184b1437
usb: musb: remove extra check in musb_gadget_vbus_draw * |9e60339cb4
ANDROID: Update .xml due to ABI preservation fix * |1cd4863ea8
ANDROID: struct io_uring ABI preservation hack for 5.10.162 changes * |4c961b9302
ANDROID: fix up struct task_struct ABI change in 5.10.162 * |332c489d8b
ANDROID: add flags variable back to struct proto_ops * |8596b99884
Merge 5.10.162 into android12-5.10-lts |\| | *0fe4548663
Linux 5.10.162 | *189556b05e
io_uring: pass in EPOLL_URING_WAKE for eventfd signaling and wakeups | *4ef66581d7
eventfd: provide a eventfd_signal_mask() helper | *2f09377502
eventpoll: add EPOLL_URING_WAKE poll wakeup flag | *b76c5373f0
Revert "proc: don't allow async path resolution of /proc/self components" | *87cb08dc6b
Revert "proc: don't allow async path resolution of /proc/thread-self components" | *a3025359ff
net: remove cmsg restriction from io_uring based send/recvmsg calls | *6ef2b4728a
task_work: unconditionally run task_work from get_signal() | *c91ab04781
signal: kill JOBCTL_TASK_WORK | *788d082426
io_uring: import 5.15-stable io_uring | *ed30050329
task_work: add helper for more targeted task_work canceling | *831cb78a2a
kernel: don't call do_exit() for PF_IO_WORKER threads | *9ded44b69c
kernel: stop masking signals in create_io_thread() | *f0a5f0dc01
x86/process: setup io_threads more like normal user space threads | *dd26e2cec7
arch: ensure parisc/powerpc handle PF_IO_WORKER in copy_thread() | *320c8057ec
arch: setup PF_IO_WORKER threads like PF_KTHREAD | *000de389ad
entry/kvm: Exit to user mode when TIF_NOTIFY_SIGNAL is set | *0f735cf52b
kernel: allow fork with TIF_NOTIFY_SIGNAL pending | *4b4d2c7992
coredump: Limit what can interrupt coredumps | *90a2c3821b
kernel: remove checking for TIF_NOTIFY_SIGNAL | *61bdeb142e
task_work: remove legacy TWA_SIGNAL path | *6e2bce21ac
alpha: fix TIF_NOTIFY_SIGNAL handling | *db911277a2
ARC: unbork 5.11 bootup: fix snafu in _TIF_NOTIFY_SIGNAL handling | *a1240cc413
ia64: don't call handle_signal() unless there's actually a signal queued | *e1402ba4df
sparc: add support for TIF_NOTIFY_SIGNAL | *78a53ff026
riscv: add support for TIF_NOTIFY_SIGNAL | *57e833a0a0
nds32: add support for TIF_NOTIFY_SIGNAL | *751fedb9ba
ia64: add support for TIF_NOTIFY_SIGNAL | *48e9e35d33
h8300: add support for TIF_NOTIFY_SIGNAL | *c82617d9de
c6x: add support for TIF_NOTIFY_SIGNAL | *30b78a17ac
alpha: add support for TIF_NOTIFY_SIGNAL | *bf0b619593
xtensa: add support for TIF_NOTIFY_SIGNAL | *1bee9dbbca
arm: add support for TIF_NOTIFY_SIGNAL | *02d383a59c
microblaze: add support for TIF_NOTIFY_SIGNAL | *19f3e328b4
hexagon: add support for TIF_NOTIFY_SIGNAL | *c2037d61de
csky: add support for TIF_NOTIFY_SIGNAL | *12284aec88
openrisc: add support for TIF_NOTIFY_SIGNAL | *3fde31e962
sh: add support for TIF_NOTIFY_SIGNAL | *dc808ffd97
um: add support for TIF_NOTIFY_SIGNAL | *0aef2ec063
s390: add support for TIF_NOTIFY_SIGNAL | *8ca2e57099
mips: add support for TIF_NOTIFY_SIGNAL | *abab3d4444
powerpc: add support for TIF_NOTIFY_SIGNAL | *45b365bc6c
parisc: add support for TIF_NOTIFY_SIGNAL | *cf3c648673
nios32: add support for TIF_NOTIFY_SIGNAL | *fe137f46d4
m68k: add support for TIF_NOTIFY_SIGNAL | *79a9991e87
arm64: add support for TIF_NOTIFY_SIGNAL | *2dbb035451
arc: add support for TIF_NOTIFY_SIGNAL | *4b1dcf8ec9
x86: Wire up TIF_NOTIFY_SIGNAL | *eb42e7b304
task_work: Use TIF_NOTIFY_SIGNAL if available | *3c295bd2dd
entry: Add support for TIF_NOTIFY_SIGNAL | *d2136fc145
fs: provide locked helper variant of close_fd_get_file() | *57b2053036
file: Rename __close_fd_get_file close_fd_get_file | *214f80e251
fs: make do_renameat2() take struct filename | *52cfde6bbf
signal: Add task_sigpending() helper | *ad0b013795
net: add accept helper not installing fd | *069ac28d92
net: provide __sys_shutdown_sock() that takes a socket | *0b8cd5d814
tools headers UAPI: Sync openat2.h with the kernel sources | *5683caa735
fs: expose LOOKUP_CACHED through openat2() RESOLVE_CACHED | *0cf0ce8fb5
Make sure nd->path.mnt and nd->path.dentry are always valid pointers | *146fe79fff
fix handling of nd->depth on LOOKUP_CACHED failures in try_to_unlazy* | *c1fe7bd3e1
fs: add support for LOOKUP_CACHED | *36ec31201a
saner calling conventions for unlazy_child() | *e86db87191
iov_iter: add helper to save iov_iter state | *1500fed008
kernel: provide create_io_thread() helper * |bf760358ea
Merge branch 'android12-5.10' into android12-5.10-lts * |416c4356f3
Merge 5.10.161 into android12-5.10-lts |/ *1a9148dfd8
Linux 5.10.161 *eec1c3ade4
net: loopback: use NET_NAME_PREDICTABLE for name_assign_type *f3fe681715
Bluetooth: L2CAP: Fix u8 overflow *7c3a523c9b
HID: uclogic: Add HID_QUIRK_HIDINPUT_FORCE quirk *1d5db0c322
HID: ite: Enable QUIRK_TOUCHPAD_ON_OFF_REPORT on Acer Aspire Switch V 10 *263a1782a6
HID: ite: Enable QUIRK_TOUCHPAD_ON_OFF_REPORT on Acer Aspire Switch 10E *a20b5eec07
HID: ite: Add support for Acer S1002 keyboard-dock *f2479c3daa
igb: Initialize mailbox message for VF reset *9ff7aff40e
xhci: Apply XHCI_RESET_TO_DEFAULT quirk to ADL-N *c8bf31a00f
USB: serial: f81534: fix division by zero on line-speed change *5b75a00416
USB: serial: f81232: fix division by zero on line-speed change *9895ce5ea2
USB: serial: cp210x: add Kamstrup RF sniffer PIDs *398215f783
USB: serial: option: add Quectel EM05-G modem *c79538f32d
usb: gadget: uvc: Prevent buffer overflow in setup handler *8b2f86f82c
udf: Fix extending file within last block *db873b770d
udf: Do not bother looking for prealloc extents if i_lenExtents matches i_size *1a075f4a54
udf: Fix preallocation discarding at indirect extent boundary *1f7f7365ae
udf: Discard preallocation before extending file with a hole

Change-Id: I1463ff16fd85e32614dc83f585aa6b3957024a74
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/scs.h>
#include <linux/percpu-rwsem.h>
#include <linux/cpuset.h>
#include <linux/random.h>
#include <uapi/linux/sched/types.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/sched.h>
#include <trace/hooks/cpu.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#ifdef CONFIG_SMP
cpumask_t cpus_booted_once_mask;
#endif

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);


static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			cant_stop;
	bool			multi_instance;
};
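
/*
 * Illustrative sketch, not part of kernel/cpu.c: the startup/teardown pair of
 * a cpuhp_step is what a subsystem installs through the cpuhp_setup_state()
 * family declared in <linux/cpuhotplug.h>.  The callback and driver names
 * below are hypothetical; assuming the usual API, a driver that needs a
 * per-CPU online/offline hook would do roughly:
 *
 *	#include <linux/cpuhotplug.h>
 *
 *	static int foo_cpu_online(unsigned int cpu)
 *	{
 *		// set up per-CPU state for @cpu
 *		return 0;
 *	}
 *
 *	static int foo_cpu_offline(unsigned int cpu)
 *	{
 *		// undo foo_cpu_online() for @cpu
 *		return 0;
 *	}
 *
 *	// CPUHP_AP_ONLINE_DYN requests a dynamically allocated state; the
 *	// returned (positive) state id is later passed to cpuhp_remove_state()
 *	// on module unload.
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
 *				foo_cpu_online, foo_cpu_offline);
 *
 * cpuhp_setup_state() fills in one cpuhp_step (name, startup.single,
 * teardown.single) and immediately runs the startup callback on all CPUs that
 * are already online.
 */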

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}

/**
 * cpuhp_invoke_callback _ Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail,
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}
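
/*
 * Illustrative sketch, not part of kernel/cpu.c: the multi_instance path
 * above iterates the hlist of per-instance nodes that clients register.
 * Assuming the <linux/cpuhotplug.h> API, a driver with one hotplug-sensitive
 * object per device would embed an hlist_node and do roughly this (names are
 * hypothetical, foo_offline is the symmetric teardown callback):
 *
 *	struct foo_device {
 *		struct hlist_node node;	// linked into cpuhp_step::list
 *	};
 *
 *	static int foo_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct foo_device *foo = hlist_entry(node, struct foo_device, node);
 *		// bring @foo up on @cpu
 *		return 0;
 *	}
 *
 *	state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo:online",
 *					foo_online, foo_offline);
 *	...
 *	cpuhp_state_add_instance(state, &foo->node);
 *
 * cpuhp_state_add_instance() runs the startup callback for the new instance
 * on each online CPU and is what ends up in the "Single invocation for
 * instance add/remove" branch of cpuhp_invoke_callback() above.
 */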

#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}

/*
 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL_GPL(cpu_maps_update_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL_GPL(cpu_maps_update_done);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
	return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	/*
	 * We can't have hotplug operations before userspace starts running,
	 * and some init codepaths will knowingly not take the hotplug lock.
	 * This is all valid, so mute lockdep until it makes sense to report
	 * unheld locks.
	 */
	if (system_state < SYSTEM_RUNNING)
		return;

	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

static void lockdep_acquire_cpus_lock(void)
{
	rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
}

static void lockdep_release_cpus_lock(void)
{
	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);

#else

static void lockdep_acquire_cpus_lock(void)
{
}

static void lockdep_release_cpus_lock(void)
{
}

#endif	/* CONFIG_HOTPLUG_CPU */
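
/*
 * Illustrative sketch, not part of kernel/cpu.c: typical callers of the
 * primitives above.  Code that must see a stable cpu_online_mask for a short
 * section takes the percpu-rwsem in read mode, while code that has to keep
 * hotplug blocked across a longer operation uses the disable/enable counter.
 * The foo_*() names below are hypothetical:
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		foo_prepare_cpu(cpu);	// no CPU can come or go here
 *	cpus_read_unlock();
 *
 *	cpu_hotplug_disable();		// cpu_up()/cpu_down() now return -EBUSY
 *	foo_long_running_operation();
 *	cpu_hotplug_enable();		// must balance the disable
 *
 * Both pairs may sleep (percpu_down_read() and cpu_add_remove_lock are
 * sleeping locks), so neither can be used from atomic context.
 */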
|
|
|
|
/*
|
|
* Architectures that need SMT-specific errata handling during SMT hotplug
|
|
* should override this.
|
|
*/
|
|
void __weak arch_smt_update(void) { }
|
|
|
|
#ifdef CONFIG_HOTPLUG_SMT
|
|
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
|
|
|
|
void __init cpu_smt_disable(bool force)
|
|
{
|
|
if (!cpu_smt_possible())
|
|
return;
|
|
|
|
if (force) {
|
|
pr_info("SMT: Force disabled\n");
|
|
cpu_smt_control = CPU_SMT_FORCE_DISABLED;
|
|
} else {
|
|
pr_info("SMT: disabled\n");
|
|
cpu_smt_control = CPU_SMT_DISABLED;
|
|
}
|
|
}
|
|
|
|
/*
|
|
* The decision whether SMT is supported can only be done after the full
|
|
* CPU identification. Called from architecture code.
|
|
*/
|
|
void __init cpu_smt_check_topology(void)
|
|
{
|
|
if (!topology_smt_supported())
|
|
cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
|
|
}
|
|
|
|
static int __init smt_cmdline_disable(char *str)
|
|
{
|
|
cpu_smt_disable(str && !strcmp(str, "force"));
|
|
return 0;
|
|
}
|
|
early_param("nosmt", smt_cmdline_disable);
|
|
|
|
static inline bool cpu_smt_allowed(unsigned int cpu)
|
|
{
|
|
if (cpu_smt_control == CPU_SMT_ENABLED)
|
|
return true;
|
|
|
|
if (topology_is_primary_thread(cpu))
|
|
return true;
|
|
|
|
/*
|
|
* On x86 it's required to boot all logical CPUs at least once so
|
|
* that the init code can get a chance to set CR4.MCE on each
|
|
* CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
|
|
* core will shutdown the machine.
|
|
*/
|
|
return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
|
|
}
|
|
|
|
/* Returns true if SMT is not supported of forcefully (irreversibly) disabled */
|
|
bool cpu_smt_possible(void)
|
|
{
|
|
return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
|
|
cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
|
|
}
|
|
EXPORT_SYMBOL_GPL(cpu_smt_possible);
|
|
#else
|
|
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
|
|
#endif
|
|
|
|
static inline enum cpuhp_state
|
|
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
|
|
{
|
|
enum cpuhp_state prev_state = st->state;
|
|
|
|
st->rollback = false;
|
|
st->last = NULL;
|
|
|
|
st->target = target;
|
|
st->single = false;
|
|
st->bringup = st->state < target;
|
|
|
|
return prev_state;
|
|
}
|
|
|
|
static inline void
|
|
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
|
|
{
|
|
st->rollback = true;
|
|
|
|
/*
|
|
* If we have st->last we need to undo partial multi_instance of this
|
|
* state first. Otherwise start undo at the previous state.
|
|
*/
|
|
if (!st->last) {
|
|
if (st->bringup)
|
|
st->state--;
|
|
else
|
|
st->state++;
|
|
}
|
|
|
|
st->target = prev_state;
|
|
st->bringup = !st->bringup;
|
|
}
|
|
|
|
/* Regular hotplug invocation of the AP hotplug thread */
|
|
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
|
|
{
|
|
if (!st->single && st->state == st->target)
|
|
return;
|
|
|
|
st->result = 0;
|
|
/*
|
|
* Make sure the above stores are visible before should_run becomes
|
|
* true. Paired with the mb() above in cpuhp_thread_fun()
|
|
*/
|
|
smp_mb();
|
|
st->should_run = true;
|
|
wake_up_process(st->thread);
|
|
wait_for_ap_thread(st, st->bringup);
|
|
}
|
|
|
|
static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
|
|
{
|
|
enum cpuhp_state prev_state;
|
|
int ret;
|
|
|
|
prev_state = cpuhp_set_state(st, target);
|
|
__cpuhp_kick_ap(st);
|
|
if ((ret = st->result)) {
|
|
cpuhp_reset_state(st, prev_state);
|
|
__cpuhp_kick_ap(st);
|
|
}
|
|
|
|
return ret;
|
|
}
|
|
|
|
static int bringup_wait_for_ap(unsigned int cpu)
|
|
{
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
|
|
/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
|
|
wait_for_ap_thread(st, true);
|
|
if (WARN_ON_ONCE((!cpu_online(cpu))))
|
|
return -ECANCELED;
|
|
|
|
/* Unpark the hotplug thread of the target cpu */
|
|
kthread_unpark(st->thread);
|
|
|
|
/*
|
|
* SMT soft disabling on X86 requires to bring the CPU out of the
|
|
* BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
|
|
* CPU marked itself as booted_once in notify_cpu_starting() so the
|
|
* cpu_smt_allowed() check will now return false if this is not the
|
|
* primary sibling.
|
|
*/
|
|
if (!cpu_smt_allowed(cpu))
|
|
return -ECANCELED;
|
|
|
|
if (st->target <= CPUHP_AP_ONLINE_IDLE)
|
|
return 0;
|
|
|
|
return cpuhp_kick_ap(st, st->target);
|
|
}
|
|
|
|
static int bringup_cpu(unsigned int cpu)
|
|
{
|
|
struct task_struct *idle = idle_thread_get(cpu);
|
|
int ret;
|
|
|
|
/*
|
|
* Reset stale stack state from the last time this CPU was online.
|
|
*/
|
|
scs_task_reset(idle);
|
|
kasan_unpoison_task_stack(idle);
|
|
|
|
/*
|
|
* Some architectures have to walk the irq descriptors to
|
|
* setup the vector space for the cpu which comes online.
|
|
* Prevent irq alloc/free across the bringup.
|
|
*/
|
|
irq_lock_sparse();
|
|
|
|
/* Arch-specific enabling code. */
|
|
ret = __cpu_up(cpu, idle);
|
|
irq_unlock_sparse();
|
|
if (ret)
|
|
return ret;
|
|
return bringup_wait_for_ap(cpu);
|
|
}
|
|
|
|
static int finish_cpu(unsigned int cpu)
|
|
{
|
|
struct task_struct *idle = idle_thread_get(cpu);
|
|
struct mm_struct *mm = idle->active_mm;
|
|
|
|
/*
|
|
* idle_task_exit() will have switched to &init_mm, now
|
|
* clean up any remaining active_mm state.
|
|
*/
|
|
if (mm != &init_mm)
|
|
idle->active_mm = &init_mm;
|
|
mmdrop(mm);
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* Hotplug state machine related functions
|
|
*/
|
|
|
|
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
|
|
{
|
|
for (st->state--; st->state > st->target; st->state--)
|
|
cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
|
|
}
|
|
|
|
static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
|
|
{
|
|
if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
|
|
return true;
|
|
/*
|
|
* When CPU hotplug is disabled, then taking the CPU down is not
|
|
* possible because takedown_cpu() and the architecture and
|
|
* subsystem specific mechanisms are not available. So the CPU
|
|
* which would be completely unplugged again needs to stay around
|
|
* in the current state.
|
|
*/
|
|
return st->state <= CPUHP_BRINGUP_CPU;
|
|
}
|
|
|
|
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
|
|
enum cpuhp_state target)
|
|
{
|
|
enum cpuhp_state prev_state = st->state;
|
|
int ret = 0;
|
|
|
|
while (st->state < target) {
|
|
st->state++;
|
|
ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
|
|
if (ret) {
|
|
if (can_rollback_cpu(st)) {
|
|
st->target = prev_state;
|
|
undo_cpu_up(cpu, st);
|
|
}
|
|
break;
|
|
}
|
|
}
|
|
return ret;
|
|
}
|
|
|
|
/*
|
|
* The cpu hotplug threads manage the bringup and teardown of the cpus
|
|
*/
|
|
static void cpuhp_create(unsigned int cpu)
|
|
{
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
|
|
init_completion(&st->done_up);
|
|
init_completion(&st->done_down);
|
|
}
|
|
|
|
static int cpuhp_should_run(unsigned int cpu)
|
|
{
|
|
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
|
|
|
|
return st->should_run;
|
|
}
|
|
|
|
/*
|
|
* Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
|
|
* callbacks when a state gets [un]installed at runtime.
|
|
*
|
|
* Each invocation of this function by the smpboot thread does a single AP
|
|
* state callback.
|
|
*
|
|
* It has 3 modes of operation:
|
|
* - single: runs st->cb_state
|
|
* - up: runs ++st->state, while st->state < st->target
|
|
* - down: runs st->state--, while st->state > st->target
|
|
*
|
|
* When complete or on error, should_run is cleared and the completion is fired.
|
|
*/
|
|
static void cpuhp_thread_fun(unsigned int cpu)
|
|
{
|
|
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
|
|
bool bringup = st->bringup;
|
|
enum cpuhp_state state;
|
|
|
|
if (WARN_ON_ONCE(!st->should_run))
|
|
return;
|
|
|
|
/*
|
|
* ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
|
|
* that if we see ->should_run we also see the rest of the state.
|
|
*/
|
|
smp_mb();
|
|
|
|
/*
|
|
* The BP holds the hotplug lock, but we're now running on the AP,
|
|
* ensure that anybody asserting the lock is held, will actually find
|
|
* it so.
|
|
*/
|
|
lockdep_acquire_cpus_lock();
|
|
cpuhp_lock_acquire(bringup);
|
|
|
|
if (st->single) {
|
|
state = st->cb_state;
|
|
st->should_run = false;
|
|
} else {
|
|
if (bringup) {
|
|
st->state++;
|
|
state = st->state;
|
|
st->should_run = (st->state < st->target);
|
|
WARN_ON_ONCE(st->state > st->target);
|
|
} else {
|
|
state = st->state;
|
|
st->state--;
|
|
st->should_run = (st->state > st->target);
|
|
WARN_ON_ONCE(st->state < st->target);
|
|
}
|
|
}
|
|
|
|
WARN_ON_ONCE(!cpuhp_is_ap_state(state));
|
|
|
|
if (cpuhp_is_atomic_state(state)) {
|
|
local_irq_disable();
|
|
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
|
|
local_irq_enable();
|
|
|
|
/*
|
|
* STARTING/DYING must not fail!
|
|
*/
|
|
WARN_ON_ONCE(st->result);
|
|
} else {
|
|
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
|
|
}
|
|
|
|
if (st->result) {
|
|
/*
|
|
* If we fail on a rollback, we're up a creek without a
|
|
* paddle, no way forward, no way back. We lose, thanks for
|
|
* playing.
|
|
*/
|
|
WARN_ON_ONCE(st->rollback);
|
|
st->should_run = false;
|
|
}
|
|
|
|
cpuhp_lock_release(bringup);
|
|
lockdep_release_cpus_lock();
|
|
|
|
if (!st->should_run)
|
|
complete_ap_thread(st, bringup);
|
|
}
|
|
|
|
/* Invoke a single callback on a remote cpu */
|
|
static int
|
|
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
|
|
struct hlist_node *node)
|
|
{
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
int ret;
|
|
|
|
if (!cpu_online(cpu))
|
|
return 0;
|
|
|
|
cpuhp_lock_acquire(false);
|
|
cpuhp_lock_release(false);
|
|
|
|
cpuhp_lock_acquire(true);
|
|
cpuhp_lock_release(true);
|
|
|
|
/*
|
|
* If we are up and running, use the hotplug thread. For early calls
|
|
* we invoke the thread function directly.
|
|
*/
|
|
if (!st->thread)
|
|
return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
|
|
|
|
st->rollback = false;
|
|
st->last = NULL;
|
|
|
|
st->node = node;
|
|
st->bringup = bringup;
|
|
st->cb_state = state;
|
|
st->single = true;
|
|
|
|
__cpuhp_kick_ap(st);
|
|
|
|
/*
|
|
* If we failed and did a partial, do a rollback.
|
|
*/
|
|
if ((ret = st->result) && st->last) {
|
|
st->rollback = true;
|
|
st->bringup = !bringup;
|
|
|
|
__cpuhp_kick_ap(st);
|
|
}
|
|
|
|
/*
|
|
* Clean up the leftovers so the next hotplug operation won't use stale
|
|
* data.
|
|
*/
|
|
st->node = st->last = NULL;
|
|
return ret;
|
|
}
|
|
|
|
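/*
 * Kick the AP hotplug thread towards st->target from the BP side and wait
 * for completion. The cpuhp_lock_acquire()/release() pairs below are pure
 * lockdep annotations.
 */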
static int cpuhp_kick_ap_work(unsigned int cpu)
|
|
{
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
enum cpuhp_state prev_state = st->state;
|
|
int ret;
|
|
|
|
cpuhp_lock_acquire(false);
|
|
cpuhp_lock_release(false);
|
|
|
|
cpuhp_lock_acquire(true);
|
|
cpuhp_lock_release(true);
|
|
|
|
trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
|
|
ret = cpuhp_kick_ap(st, st->target);
|
|
trace_cpuhp_exit(cpu, st->state, prev_state, ret);
|
|
|
|
return ret;
|
|
}
|
|
|
|
static struct smp_hotplug_thread cpuhp_threads = {
|
|
.store = &cpuhp_state.thread,
|
|
.create = &cpuhp_create,
|
|
.thread_should_run = cpuhp_should_run,
|
|
.thread_fn = cpuhp_thread_fun,
|
|
.thread_comm = "cpuhp/%u",
|
|
.selfparking = true,
|
|
};
|
|
|
|
void __init cpuhp_threads_init(void)
|
|
{
|
|
BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
|
|
kthread_unpark(this_cpu_read(cpuhp_state.thread));
|
|
}
|
|
|
|
/*
|
|
*
|
|
* Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
|
|
* protected region.
|
|
*
|
|
* The operation is still serialized against concurrent CPU hotplug via
|
|
* cpu_add_remove_lock, i.e. CPU map protection. But it is _not_
|
|
* serialized against other hotplug related activity like adding or
|
|
* removing of state callbacks and state instances, which invoke either the
|
|
* startup or the teardown callback of the affected state.
|
|
*
|
|
* This is required for subsystems which are unfixable vs. CPU hotplug and
|
|
* evade lock inversion problems by scheduling work which has to be
|
|
* completed _before_ cpu_up()/_cpu_down() returns.
|
|
*
|
|
* Don't even think about adding anything to this for any new code or even
|
|
* drivers. Its only purpose is to keep existing lock order trainwrecks
|
|
* working.
|
|
*
|
|
* For cpu_down() there might be valid reasons to finish cleanups which are
|
|
* not required to be done under cpu_hotplug_lock, but that's a different
|
|
* story and would not be invoked via this.
|
|
*/
|
|
static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
|
|
{
|
|
/*
|
|
* cpusets delegate hotplug operations to a worker to "solve" the
|
|
* lock order problems. Wait for the worker, but only if tasks are
|
|
* _not_ frozen (suspend, hibernate) as that would wait forever.
|
|
*
|
|
* The wait is required because otherwise the hotplug operation
|
|
* returns with inconsistent state, which could even be observed in
|
|
* user space when a new CPU is brought up. The CPU plug uevent
|
|
* would be delivered and user space reacting on it would fail to
|
|
* move tasks to the newly plugged CPU up to the point where the
|
|
* work has finished because up to that point the newly plugged CPU
|
|
* is not assignable in cpusets/cgroups. On unplug that's not
|
|
* necessarily a visible issue, but it is still inconsistent state,
|
|
* which is the real problem which needs to be "fixed". This can't
|
|
* prevent the transient state between scheduling the work and
|
|
* returning from waiting for it.
|
|
*/
|
|
if (!tasks_frozen)
|
|
cpuset_wait_for_hotplug();
|
|
}
|
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
#ifndef arch_clear_mm_cpumask_cpu
|
|
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
|
|
#endif
|
|
|
|
/**
|
|
* clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
|
|
* @cpu: a CPU id
|
|
*
|
|
* This function walks all processes, finds a valid mm struct for each one and
|
|
* then clears a corresponding bit in mm's cpumask. While this all sounds
|
|
* trivial, there are various non-obvious corner cases, which this function
|
|
* tries to solve in a safe manner.
|
|
*
|
|
* Also note that the function uses a somewhat relaxed locking scheme, so it may
|
|
* be called only for an already offlined CPU.
|
|
*/
|
|
void clear_tasks_mm_cpumask(int cpu)
|
|
{
|
|
struct task_struct *p;
|
|
|
|
/*
|
|
* This function is called after the cpu is taken down and marked
|
|
* offline, so it's not like new tasks will ever get this cpu set in
|
|
* their mm mask. -- Peter Zijlstra
|
|
* Thus, we may use rcu_read_lock() here, instead of grabbing
|
|
* full-fledged tasklist_lock.
|
|
*/
|
|
WARN_ON(cpu_online(cpu));
|
|
rcu_read_lock();
|
|
for_each_process(p) {
|
|
struct task_struct *t;
|
|
|
|
/*
|
|
* Main thread might exit, but other threads may still have
|
|
* a valid mm. Find one.
|
|
*/
|
|
t = find_lock_task_mm(p);
|
|
if (!t)
|
|
continue;
|
|
arch_clear_mm_cpumask_cpu(cpu, t->mm);
|
|
task_unlock(t);
|
|
}
|
|
rcu_read_unlock();
|
|
}
|
|
|
|
/* Take this CPU down. */
|
|
static int take_cpu_down(void *_param)
|
|
{
|
|
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
|
|
enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
|
|
int err, cpu = smp_processor_id();
|
|
int ret;
|
|
|
|
/* Ensure this CPU doesn't handle any more interrupts. */
|
|
err = __cpu_disable();
|
|
if (err < 0)
|
|
return err;
|
|
|
|
/*
|
|
* We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
|
|
* do this step again.
|
|
*/
|
|
WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
|
|
st->state--;
|
|
/* Invoke the former CPU_DYING callbacks */
|
|
for (; st->state > target; st->state--) {
|
|
ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
|
|
/*
|
|
* DYING must not fail!
|
|
*/
|
|
WARN_ON_ONCE(ret);
|
|
}
|
|
|
|
/* Give up timekeeping duties */
|
|
tick_handover_do_timer();
|
|
/* Remove CPU from timer broadcasting */
|
|
tick_offline_cpu(cpu);
|
|
/* Park the stopper thread */
|
|
stop_machine_park(cpu);
|
|
return 0;
|
|
}
|
|
|
|
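/*
 * Teardown callback for CPUHP_TEARDOWN_CPU: park the hotplug thread, run
 * take_cpu_down() via stop_machine() on the dying CPU, wait for it to reach
 * the idle-dead state and finally let the architecture reap it with
 * __cpu_die().
 */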
static int takedown_cpu(unsigned int cpu)
|
|
{
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
int err;
|
|
|
|
/* Park the smpboot threads */
|
|
kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
|
|
|
|
/*
|
|
* Prevent irq alloc/free while the dying cpu reorganizes the
|
|
* interrupt affinities.
|
|
*/
|
|
irq_lock_sparse();
|
|
|
|
/*
|
|
* So now all preempt/rcu users must observe !cpu_active().
|
|
*/
|
|
err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
|
|
if (err) {
|
|
/* CPU refused to die */
|
|
irq_unlock_sparse();
|
|
/* Unpark the hotplug thread so we can rollback there */
|
|
kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
|
|
return err;
|
|
}
|
|
BUG_ON(cpu_online(cpu));
|
|
|
|
/*
|
|
* The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
|
|
* all runnable tasks from the CPU, there's only the idle task left now
|
|
* that the migration thread is done doing the stop_machine thing.
|
|
*
|
|
* Wait for the stop thread to go away.
|
|
*/
|
|
wait_for_ap_thread(st, false);
|
|
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
|
|
|
|
/* Interrupts are moved away from the dying cpu, reenable alloc/free */
|
|
irq_unlock_sparse();
|
|
|
|
hotplug_cpu__broadcast_tick_pull(cpu);
|
|
/* This actually kills the CPU. */
|
|
__cpu_die(cpu);
|
|
|
|
tick_cleanup_dead_cpu(cpu);
|
|
rcutree_migrate_callbacks(cpu);
|
|
return 0;
|
|
}
|
|
|
|
static void cpuhp_complete_idle_dead(void *arg)
|
|
{
|
|
struct cpuhp_cpu_state *st = arg;
|
|
|
|
complete_ap_thread(st, false);
|
|
}
|
|
|
|
void cpuhp_report_idle_dead(void)
|
|
{
|
|
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
|
|
|
|
BUG_ON(st->state != CPUHP_AP_OFFLINE);
|
|
rcu_report_dead(smp_processor_id());
|
|
st->state = CPUHP_AP_IDLE_DEAD;
|
|
/*
|
|
* We cannot call complete after rcu_report_dead() so we delegate it
|
|
* to an online cpu.
|
|
*/
|
|
smp_call_function_single(cpumask_first(cpu_online_mask),
|
|
cpuhp_complete_idle_dead, st, 0);
|
|
}
|
|
|
|
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
|
|
{
|
|
for (st->state++; st->state < st->target; st->state++)
|
|
cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
|
|
}
|
|
|
|
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
|
|
enum cpuhp_state target)
|
|
{
|
|
enum cpuhp_state prev_state = st->state;
|
|
int ret = 0;
|
|
|
|
for (; st->state > target; st->state--) {
|
|
ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
|
|
if (ret) {
|
|
st->target = prev_state;
|
|
if (st->state < prev_state)
|
|
undo_cpu_down(cpu, st);
|
|
break;
|
|
}
|
|
}
|
|
return ret;
|
|
}
|
|
|
|
/* Requires cpu_add_remove_lock to be held */
|
|
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
|
|
enum cpuhp_state target)
|
|
{
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
int prev_state, ret = 0;
|
|
|
|
if (num_active_cpus() == 1 && cpu_active(cpu))
|
|
return -EBUSY;
|
|
|
|
if (!cpu_present(cpu))
|
|
return -EINVAL;
|
|
|
|
cpus_write_lock();
|
|
|
|
cpuhp_tasks_frozen = tasks_frozen;
|
|
|
|
prev_state = cpuhp_set_state(st, target);
|
|
/*
|
|
* If the current CPU state is in the range of the AP hotplug thread,
|
|
* then we need to kick the thread.
|
|
*/
|
|
if (st->state > CPUHP_TEARDOWN_CPU) {
|
|
st->target = max((int)target, CPUHP_TEARDOWN_CPU);
|
|
ret = cpuhp_kick_ap_work(cpu);
|
|
/*
|
|
* The AP side has done the error rollback already. Just
|
|
* return the error code.
|
|
*/
|
|
if (ret)
|
|
goto out;
|
|
|
|
/*
|
|
* We might have stopped still in the range of the AP hotplug
|
|
* thread. Nothing to do anymore.
|
|
*/
|
|
if (st->state > CPUHP_TEARDOWN_CPU)
|
|
goto out;
|
|
|
|
st->target = target;
|
|
}
|
|
/*
|
|
* The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
|
|
* to do the further cleanups.
|
|
*/
|
|
ret = cpuhp_down_callbacks(cpu, st, target);
|
|
if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
|
|
cpuhp_reset_state(st, prev_state);
|
|
__cpuhp_kick_ap(st);
|
|
}
|
|
|
|
out:
|
|
cpus_write_unlock();
|
|
/*
|
|
* Do post unplug cleanup. This is still protected against
|
|
* concurrent CPU hotplug via cpu_add_remove_lock.
|
|
*/
|
|
lockup_detector_cleanup();
|
|
arch_smt_update();
|
|
cpu_up_down_serialize_trainwrecks(tasks_frozen);
|
|
return ret;
|
|
}
|
|
|
|
static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
|
|
{
|
|
if (cpu_hotplug_disabled)
|
|
return -EBUSY;
|
|
return _cpu_down(cpu, 0, target);
|
|
}
|
|
|
|
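/* Offline @cpu down to @target while holding the cpu_add_remove_lock. */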
static int cpu_down(unsigned int cpu, enum cpuhp_state target)
|
|
{
|
|
int err;
|
|
|
|
cpu_maps_update_begin();
|
|
err = cpu_down_maps_locked(cpu, target);
|
|
cpu_maps_update_done();
|
|
return err;
|
|
}
|
|
|
|
/**
|
|
* cpu_device_down - Bring down a cpu device
|
|
* @dev: Pointer to the cpu device to offline
|
|
*
|
|
* This function is meant to be used by device core cpu subsystem only.
|
|
*
|
|
* Other subsystems should use remove_cpu() instead.
|
|
*/
|
|
int cpu_device_down(struct device *dev)
|
|
{
|
|
return cpu_down(dev->id, CPUHP_OFFLINE);
|
|
}
|
|
|
|
int remove_cpu(unsigned int cpu)
|
|
{
|
|
int ret;
|
|
|
|
lock_device_hotplug();
|
|
ret = device_offline(get_cpu_device(cpu));
|
|
unlock_device_hotplug();
|
|
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(remove_cpu);
|
|
|
|
extern int dl_cpu_busy(int cpu, struct task_struct *p);
|
|
|
|
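/*
 * Start draining the runqueues of all CPUs in @cpus and bail out on the
 * first CPU whose drain cannot be initiated.
 */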
int __pause_drain_rq(struct cpumask *cpus)
|
|
{
|
|
unsigned int cpu;
|
|
int err = 0;
|
|
|
|
/*
|
|
* Disabling preemption prevents one of the stoppers, started from
* sched_cpu_drain_rq(), from blocking the draining of the whole cpumask.
|
|
*/
|
|
preempt_disable();
|
|
for_each_cpu(cpu, cpus) {
|
|
err = sched_cpu_drain_rq(cpu);
|
|
if (err)
|
|
break;
|
|
}
|
|
preempt_enable();
|
|
|
|
return err;
|
|
}
|
|
|
|
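/* Wait until the runqueue drain has completed on every CPU in @cpus. */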
void __wait_drain_rq(struct cpumask *cpus)
|
|
{
|
|
unsigned int cpu;
|
|
|
|
for_each_cpu(cpu, cpus)
|
|
sched_cpu_drain_rq_wait(cpu);
|
|
}
|
|
|
|
/* if rt task, set to cfs and return previous prio */
|
|
static int pause_reduce_prio(void)
|
|
{
|
|
int prev_prio = -1;
|
|
|
|
if (current->prio < MAX_RT_PRIO) {
|
|
struct sched_param param = { .sched_priority = 0 };
|
|
|
|
prev_prio = current->prio;
|
|
sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
|
|
}
|
|
|
|
return prev_prio;
|
|
}
|
|
|
|
/* if previous prio was set, restore */
|
|
static void pause_restore_prio(int prev_prio)
|
|
{
|
|
if (prev_prio >= 0 && prev_prio < MAX_RT_PRIO) {
|
|
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1-prev_prio };
|
|
|
|
sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
|
|
}
|
|
}
|
|
|
|
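/*
 * pause_cpus - Deactivate the CPUs in @cpus without a full hotplug teardown.
 *
 * The CPUs are first removed from the active mask and their runqueues are
 * drained lazily, then the scheduler deactivation is completed under
 * cpus_write_lock(). The hotplug state is left at CPUHP_AP_ACTIVE - 1, so
 * the CPUs stay online but no longer receive tasks.
 */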
int pause_cpus(struct cpumask *cpus)
|
|
{
|
|
int err = 0;
|
|
int cpu;
|
|
u64 start_time = 0;
|
|
int prev_prio;
|
|
|
|
start_time = sched_clock();
|
|
|
|
cpu_maps_update_begin();
|
|
|
|
if (cpu_hotplug_disabled) {
|
|
err = -EBUSY;
|
|
goto err_cpu_maps_update;
|
|
}
|
|
|
|
/* Pausing an already inactive CPU isn't an error */
|
|
cpumask_and(cpus, cpus, cpu_active_mask);
|
|
|
|
for_each_cpu(cpu, cpus) {
|
|
if (!cpu_online(cpu) || dl_cpu_busy(cpu, NULL) ||
|
|
get_cpu_device(cpu)->offline_disabled == true) {
|
|
err = -EBUSY;
|
|
goto err_cpu_maps_update;
|
|
}
|
|
}
|
|
|
|
if (cpumask_weight(cpus) >= num_active_cpus()) {
|
|
err = -EBUSY;
|
|
goto err_cpu_maps_update;
|
|
}
|
|
|
|
if (cpumask_empty(cpus))
|
|
goto err_cpu_maps_update;
|
|
|
|
/*
|
|
* Lazy migration:
|
|
*
|
|
* We do care about how fast a CPU can go idle and stay in this
|
|
* state. If we try to take the cpus_write_lock() here, we would have
|
|
* to wait for a few dozen ms, as this function might schedule.
|
|
* However, we can, as a first step, flip the active mask and migrate
|
|
* anything currently on the run-queue, to give a chance to the paused
|
|
* CPUs to reach quickly an idle state. There's a risk meanwhile for
|
|
* another CPU to observe an out-of-date active_mask or to incompletely
|
|
* update a cpuset. Both problems would be resolved later in the slow
|
|
* path, which ensures active_mask synchronization, triggers a cpuset
|
|
* rebuild and migrate any task that would have escaped the lazy
|
|
* migration.
|
|
*/
|
|
for_each_cpu(cpu, cpus)
|
|
set_cpu_active(cpu, false);
|
|
err = __pause_drain_rq(cpus);
|
|
if (err) {
|
|
__wait_drain_rq(cpus);
|
|
for_each_cpu(cpu, cpus)
|
|
set_cpu_active(cpu, true);
|
|
goto err_cpu_maps_update;
|
|
}
|
|
|
|
prev_prio = pause_reduce_prio();
|
|
|
|
/*
|
|
* Slow path deactivation:
|
|
*
|
|
* Now that paused CPUs are most likely idle, we can go through a
|
|
* complete scheduler deactivation.
|
|
*
|
|
* Since the cpu_active_mask is already updated and cpus_write_lock calls
|
|
* synchronize_rcu(), we know that all preempt-disabled and RCU users
|
|
* will observe the updated value.
|
|
*/
|
|
cpus_write_lock();
|
|
|
|
__wait_drain_rq(cpus);
|
|
|
|
cpuhp_tasks_frozen = 0;
|
|
|
|
if (sched_cpus_deactivate_nosync(cpus)) {
|
|
err = -EBUSY;
|
|
goto err_cpus_write_unlock;
|
|
}
|
|
|
|
err = __pause_drain_rq(cpus);
|
|
__wait_drain_rq(cpus);
|
|
if (err) {
|
|
for_each_cpu(cpu, cpus)
|
|
sched_cpu_activate(cpu);
|
|
goto err_cpus_write_unlock;
|
|
}
|
|
|
|
/*
|
|
* Even though it lives on the side of the regular HP path, pause uses
* one of the HP steps (CPUHP_AP_ACTIVE). This should be reflected in the
|
|
* current state of the CPU.
|
|
*/
|
|
for_each_cpu(cpu, cpus) {
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
|
|
st->state = CPUHP_AP_ACTIVE - 1;
|
|
st->target = st->state;
|
|
}
|
|
|
|
err_cpus_write_unlock:
|
|
cpus_write_unlock();
|
|
pause_restore_prio(prev_prio);
|
|
err_cpu_maps_update:
|
|
cpu_maps_update_done();
|
|
|
|
trace_cpuhp_pause(cpus, start_time, 1);
|
|
|
|
return err;
|
|
}
|
|
EXPORT_SYMBOL_GPL(pause_cpus);
|
|
|
|
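/*
 * resume_cpus - Reactivate CPUs previously paused with pause_cpus().
 *
 * The CPUs are put back into the active mask, sched domains are rebuilt
 * and the hotplug state is restored to CPUHP_ONLINE.
 */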
int resume_cpus(struct cpumask *cpus)
|
|
{
|
|
unsigned int cpu;
|
|
int err = 0;
|
|
u64 start_time = 0;
|
|
int prev_prio;
|
|
|
|
start_time = sched_clock();
|
|
|
|
cpu_maps_update_begin();
|
|
|
|
if (cpu_hotplug_disabled) {
|
|
err = -EBUSY;
|
|
goto err_cpu_maps_update;
|
|
}
|
|
|
|
/* Resuming an already active CPU isn't an error */
|
|
cpumask_andnot(cpus, cpus, cpu_active_mask);
|
|
|
|
for_each_cpu(cpu, cpus) {
|
|
if (!cpu_online(cpu)) {
|
|
err = -EBUSY;
|
|
goto err_cpu_maps_update;
|
|
}
|
|
}
|
|
|
|
if (cpumask_empty(cpus))
|
|
goto err_cpu_maps_update;
|
|
|
|
for_each_cpu(cpu, cpus)
|
|
set_cpu_active(cpu, true);
|
|
|
|
trace_android_rvh_resume_cpus(cpus, &err);
|
|
if (err)
|
|
goto err_cpu_maps_update;
|
|
|
|
prev_prio = pause_reduce_prio();
|
|
|
|
/*
 * Lazy Resume. Build domains by scheduling a workqueue on the
 * resuming cpu. This way the resuming cpu can start working
 * earlier without adding extra load to other busy cpus.
 */
|
|
cpuset_update_active_cpus_affine(cpumask_first(cpus));
|
|
|
|
cpus_write_lock();
|
|
|
|
cpuhp_tasks_frozen = 0;
|
|
|
|
if (sched_cpus_activate(cpus)) {
|
|
err = -EBUSY;
|
|
goto err_cpus_write_unlock;
|
|
}
|
|
|
|
/*
|
|
* see pause_cpus.
|
|
*/
|
|
for_each_cpu(cpu, cpus) {
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
|
|
st->state = CPUHP_ONLINE;
|
|
st->target = st->state;
|
|
}
|
|
|
|
err_cpus_write_unlock:
|
|
cpus_write_unlock();
|
|
pause_restore_prio(prev_prio);
|
|
err_cpu_maps_update:
|
|
cpu_maps_update_done();
|
|
|
|
trace_cpuhp_pause(cpus, start_time, 0);
|
|
|
|
return err;
|
|
}
|
|
EXPORT_SYMBOL_GPL(resume_cpus);
|
|
|
|
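/*
 * Offline every CPU except @primary_cpu. Used on the reboot/kexec path,
 * hence CPU hotplug is disabled afterwards so nothing can online a CPU
 * behind our back.
 */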
void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
|
|
{
|
|
unsigned int cpu;
|
|
int error;
|
|
|
|
cpu_maps_update_begin();
|
|
|
|
/*
|
|
* Make certain the cpu I'm about to reboot on is online.
|
|
*
|
|
* This is in line with what migrate_to_reboot_cpu() already does.
|
|
*/
|
|
if (!cpu_online(primary_cpu))
|
|
primary_cpu = cpumask_first(cpu_online_mask);
|
|
|
|
for_each_online_cpu(cpu) {
|
|
if (cpu == primary_cpu)
|
|
continue;
|
|
|
|
error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
|
|
if (error) {
|
|
pr_err("Failed to offline CPU%d - error=%d",
|
|
cpu, error);
|
|
break;
|
|
}
|
|
}
|
|
|
|
/*
|
|
* Ensure all but the reboot CPU are offline.
|
|
*/
|
|
BUG_ON(num_online_cpus() > 1);
|
|
|
|
/*
|
|
* Make sure the CPUs won't be enabled by someone else after this
|
|
* point. Kexec will reboot to a new kernel shortly resetting
|
|
* everything along the way.
|
|
*/
|
|
cpu_hotplug_disabled++;
|
|
|
|
cpu_maps_update_done();
|
|
}
|
|
|
|
#else
|
|
#define takedown_cpu NULL
|
|
#endif /*CONFIG_HOTPLUG_CPU*/
|
|
|
|
/**
|
|
* notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
|
|
* @cpu: cpu that just started
|
|
*
|
|
* It must be called by the arch code on the new cpu, before the new cpu
|
|
* enables interrupts and before the "boot" cpu returns from __cpu_up().
|
|
*/
|
|
void notify_cpu_starting(unsigned int cpu)
|
|
{
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
|
|
int ret;
|
|
|
|
rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
|
|
cpumask_set_cpu(cpu, &cpus_booted_once_mask);
|
|
while (st->state < target) {
|
|
st->state++;
|
|
ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
|
|
/*
|
|
* STARTING must not fail!
|
|
*/
|
|
WARN_ON_ONCE(ret);
|
|
}
|
|
}
|
|
|
|
/*
|
|
* Called from the idle task. Wake up the controlling task which brings the
|
|
* hotplug thread of the upcoming CPU up and then delegates the rest of the
|
|
* online bringup to the hotplug thread.
|
|
*/
|
|
void cpuhp_online_idle(enum cpuhp_state state)
|
|
{
|
|
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
|
|
|
|
/* Happens for the boot cpu */
|
|
if (state != CPUHP_AP_ONLINE_IDLE)
|
|
return;
|
|
|
|
/*
|
|
* Unpark the stopper thread before we start the idle loop (and start
|
|
* scheduling); this ensures the stopper task is always available.
|
|
*/
|
|
stop_machine_unpark(smp_processor_id());
|
|
|
|
st->state = CPUHP_AP_ONLINE_IDLE;
|
|
complete_ap_thread(st, true);
|
|
}
|
|
|
|
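/*
 * Temporarily switch the caller to SCHED_FIFO so the hotplug operation is
 * not delayed by CFS preemption. Returns 0 on success, a positive value if
 * the task was not SCHED_NORMAL and was therefore left untouched, or a
 * negative error code.
 */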
static int switch_to_rt_policy(void)
|
|
{
|
|
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
|
|
unsigned int policy = current->policy;
|
|
|
|
if (policy == SCHED_NORMAL)
|
|
/* Switch to SCHED_FIFO from SCHED_NORMAL. */
|
|
return sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
|
|
else
|
|
return 1;
|
|
}
|
|
|
|
static int switch_to_fair_policy(void)
|
|
{
|
|
struct sched_param param = { .sched_priority = 0 };
|
|
|
|
return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
|
|
}
|
|
|
|
/* Requires cpu_add_remove_lock to be held */
|
|
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
|
|
{
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
struct task_struct *idle;
|
|
int ret = 0;
|
|
|
|
cpus_write_lock();
|
|
|
|
if (!cpu_present(cpu)) {
|
|
ret = -EINVAL;
|
|
goto out;
|
|
}
|
|
|
|
/*
|
|
* The caller of cpu_up() might have raced with another
|
|
* caller. Nothing to do.
|
|
*/
|
|
if (st->state >= target)
|
|
goto out;
|
|
|
|
if (st->state == CPUHP_OFFLINE) {
|
|
/* Let it fail before we try to bring the cpu up */
|
|
idle = idle_thread_get(cpu);
|
|
if (IS_ERR(idle)) {
|
|
ret = PTR_ERR(idle);
|
|
goto out;
|
|
}
|
|
}
|
|
|
|
cpuhp_tasks_frozen = tasks_frozen;
|
|
|
|
cpuhp_set_state(st, target);
|
|
/*
|
|
* If the current CPU state is in the range of the AP hotplug thread,
|
|
* then we need to kick the thread once more.
|
|
*/
|
|
if (st->state > CPUHP_BRINGUP_CPU) {
|
|
ret = cpuhp_kick_ap_work(cpu);
|
|
/*
|
|
* The AP side has done the error rollback already. Just
|
|
* return the error code.
|
|
*/
|
|
if (ret)
|
|
goto out;
|
|
}
|
|
|
|
/*
|
|
* Try to reach the target state. We max out on the BP at
|
|
* CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
|
|
* responsible for bringing it up to the target state.
|
|
*/
|
|
target = min((int)target, CPUHP_BRINGUP_CPU);
|
|
ret = cpuhp_up_callbacks(cpu, st, target);
|
|
out:
|
|
cpus_write_unlock();
|
|
arch_smt_update();
|
|
cpu_up_down_serialize_trainwrecks(tasks_frozen);
|
|
return ret;
|
|
}
|
|
|
|
static int cpu_up(unsigned int cpu, enum cpuhp_state target)
|
|
{
|
|
int err = 0;
|
|
int switch_err;
|
|
|
|
if (!cpu_possible(cpu)) {
|
|
pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
|
|
cpu);
|
|
#if defined(CONFIG_IA64)
|
|
pr_err("please check additional_cpus= boot parameter\n");
|
|
#endif
|
|
return -EINVAL;
|
|
}
|
|
|
|
trace_android_vh_cpu_up(cpu);
|
|
|
|
/*
|
|
* A CPU hotplug operation consists of many steps, each of which
* calls a callback of a core kernel subsystem. The hotplug-in
* operation may get preempted by other CFS tasks, delaying the
* whole operation. Switch the current task to SCHED_FIFO from
* SCHED_NORMAL so that hotplug-in completes quickly even under
* heavy load and the new CPU can start handling work sooner.
|
|
*/
|
|
|
|
switch_err = switch_to_rt_policy();
|
|
|
|
err = try_online_node(cpu_to_node(cpu));
|
|
if (err)
|
|
goto switch_out;
|
|
|
|
cpu_maps_update_begin();
|
|
|
|
if (cpu_hotplug_disabled) {
|
|
err = -EBUSY;
|
|
goto out;
|
|
}
|
|
if (!cpu_smt_allowed(cpu)) {
|
|
err = -EPERM;
|
|
goto out;
|
|
}
|
|
|
|
err = _cpu_up(cpu, 0, target);
|
|
out:
|
|
cpu_maps_update_done();
|
|
switch_out:
|
|
if (!switch_err) {
|
|
switch_err = switch_to_fair_policy();
|
|
if (switch_err)
|
|
pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
|
|
switch_err, current->comm, current->pid);
|
|
}
|
|
|
|
return err;
|
|
}
|
|
|
|
/**
|
|
* cpu_device_up - Bring up a cpu device
|
|
* @dev: Pointer to the cpu device to online
|
|
*
|
|
* This function is meant to be used by device core cpu subsystem only.
|
|
*
|
|
* Other subsystems should use add_cpu() instead.
|
|
*/
|
|
int cpu_device_up(struct device *dev)
|
|
{
|
|
return cpu_up(dev->id, CPUHP_ONLINE);
|
|
}
|
|
|
|
int add_cpu(unsigned int cpu)
|
|
{
|
|
int ret;
|
|
|
|
lock_device_hotplug();
|
|
ret = device_online(get_cpu_device(cpu));
|
|
unlock_device_hotplug();
|
|
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(add_cpu);
|
|
|
|
/**
|
|
* bringup_hibernate_cpu - Bring up the CPU that we hibernated on
|
|
* @sleep_cpu: The cpu we hibernated on and should be brought up.
|
|
*
|
|
* On some architectures like arm64, we can hibernate on any CPU, but on
|
|
* wake up the CPU we hibernated on might be offline as a side effect of
|
|
* using maxcpus= for example.
|
|
*/
|
|
int bringup_hibernate_cpu(unsigned int sleep_cpu)
|
|
{
|
|
int ret;
|
|
|
|
if (!cpu_online(sleep_cpu)) {
|
|
pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
|
|
ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
|
|
if (ret) {
|
|
pr_err("Failed to bring hibernate-CPU up!\n");
|
|
return ret;
|
|
}
|
|
}
|
|
return 0;
|
|
}
|
|
|
|
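/* Bring up all present CPUs at boot time, limited by setup_max_cpus. */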
void bringup_nonboot_cpus(unsigned int setup_max_cpus)
|
|
{
|
|
unsigned int cpu;
|
|
|
|
for_each_present_cpu(cpu) {
|
|
if (num_online_cpus() >= setup_max_cpus)
|
|
break;
|
|
if (!cpu_online(cpu))
|
|
cpu_up(cpu, CPUHP_ONLINE);
|
|
}
|
|
}
|
|
|
|
#ifdef CONFIG_PM_SLEEP_SMP
|
|
static cpumask_var_t frozen_cpus;
|
|
|
|
int freeze_secondary_cpus(int primary)
|
|
{
|
|
int cpu, error = 0;
|
|
|
|
cpu_maps_update_begin();
|
|
if (primary == -1) {
|
|
primary = cpumask_first(cpu_online_mask);
|
|
if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
|
|
primary = housekeeping_any_cpu(HK_FLAG_TIMER);
|
|
} else {
|
|
if (!cpu_online(primary))
|
|
primary = cpumask_first(cpu_online_mask);
|
|
}
|
|
|
|
/*
|
|
* We take down all of the non-boot CPUs in one shot to avoid races
|
|
* with the userspace trying to use the CPU hotplug at the same time
|
|
*/
|
|
cpumask_clear(frozen_cpus);
|
|
|
|
pr_info("Disabling non-boot CPUs ...\n");
|
|
for_each_online_cpu(cpu) {
|
|
if (cpu == primary)
|
|
continue;
|
|
|
|
if (pm_wakeup_pending()) {
|
|
pr_info("Wakeup pending. Abort CPU freeze\n");
|
|
error = -EBUSY;
|
|
break;
|
|
}
|
|
|
|
trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
|
|
error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
|
|
trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
|
|
if (!error)
|
|
cpumask_set_cpu(cpu, frozen_cpus);
|
|
else {
|
|
pr_err("Error taking CPU%d down: %d\n", cpu, error);
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (!error)
|
|
BUG_ON(num_online_cpus() > 1);
|
|
else
|
|
pr_err("Non-boot CPUs are not disabled\n");
|
|
|
|
/*
|
|
* Make sure the CPUs won't be enabled by someone else. We need to do
|
|
* this even in case of failure as all freeze_secondary_cpus() users are
|
|
* supposed to do thaw_secondary_cpus() on the failure path.
|
|
*/
|
|
cpu_hotplug_disabled++;
|
|
|
|
cpu_maps_update_done();
|
|
return error;
|
|
}
|
|
|
|
void __weak arch_thaw_secondary_cpus_begin(void)
|
|
{
|
|
}
|
|
|
|
void __weak arch_thaw_secondary_cpus_end(void)
|
|
{
|
|
}
|
|
|
|
void thaw_secondary_cpus(void)
|
|
{
|
|
int cpu, error;
|
|
struct device *cpu_device;
|
|
|
|
/* Allow everyone to use the CPU hotplug again */
|
|
cpu_maps_update_begin();
|
|
__cpu_hotplug_enable();
|
|
if (cpumask_empty(frozen_cpus))
|
|
goto out;
|
|
|
|
pr_info("Enabling non-boot CPUs ...\n");
|
|
|
|
arch_thaw_secondary_cpus_begin();
|
|
|
|
for_each_cpu(cpu, frozen_cpus) {
|
|
trace_suspend_resume(TPS("CPU_ON"), cpu, true);
|
|
error = _cpu_up(cpu, 1, CPUHP_ONLINE);
|
|
trace_suspend_resume(TPS("CPU_ON"), cpu, false);
|
|
if (!error) {
|
|
pr_info("CPU%d is up\n", cpu);
|
|
cpu_device = get_cpu_device(cpu);
|
|
if (!cpu_device)
|
|
pr_err("%s: failed to get cpu%d device\n",
|
|
__func__, cpu);
|
|
else
|
|
kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
|
|
continue;
|
|
}
|
|
pr_warn("Error taking CPU%d up: %d\n", cpu, error);
|
|
}
|
|
|
|
arch_thaw_secondary_cpus_end();
|
|
|
|
cpumask_clear(frozen_cpus);
|
|
out:
|
|
cpu_maps_update_done();
|
|
}
|
|
|
|
static int __init alloc_frozen_cpus(void)
|
|
{
|
|
if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
|
|
return -ENOMEM;
|
|
return 0;
|
|
}
|
|
core_initcall(alloc_frozen_cpus);
|
|
|
|
/*
|
|
* When callbacks for CPU hotplug notifications are being executed, we must
|
|
* ensure that the state of the system with respect to the tasks being frozen
|
|
* or not, as reported by the notification, remains unchanged *throughout the
|
|
* duration* of the execution of the callbacks.
|
|
* Hence we need to prevent the freezer from racing with regular CPU hotplug.
|
|
*
|
|
* This synchronization is implemented by mutually excluding regular CPU
|
|
* hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
|
|
* Hibernate notifications.
|
|
*/
|
|
static int
|
|
cpu_hotplug_pm_callback(struct notifier_block *nb,
|
|
unsigned long action, void *ptr)
|
|
{
|
|
switch (action) {
|
|
|
|
case PM_SUSPEND_PREPARE:
|
|
case PM_HIBERNATION_PREPARE:
|
|
cpu_hotplug_disable();
|
|
break;
|
|
|
|
case PM_POST_SUSPEND:
|
|
case PM_POST_HIBERNATION:
|
|
cpu_hotplug_enable();
|
|
break;
|
|
|
|
default:
|
|
return NOTIFY_DONE;
|
|
}
|
|
|
|
return NOTIFY_OK;
|
|
}
|
|
|
|
|
|
static int __init cpu_hotplug_pm_sync_init(void)
|
|
{
|
|
/*
|
|
* cpu_hotplug_pm_callback has higher priority than x86
|
|
* bsp_pm_callback which depends on cpu_hotplug_pm_callback
|
|
* to disable cpu hotplug to avoid cpu hotplug race.
|
|
*/
|
|
pm_notifier(cpu_hotplug_pm_callback, 0);
|
|
return 0;
|
|
}
|
|
core_initcall(cpu_hotplug_pm_sync_init);
|
|
|
|
#endif /* CONFIG_PM_SLEEP_SMP */
|
|
|
|
int __boot_cpu_id;
|
|
|
|
/* Horrific hacks because we can't add more to cpuhp_hp_states. */
|
|
static int random_and_perf_prepare_fusion(unsigned int cpu)
|
|
{
|
|
#ifdef CONFIG_PERF_EVENTS
|
|
perf_event_init_cpu(cpu);
|
|
#endif
|
|
random_prepare_cpu(cpu);
|
|
return 0;
|
|
}
|
|
static int random_and_workqueue_online_fusion(unsigned int cpu)
|
|
{
|
|
workqueue_online_cpu(cpu);
|
|
random_online_cpu(cpu);
|
|
return 0;
|
|
}
|
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
/* Boot processor state steps */
|
|
static struct cpuhp_step cpuhp_hp_states[] = {
|
|
[CPUHP_OFFLINE] = {
|
|
.name = "offline",
|
|
.startup.single = NULL,
|
|
.teardown.single = NULL,
|
|
},
|
|
#ifdef CONFIG_SMP
|
|
[CPUHP_CREATE_THREADS]= {
|
|
.name = "threads:prepare",
|
|
.startup.single = smpboot_create_threads,
|
|
.teardown.single = NULL,
|
|
.cant_stop = true,
|
|
},
|
|
[CPUHP_PERF_PREPARE] = {
|
|
.name = "perf:prepare",
|
|
.startup.single = random_and_perf_prepare_fusion,
|
|
.teardown.single = perf_event_exit_cpu,
|
|
},
|
|
[CPUHP_WORKQUEUE_PREP] = {
|
|
.name = "workqueue:prepare",
|
|
.startup.single = workqueue_prepare_cpu,
|
|
.teardown.single = NULL,
|
|
},
|
|
[CPUHP_HRTIMERS_PREPARE] = {
|
|
.name = "hrtimers:prepare",
|
|
.startup.single = hrtimers_prepare_cpu,
|
|
.teardown.single = hrtimers_dead_cpu,
|
|
},
|
|
[CPUHP_SMPCFD_PREPARE] = {
|
|
.name = "smpcfd:prepare",
|
|
.startup.single = smpcfd_prepare_cpu,
|
|
.teardown.single = smpcfd_dead_cpu,
|
|
},
|
|
[CPUHP_RELAY_PREPARE] = {
|
|
.name = "relay:prepare",
|
|
.startup.single = relay_prepare_cpu,
|
|
.teardown.single = NULL,
|
|
},
|
|
[CPUHP_SLAB_PREPARE] = {
|
|
.name = "slab:prepare",
|
|
.startup.single = slab_prepare_cpu,
|
|
.teardown.single = slab_dead_cpu,
|
|
},
|
|
[CPUHP_RCUTREE_PREP] = {
|
|
.name = "RCU/tree:prepare",
|
|
.startup.single = rcutree_prepare_cpu,
|
|
.teardown.single = rcutree_dead_cpu,
|
|
},
|
|
/*
|
|
* On the tear-down path, timers_dead_cpu() must be invoked
|
|
* before blk_mq_queue_reinit_notify() from notify_dead(),
|
|
* otherwise an RCU stall occurs.
|
|
*/
|
|
[CPUHP_TIMERS_PREPARE] = {
|
|
.name = "timers:prepare",
|
|
.startup.single = timers_prepare_cpu,
|
|
.teardown.single = timers_dead_cpu,
|
|
},
|
|
/* Kicks the plugged cpu into life */
|
|
[CPUHP_BRINGUP_CPU] = {
|
|
.name = "cpu:bringup",
|
|
.startup.single = bringup_cpu,
|
|
.teardown.single = finish_cpu,
|
|
.cant_stop = true,
|
|
},
|
|
/* Final state before CPU kills itself */
|
|
[CPUHP_AP_IDLE_DEAD] = {
|
|
.name = "idle:dead",
|
|
},
|
|
/*
|
|
* Last state before CPU enters the idle loop to die. Transient state
|
|
* for synchronization.
|
|
*/
|
|
[CPUHP_AP_OFFLINE] = {
|
|
.name = "ap:offline",
|
|
.cant_stop = true,
|
|
},
|
|
/* First state is scheduler control. Interrupts are disabled */
|
|
[CPUHP_AP_SCHED_STARTING] = {
|
|
.name = "sched:starting",
|
|
.startup.single = sched_cpu_starting,
|
|
.teardown.single = sched_cpu_dying,
|
|
},
|
|
[CPUHP_AP_RCUTREE_DYING] = {
|
|
.name = "RCU/tree:dying",
|
|
.startup.single = NULL,
|
|
.teardown.single = rcutree_dying_cpu,
|
|
},
|
|
[CPUHP_AP_SMPCFD_DYING] = {
|
|
.name = "smpcfd:dying",
|
|
.startup.single = NULL,
|
|
.teardown.single = smpcfd_dying_cpu,
|
|
},
|
|
/* Entry state on starting. Interrupts enabled from here on. Transient
|
|
* state for synchronization */
|
|
[CPUHP_AP_ONLINE] = {
|
|
.name = "ap:online",
|
|
},
|
|
/*
|
|
* Handled on the control processor until the plugged processor manages
|
|
* this itself.
|
|
*/
|
|
[CPUHP_TEARDOWN_CPU] = {
|
|
.name = "cpu:teardown",
|
|
.startup.single = NULL,
|
|
.teardown.single = takedown_cpu,
|
|
.cant_stop = true,
|
|
},
|
|
/* Handle smpboot threads park/unpark */
|
|
[CPUHP_AP_SMPBOOT_THREADS] = {
|
|
.name = "smpboot/threads:online",
|
|
.startup.single = smpboot_unpark_threads,
|
|
.teardown.single = smpboot_park_threads,
|
|
},
|
|
[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
|
|
.name = "irq/affinity:online",
|
|
.startup.single = irq_affinity_online_cpu,
|
|
.teardown.single = NULL,
|
|
},
|
|
[CPUHP_AP_PERF_ONLINE] = {
|
|
.name = "perf:online",
|
|
.startup.single = perf_event_init_cpu,
|
|
.teardown.single = perf_event_exit_cpu,
|
|
},
|
|
[CPUHP_AP_WATCHDOG_ONLINE] = {
|
|
.name = "lockup_detector:online",
|
|
.startup.single = lockup_detector_online_cpu,
|
|
.teardown.single = lockup_detector_offline_cpu,
|
|
},
|
|
[CPUHP_AP_WORKQUEUE_ONLINE] = {
|
|
.name = "workqueue:online",
|
|
.startup.single = random_and_workqueue_online_fusion,
|
|
.teardown.single = workqueue_offline_cpu,
|
|
},
|
|
[CPUHP_AP_RCUTREE_ONLINE] = {
|
|
.name = "RCU/tree:online",
|
|
.startup.single = rcutree_online_cpu,
|
|
.teardown.single = rcutree_offline_cpu,
|
|
},
|
|
#endif
|
|
/*
|
|
* The dynamically registered state space is here
|
|
*/
|
|
|
|
#ifdef CONFIG_SMP
|
|
/* Last state is scheduler control setting the cpu active */
|
|
[CPUHP_AP_ACTIVE] = {
|
|
.name = "sched:active",
|
|
.startup.single = sched_cpu_activate,
|
|
.teardown.single = sched_cpu_deactivate,
|
|
},
|
|
#endif
|
|
|
|
/* CPU is fully up and running. */
|
|
[CPUHP_ONLINE] = {
|
|
.name = "online",
|
|
.startup.single = NULL,
|
|
.teardown.single = NULL,
|
|
},
|
|
};
|
|
|
|
/* Sanity check for callbacks */
|
|
static int cpuhp_cb_check(enum cpuhp_state state)
|
|
{
|
|
if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
|
|
return -EINVAL;
|
|
return 0;
|
|
}
|
|
|
|
/*
|
|
* Returns a free slot for dynamic state assignment in the Online range. The states
|
|
* are protected by the cpuhp_slot_states mutex and an empty slot is identified
|
|
* by having no name assigned.
|
|
*/
|
|
static int cpuhp_reserve_state(enum cpuhp_state state)
|
|
{
|
|
enum cpuhp_state i, end;
|
|
struct cpuhp_step *step;
|
|
|
|
switch (state) {
|
|
case CPUHP_AP_ONLINE_DYN:
|
|
step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
|
|
end = CPUHP_AP_ONLINE_DYN_END;
|
|
break;
|
|
case CPUHP_BP_PREPARE_DYN:
|
|
step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
|
|
end = CPUHP_BP_PREPARE_DYN_END;
|
|
break;
|
|
default:
|
|
return -EINVAL;
|
|
}
|
|
|
|
for (i = state; i <= end; i++, step++) {
|
|
if (!step->name)
|
|
return i;
|
|
}
|
|
WARN(1, "No more dynamic states available for CPU hotplug\n");
|
|
return -ENOSPC;
|
|
}
|
|
|
|
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
|
|
int (*startup)(unsigned int cpu),
|
|
int (*teardown)(unsigned int cpu),
|
|
bool multi_instance)
|
|
{
|
|
/* (Un)Install the callbacks for further cpu hotplug operations */
|
|
struct cpuhp_step *sp;
|
|
int ret = 0;
|
|
|
|
/*
|
|
* If name is NULL, then the state gets removed.
|
|
*
|
|
* CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
|
|
* the first allocation from these dynamic ranges, so the removal
|
|
* would trigger a new allocation and clear the wrong (already
|
|
* empty) state, leaving the callbacks of the to be cleared state
|
|
* dangling, which causes wreckage on the next hotplug operation.
|
|
*/
|
|
if (name && (state == CPUHP_AP_ONLINE_DYN ||
|
|
state == CPUHP_BP_PREPARE_DYN)) {
|
|
ret = cpuhp_reserve_state(state);
|
|
if (ret < 0)
|
|
return ret;
|
|
state = ret;
|
|
}
|
|
sp = cpuhp_get_step(state);
|
|
if (name && sp->name)
|
|
return -EBUSY;
|
|
|
|
sp->startup.single = startup;
|
|
sp->teardown.single = teardown;
|
|
sp->name = name;
|
|
sp->multi_instance = multi_instance;
|
|
INIT_HLIST_HEAD(&sp->list);
|
|
return ret;
|
|
}
|
|
|
|
static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
|
|
{
|
|
return cpuhp_get_step(state)->teardown.single;
|
|
}
|
|
|
|
/*
|
|
* Call the startup/teardown function for a step either on the AP or
|
|
* on the current CPU.
|
|
*/
|
|
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
|
|
struct hlist_node *node)
|
|
{
|
|
struct cpuhp_step *sp = cpuhp_get_step(state);
|
|
int ret;
|
|
|
|
/*
|
|
* If there's nothing to do, we're done.
|
|
* Relies on the union for multi_instance.
|
|
*/
|
|
if ((bringup && !sp->startup.single) ||
|
|
(!bringup && !sp->teardown.single))
|
|
return 0;
|
|
/*
|
|
* The non AP bound callbacks can fail on bringup. On teardown
|
|
* e.g. module removal we crash for now.
|
|
*/
|
|
#ifdef CONFIG_SMP
|
|
if (cpuhp_is_ap_state(state))
|
|
ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
|
|
else
|
|
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
|
|
#else
|
|
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
|
|
#endif
|
|
BUG_ON(ret && !bringup);
|
|
return ret;
|
|
}
|
|
|
|
/*
|
|
* Called from __cpuhp_setup_state on a recoverable failure.
|
|
*
|
|
* Note: The teardown callbacks for rollback are not allowed to fail!
|
|
*/
|
|
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
|
|
struct hlist_node *node)
|
|
{
|
|
int cpu;
|
|
|
|
/* Roll back the already executed steps on the other cpus */
|
|
for_each_present_cpu(cpu) {
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
int cpustate = st->state;
|
|
|
|
if (cpu >= failedcpu)
|
|
break;
|
|
|
|
/* Did we invoke the startup call on that cpu ? */
|
|
if (cpustate >= state)
|
|
cpuhp_issue_call(cpu, state, false, node);
|
|
}
|
|
}
|
|
|
|
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
|
|
struct hlist_node *node,
|
|
bool invoke)
|
|
{
|
|
struct cpuhp_step *sp;
|
|
int cpu;
|
|
int ret;
|
|
|
|
lockdep_assert_cpus_held();
|
|
|
|
sp = cpuhp_get_step(state);
|
|
if (sp->multi_instance == false)
|
|
return -EINVAL;
|
|
|
|
mutex_lock(&cpuhp_state_mutex);
|
|
|
|
if (!invoke || !sp->startup.multi)
|
|
goto add_node;
|
|
|
|
/*
|
|
* Try to call the startup callback for each present cpu
|
|
* depending on the hotplug state of the cpu.
|
|
*/
|
|
for_each_present_cpu(cpu) {
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
int cpustate = st->state;
|
|
|
|
if (cpustate < state)
|
|
continue;
|
|
|
|
ret = cpuhp_issue_call(cpu, state, true, node);
|
|
if (ret) {
|
|
if (sp->teardown.multi)
|
|
cpuhp_rollback_install(cpu, state, node);
|
|
goto unlock;
|
|
}
|
|
}
|
|
add_node:
|
|
ret = 0;
|
|
hlist_add_head(node, &sp->list);
|
|
unlock:
|
|
mutex_unlock(&cpuhp_state_mutex);
|
|
return ret;
|
|
}
|
|
|
|
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
|
|
bool invoke)
|
|
{
|
|
int ret;
|
|
|
|
cpus_read_lock();
|
|
ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
|
|
cpus_read_unlock();
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
|
|
|
|
/**
|
|
* __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
|
|
* @state: The state to setup
|
|
* @invoke: If true, the startup function is invoked for cpus where
|
|
* cpu state >= @state
|
|
* @startup: startup callback function
|
|
* @teardown: teardown callback function
|
|
* @multi_instance: State is set up for multiple instances which get
|
|
* added afterwards.
|
|
*
|
|
* The caller needs to hold cpus read locked while calling this function.
|
|
* Returns:
|
|
* On success:
|
|
* Positive state number if @state is CPUHP_AP_ONLINE_DYN
|
|
* 0 for all other states
|
|
* On failure: proper (negative) error code
|
|
*/
|
|
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
|
|
const char *name, bool invoke,
|
|
int (*startup)(unsigned int cpu),
|
|
int (*teardown)(unsigned int cpu),
|
|
bool multi_instance)
|
|
{
|
|
int cpu, ret = 0;
|
|
bool dynstate;
|
|
|
|
lockdep_assert_cpus_held();
|
|
|
|
if (cpuhp_cb_check(state) || !name)
|
|
return -EINVAL;
|
|
|
|
mutex_lock(&cpuhp_state_mutex);
|
|
|
|
ret = cpuhp_store_callbacks(state, name, startup, teardown,
|
|
multi_instance);
|
|
|
|
dynstate = state == CPUHP_AP_ONLINE_DYN;
|
|
if (ret > 0 && dynstate) {
|
|
state = ret;
|
|
ret = 0;
|
|
}
|
|
|
|
if (ret || !invoke || !startup)
|
|
goto out;
|
|
|
|
/*
|
|
* Try to call the startup callback for each present cpu
|
|
* depending on the hotplug state of the cpu.
|
|
*/
|
|
for_each_present_cpu(cpu) {
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
int cpustate = st->state;
|
|
|
|
if (cpustate < state)
|
|
continue;
|
|
|
|
ret = cpuhp_issue_call(cpu, state, true, NULL);
|
|
if (ret) {
|
|
if (teardown)
|
|
cpuhp_rollback_install(cpu, state, NULL);
|
|
cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
|
|
goto out;
|
|
}
|
|
}
|
|
out:
|
|
mutex_unlock(&cpuhp_state_mutex);
|
|
/*
|
|
* If the requested state is CPUHP_AP_ONLINE_DYN, return the
|
|
* dynamically allocated state in case of success.
|
|
*/
|
|
if (!ret && dynstate)
|
|
return state;
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
|
|
|
|
int __cpuhp_setup_state(enum cpuhp_state state,
|
|
const char *name, bool invoke,
|
|
int (*startup)(unsigned int cpu),
|
|
int (*teardown)(unsigned int cpu),
|
|
bool multi_instance)
|
|
{
|
|
int ret;
|
|
|
|
cpus_read_lock();
|
|
ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
|
|
teardown, multi_instance);
|
|
cpus_read_unlock();
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(__cpuhp_setup_state);
|
|
|
|
int __cpuhp_state_remove_instance(enum cpuhp_state state,
|
|
struct hlist_node *node, bool invoke)
|
|
{
|
|
struct cpuhp_step *sp = cpuhp_get_step(state);
|
|
int cpu;
|
|
|
|
BUG_ON(cpuhp_cb_check(state));
|
|
|
|
if (!sp->multi_instance)
|
|
return -EINVAL;
|
|
|
|
cpus_read_lock();
|
|
mutex_lock(&cpuhp_state_mutex);
|
|
|
|
if (!invoke || !cpuhp_get_teardown_cb(state))
|
|
goto remove;
|
|
/*
|
|
* Call the teardown callback for each present cpu depending
|
|
* on the hotplug state of the cpu. This function is not
|
|
* allowed to fail currently!
|
|
*/
|
|
for_each_present_cpu(cpu) {
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
int cpustate = st->state;
|
|
|
|
if (cpustate >= state)
|
|
cpuhp_issue_call(cpu, state, false, node);
|
|
}
|
|
|
|
remove:
|
|
hlist_del(node);
|
|
mutex_unlock(&cpuhp_state_mutex);
|
|
cpus_read_unlock();
|
|
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
|
|
|
|
/**
|
|
* __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
|
|
* @state: The state to remove
|
|
* @invoke: If true, the teardown function is invoked for cpus where
|
|
* cpu state >= @state
|
|
*
|
|
* The caller needs to hold cpus read locked while calling this function.
|
|
* The teardown callback is currently not allowed to fail. Think
|
|
* about module removal!
|
|
*/
|
|
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
|
|
{
|
|
struct cpuhp_step *sp = cpuhp_get_step(state);
|
|
int cpu;
|
|
|
|
BUG_ON(cpuhp_cb_check(state));
|
|
|
|
lockdep_assert_cpus_held();
|
|
|
|
mutex_lock(&cpuhp_state_mutex);
|
|
if (sp->multi_instance) {
|
|
WARN(!hlist_empty(&sp->list),
|
|
"Error: Removing state %d which has instances left.\n",
|
|
state);
|
|
goto remove;
|
|
}
|
|
|
|
if (!invoke || !cpuhp_get_teardown_cb(state))
|
|
goto remove;
|
|
|
|
/*
|
|
* Call the teardown callback for each present cpu depending
|
|
* on the hotplug state of the cpu. This function is not
|
|
* allowed to fail currently!
|
|
*/
|
|
for_each_present_cpu(cpu) {
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
|
|
int cpustate = st->state;
|
|
|
|
if (cpustate >= state)
|
|
cpuhp_issue_call(cpu, state, false, NULL);
|
|
}
|
|
remove:
|
|
cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
|
|
mutex_unlock(&cpuhp_state_mutex);
|
|
}
|
|
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
|
|
|
|
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
|
|
{
|
|
cpus_read_lock();
|
|
__cpuhp_remove_state_cpuslocked(state, invoke);
|
|
cpus_read_unlock();
|
|
}
|
|
EXPORT_SYMBOL(__cpuhp_remove_state);
|
|
|
|
#ifdef CONFIG_HOTPLUG_SMT
|
|
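/*
 * Mirror the SMT-control induced offlining into the device core state and
 * send the corresponding uevent to user space.
 */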
static void cpuhp_offline_cpu_device(unsigned int cpu)
|
|
{
|
|
struct device *dev = get_cpu_device(cpu);
|
|
|
|
dev->offline = true;
|
|
/* Tell user space about the state change */
|
|
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
|
|
}
|
|
|
|
static void cpuhp_online_cpu_device(unsigned int cpu)
|
|
{
|
|
struct device *dev = get_cpu_device(cpu);
|
|
|
|
dev->offline = false;
|
|
/* Tell user space about the state change */
|
|
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
|
|
}
|
|
|
|
int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
|
|
{
|
|
int cpu, ret = 0;
|
|
|
|
cpu_maps_update_begin();
|
|
for_each_online_cpu(cpu) {
|
|
if (topology_is_primary_thread(cpu))
|
|
continue;
|
|
ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
|
|
if (ret)
|
|
break;
|
|
/*
|
|
* As this needs to hold the cpu maps lock it's impossible
|
|
* to call device_offline() because that ends up calling
|
|
* cpu_down() which takes cpu maps lock. cpu maps lock
|
|
* needs to be held as this might race against in-kernel
|
|
* abusers of the hotplug machinery (thermal management).
|
|
*
|
|
* So nothing would update device:offline state. That would
|
|
* leave the sysfs entry stale and prevent onlining after
|
|
* smt control has been changed to 'off' again. This is
|
|
* called under the sysfs hotplug lock, so it is properly
|
|
* serialized against the regular offline usage.
|
|
*/
|
|
cpuhp_offline_cpu_device(cpu);
|
|
}
|
|
if (!ret)
|
|
cpu_smt_control = ctrlval;
|
|
cpu_maps_update_done();
|
|
return ret;
|
|
}
|
|
|
|
int cpuhp_smt_enable(void)
|
|
{
|
|
int cpu, ret = 0;
|
|
|
|
cpu_maps_update_begin();
|
|
cpu_smt_control = CPU_SMT_ENABLED;
|
|
for_each_present_cpu(cpu) {
|
|
/* Skip online CPUs and CPUs on offline nodes */
|
|
if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
|
|
continue;
|
|
ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
|
|
if (ret)
|
|
break;
|
|
/* See comment in cpuhp_smt_disable() */
|
|
cpuhp_online_cpu_device(cpu);
|
|
}
|
|
cpu_maps_update_done();
|
|
return ret;
|
|
}
|
|
#endif
|
|
|
|
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
|
|
static ssize_t show_cpuhp_state(struct device *dev,
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
|
|
|
|
return sprintf(buf, "%d\n", st->state);
|
|
}
|
|
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
|
|
|
|
static ssize_t write_cpuhp_target(struct device *dev,
|
|
struct device_attribute *attr,
|
|
const char *buf, size_t count)
|
|
{
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
|
|
struct cpuhp_step *sp;
|
|
int target, ret;
|
|
|
|
ret = kstrtoint(buf, 10, &target);
|
|
if (ret)
|
|
return ret;
|
|
|
|
#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
|
|
if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
|
|
return -EINVAL;
|
|
#else
|
|
if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
|
|
return -EINVAL;
|
|
#endif
|
|
|
|
ret = lock_device_hotplug_sysfs();
|
|
if (ret)
|
|
return ret;
|
|
|
|
mutex_lock(&cpuhp_state_mutex);
|
|
sp = cpuhp_get_step(target);
|
|
ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
|
|
mutex_unlock(&cpuhp_state_mutex);
|
|
if (ret)
|
|
goto out;
|
|
|
|
if (st->state < target)
|
|
ret = cpu_up(dev->id, target);
|
|
else if (st->state > target)
|
|
ret = cpu_down(dev->id, target);
|
|
else if (WARN_ON(st->target != target))
|
|
st->target = target;
|
|
out:
|
|
unlock_device_hotplug();
|
|
return ret ? ret : count;
|
|
}
|
|
|
|
static ssize_t show_cpuhp_target(struct device *dev,
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
|
|
|
|
return sprintf(buf, "%d\n", st->target);
|
|
}
|
|
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
|
|
|
|
|
|
static ssize_t write_cpuhp_fail(struct device *dev,
|
|
struct device_attribute *attr,
|
|
const char *buf, size_t count)
|
|
{
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
|
|
struct cpuhp_step *sp;
|
|
int fail, ret;
|
|
|
|
ret = kstrtoint(buf, 10, &fail);
|
|
if (ret)
|
|
return ret;
|
|
|
|
if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
|
|
return -EINVAL;
|
|
|
|
/*
|
|
* Cannot fail STARTING/DYING callbacks.
|
|
*/
|
|
if (cpuhp_is_atomic_state(fail))
|
|
return -EINVAL;
|
|
|
|
/*
|
|
* Cannot fail anything that doesn't have callbacks.
|
|
*/
|
|
mutex_lock(&cpuhp_state_mutex);
|
|
sp = cpuhp_get_step(fail);
|
|
if (!sp->startup.single && !sp->teardown.single)
|
|
ret = -EINVAL;
|
|
mutex_unlock(&cpuhp_state_mutex);
|
|
if (ret)
|
|
return ret;
|
|
|
|
st->fail = fail;
|
|
|
|
return count;
|
|
}
|
|
|
|
static ssize_t show_cpuhp_fail(struct device *dev,
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
|
|
|
|
return sprintf(buf, "%d\n", st->fail);
|
|
}
|
|
|
|
static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
|
|
|
|
static struct attribute *cpuhp_cpu_attrs[] = {
|
|
&dev_attr_state.attr,
|
|
&dev_attr_target.attr,
|
|
&dev_attr_fail.attr,
|
|
NULL
|
|
};
|
|
|
|
static const struct attribute_group cpuhp_cpu_attr_group = {
|
|
.attrs = cpuhp_cpu_attrs,
|
|
.name = "hotplug",
|
|
NULL
|
|
};
|
|
|
|
static ssize_t show_cpuhp_states(struct device *dev,
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
ssize_t cur, res = 0;
|
|
int i;
|
|
|
|
mutex_lock(&cpuhp_state_mutex);
|
|
for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
|
|
struct cpuhp_step *sp = cpuhp_get_step(i);
|
|
|
|
if (sp->name) {
|
|
cur = sprintf(buf, "%3d: %s\n", i, sp->name);
|
|
buf += cur;
|
|
res += cur;
|
|
}
|
|
}
|
|
mutex_unlock(&cpuhp_state_mutex);
|
|
return res;
|
|
}
|
|
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
|
|
|
|
static struct attribute *cpuhp_cpu_root_attrs[] = {
|
|
&dev_attr_states.attr,
|
|
NULL
|
|
};
|
|
|
|
static const struct attribute_group cpuhp_cpu_root_attr_group = {
|
|
.attrs = cpuhp_cpu_root_attrs,
|
|
.name = "hotplug",
|
|
NULL
|
|
};
|
|
|
|
#ifdef CONFIG_HOTPLUG_SMT
|
|
|
|
static ssize_t
|
|
__store_smt_control(struct device *dev, struct device_attribute *attr,
|
|
const char *buf, size_t count)
|
|
{
|
|
int ctrlval, ret;
|
|
|
|
if (sysfs_streq(buf, "on"))
|
|
ctrlval = CPU_SMT_ENABLED;
|
|
else if (sysfs_streq(buf, "off"))
|
|
ctrlval = CPU_SMT_DISABLED;
|
|
else if (sysfs_streq(buf, "forceoff"))
|
|
ctrlval = CPU_SMT_FORCE_DISABLED;
|
|
else
|
|
return -EINVAL;
|
|
|
|
if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
|
|
return -EPERM;
|
|
|
|
if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
|
|
return -ENODEV;
|
|
|
|
ret = lock_device_hotplug_sysfs();
|
|
if (ret)
|
|
return ret;
|
|
|
|
if (ctrlval != cpu_smt_control) {
|
|
switch (ctrlval) {
|
|
case CPU_SMT_ENABLED:
|
|
ret = cpuhp_smt_enable();
|
|
break;
|
|
case CPU_SMT_DISABLED:
|
|
case CPU_SMT_FORCE_DISABLED:
|
|
ret = cpuhp_smt_disable(ctrlval);
|
|
break;
|
|
}
|
|
}
|
|
|
|
unlock_device_hotplug();
|
|
return ret ? ret : count;
|
|
}
|
|
|
|
#else /* !CONFIG_HOTPLUG_SMT */
|
|
static ssize_t
|
|
__store_smt_control(struct device *dev, struct device_attribute *attr,
|
|
const char *buf, size_t count)
|
|
{
|
|
return -ENODEV;
|
|
}
|
|
#endif /* CONFIG_HOTPLUG_SMT */
|
|
|
|
static const char *smt_states[] = {
|
|
[CPU_SMT_ENABLED] = "on",
|
|
[CPU_SMT_DISABLED] = "off",
|
|
[CPU_SMT_FORCE_DISABLED] = "forceoff",
|
|
[CPU_SMT_NOT_SUPPORTED] = "notsupported",
|
|
[CPU_SMT_NOT_IMPLEMENTED] = "notimplemented",
|
|
};
|
|
|
|
static ssize_t
|
|
show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
|
|
{
|
|
const char *state = smt_states[cpu_smt_control];
|
|
|
|
return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
|
|
}
|
|
|
|
static ssize_t
|
|
store_smt_control(struct device *dev, struct device_attribute *attr,
|
|
const char *buf, size_t count)
|
|
{
|
|
return __store_smt_control(dev, attr, buf, count);
|
|
}
|
|
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
|
|
|
|
static ssize_t
|
|
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
|
|
{
|
|
return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
|
|
}
|
|
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
|
|
|
|
static struct attribute *cpuhp_smt_attrs[] = {
|
|
&dev_attr_control.attr,
|
|
&dev_attr_active.attr,
|
|
NULL
|
|
};
|
|
|
|
static const struct attribute_group cpuhp_smt_attr_group = {
|
|
.attrs = cpuhp_smt_attrs,
|
|
.name = "smt",
|
|
NULL
|
|
};
|
|
|
|
static int __init cpu_smt_sysfs_init(void)
{
	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
				  &cpuhp_smt_attr_group);
}

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = cpu_smt_sysfs_init();
	if (ret)
		return ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */

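/*
 * Example resulting layout (assuming the usual per-CPU attribute group
 * with "state", "target" and "fail" defined earlier in this file):
 *
 *	/sys/devices/system/cpu/hotplug/states
 *	/sys/devices/system/cpu/smt/{control,active}
 *	/sys/devices/system/cpu/cpuN/hotplug/{state,target,fail}
 */
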
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

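/*
 * Sketch of how cpumask_of() consumes this table (simplified from the
 * get_cpu_mask() helper in include/linux/cpumask.h; shown here only to
 * illustrate the aliasing trick):
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * Row (cpu % BITS_PER_LONG) + 1 contains a single word with just that bit
 * set.  Stepping the pointer back by cpu / BITS_PER_LONG words lines that
 * word up at the right offset inside an NR_CPUS-bit mask, and every other
 * word the mask covers is zero - which is why the all-zero row 0 exists,
 * so the pointer can safely "back into" it.
 */
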
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

atomic_t __num_online_cpus __read_mostly;
EXPORT_SYMBOL(__num_online_cpus);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

void set_cpu_online(unsigned int cpu, bool online)
{
	/*
	 * atomic_inc/dec() is required to handle the horrid abuse of this
	 * function by the reboot and kexec code which invoke it from
	 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
	 * regular CPU hotplug is properly serialized.
	 *
	 * Note, that the fact that __num_online_cpus is of type atomic_t
	 * does not protect readers which are not serialized against
	 * concurrent hotplug operations.
	 */
	if (online) {
		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
			atomic_inc(&__num_online_cpus);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
			atomic_dec(&__num_online_cpus);
	}
}

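/*
 * The atomic counter only keeps num_online_cpus() itself consistent;
 * readers that need a stable view of the online mask must still hold the
 * hotplug lock.  A typical pattern (do_something() is a placeholder):
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);
 *	cpus_read_unlock();
 */
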
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
#endif
	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}

/*
 * These are used for a global "mitigations=" cmdline option for toggling
 * optional CPU mitigations.
 */
enum cpu_mitigations {
	CPU_MITIGATIONS_OFF,
	CPU_MITIGATIONS_AUTO,
	CPU_MITIGATIONS_AUTO_NOSMT,
};

static enum cpu_mitigations cpu_mitigations __ro_after_init =
	CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;
	else if (!strcmp(arg, "auto,nosmt"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
	else
		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
			arg);

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);

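/*
 * Example kernel command lines accepted by the parser above:
 *
 *	mitigations=off		disable all optional CPU mitigations
 *	mitigations=auto	default: mitigate, leave SMT as configured
 *	mitigations=auto,nosmt	mitigate, and disable SMT if a mitigation
 *				requires it
 *
 * See Documentation/admin-guide/kernel-parameters.txt for the full
 * description of each mode.
 */
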
/* mitigations=off */
bool cpu_mitigations_off(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_OFF;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_off);

/* mitigations=auto,nosmt */
bool cpu_mitigations_auto_nosmt(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
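
/*
 * Sketch of a typical consumer in architecture bug/mitigation code; the
 * enable_*/disable_* helpers below are hypothetical stand-ins for the
 * arch-specific selection logic:
 *
 *	if (cpu_mitigations_off())
 *		return;				// user opted out entirely
 *	enable_default_mitigations();
 *	if (cpu_mitigations_auto_nosmt())
 *		disable_smt_if_vulnerable();
 */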