android_kernel_xiaomi_sm8450/kernel/sched/rt.c
Greg Kroah-Hartman 4ec3c2eea5 Merge 5.10.94 into android12-5.10-lts
Changes in 5.10.94
	KVM: VMX: switch blocked_vcpu_on_cpu_lock to raw spinlock
	HID: uhid: Fix worker destroying device without any protection
	HID: wacom: Reset expected and received contact counts at the same time
	HID: wacom: Ignore the confidence flag when a touch is removed
	HID: wacom: Avoid using stale array indicies to read contact count
	f2fs: fix to do sanity check in is_alive()
	nfc: llcp: fix NULL error pointer dereference on sendmsg() after failed bind()
	mtd: rawnand: gpmi: Add ERR007117 protection for nfc_apply_timings
	mtd: rawnand: gpmi: Remove explicit default gpmi clock setting for i.MX6
	mtd: Fixed breaking list in __mtd_del_partition.
	mtd: rawnand: davinci: Don't calculate ECC when reading page
	mtd: rawnand: davinci: Avoid duplicated page read
	mtd: rawnand: davinci: Rewrite function description
	x86/gpu: Reserve stolen memory for first integrated Intel GPU
	tools/nolibc: x86-64: Fix startup code bug
	tools/nolibc: i386: fix initial stack alignment
	tools/nolibc: fix incorrect truncation of exit code
	rtc: cmos: take rtc_lock while reading from CMOS
	media: v4l2-ioctl.c: readbuffers depends on V4L2_CAP_READWRITE
	media: flexcop-usb: fix control-message timeouts
	media: mceusb: fix control-message timeouts
	media: em28xx: fix control-message timeouts
	media: cpia2: fix control-message timeouts
	media: s2255: fix control-message timeouts
	media: dib0700: fix undefined behavior in tuner shutdown
	media: redrat3: fix control-message timeouts
	media: pvrusb2: fix control-message timeouts
	media: stk1160: fix control-message timeouts
	media: cec-pin: fix interrupt en/disable handling
	can: softing_cs: softingcs_probe(): fix memleak on registration failure
	iio: adc: ti-adc081c: Partial revert of removal of ACPI IDs
	lkdtm: Fix content of section containing lkdtm_rodata_do_nothing()
	iommu/io-pgtable-arm-v7s: Add error handle for page table allocation failure
	gpu: host1x: Add back arm_iommu_detach_device()
	dma_fence_array: Fix PENDING_ERROR leak in dma_fence_array_signaled()
	PCI: Add function 1 DMA alias quirk for Marvell 88SE9125 SATA controller
	mm_zone: add function to check if managed dma zone exists
	dma/pool: create dma atomic pool only if dma zone has managed pages
	mm/page_alloc.c: do not warn allocation failure on zone DMA if no managed pages
	shmem: fix a race between shmem_unused_huge_shrink and shmem_evict_inode
	drm/ttm: Put BO in its memory manager's lru list
	Bluetooth: L2CAP: Fix not initializing sk_peer_pid
	drm/bridge: display-connector: fix an uninitialized pointer in probe()
	drm: fix null-ptr-deref in drm_dev_init_release()
	drm/panel: kingdisplay-kd097d04: Delete panel on attach() failure
	drm/panel: innolux-p079zca: Delete panel on attach() failure
	drm/rockchip: dsi: Fix unbalanced clock on probe error
	drm/rockchip: dsi: Hold pm-runtime across bind/unbind
	drm/rockchip: dsi: Disable PLL clock on bind error
	drm/rockchip: dsi: Reconfigure hardware on resume()
	Bluetooth: cmtp: fix possible panic when cmtp_init_sockets() fails
	clk: bcm-2835: Pick the closest clock rate
	clk: bcm-2835: Remove rounding up the dividers
	drm/vc4: hdmi: Set a default HSM rate
	wcn36xx: ensure pairing of init_scan/finish_scan and start_scan/end_scan
	wcn36xx: Indicate beacon not connection loss on MISSED_BEACON_IND
	wcn36xx: Fix DMA channel enable/disable cycle
	wcn36xx: Release DMA channel descriptor allocations
	wcn36xx: Put DXE block into reset before freeing memory
	wcn36xx: populate band before determining rate on RX
	wcn36xx: fix RX BD rate mapping for 5GHz legacy rates
	ath11k: Send PPDU_STATS_CFG with proper pdev mask to firmware
	mtd: hyperbus: rpc-if: Check return value of rpcif_sw_init()
	media: videobuf2: Fix the size printk format
	media: atomisp: add missing media_device_cleanup() in atomisp_unregister_entities()
	media: atomisp: fix punit_ddr_dvfs_enable() argument for mrfld_power up case
	media: atomisp: fix inverted logic in buffers_needed()
	media: atomisp: do not use err var when checking port validity for ISP2400
	media: atomisp: fix inverted error check for ia_css_mipi_is_source_port_valid()
	media: atomisp: fix ifdefs in sh_css.c
	media: staging: media: atomisp: pci: Balance braces around conditional statements in file atomisp_cmd.c
	media: atomisp: add NULL check for asd obtained from atomisp_video_pipe
	media: atomisp: fix enum formats logic
	media: atomisp: fix uninitialized bug in gmin_get_pmic_id_and_addr()
	media: aspeed: fix mode-detect always time out at 2nd run
	media: em28xx: fix memory leak in em28xx_init_dev
	media: aspeed: Update signal status immediately to ensure sane hw state
	arm64: dts: amlogic: meson-g12: Fix GPU operating point table node name
	arm64: dts: amlogic: Fix SPI NOR flash node name for ODROID N2/N2+
	arm64: dts: meson-gxbb-wetek: fix HDMI in early boot
	arm64: dts: meson-gxbb-wetek: fix missing GPIO binding
	fs: dlm: use sk->sk_socket instead of con->sock
	fs: dlm: don't call kernel_getpeername() in error_report()
	memory: renesas-rpc-if: Return error in case devm_ioremap_resource() fails
	Bluetooth: stop proccessing malicious adv data
	ath11k: Fix ETSI regd with weather radar overlap
	ath11k: clear the keys properly via DISABLE_KEY
	ath11k: reset RSN/WPA present state for open BSS
	tee: fix put order in teedev_close_context()
	fs: dlm: fix build with CONFIG_IPV6 disabled
	drm/vboxvideo: fix a NULL vs IS_ERR() check
	arm64: dts: renesas: cat875: Add rx/tx delays
	media: dmxdev: fix UAF when dvb_register_device() fails
	crypto: qce - fix uaf on qce_ahash_register_one
	crypto: qce - fix uaf on qce_skcipher_register_one
	mtd: hyperbus: rpc-if: fix bug in rpcif_hb_remove
	ARM: dts: stm32: fix dtbs_check warning on ili9341 dts binding on stm32f429 disco
	crypto: qat - fix spelling mistake: "messge" -> "message"
	crypto: qat - remove unnecessary collision prevention step in PFVF
	crypto: qat - make pfvf send message direction agnostic
	crypto: qat - fix undetected PFVF timeout in ACK loop
	ath11k: Use host CE parameters for CE interrupts configuration
	arm64: dts: ti: k3-j721e: correct cache-sets info
	tty: serial: atmel: Check return code of dmaengine_submit()
	tty: serial: atmel: Call dma_async_issue_pending()
	mfd: atmel-flexcom: Remove #ifdef CONFIG_PM_SLEEP
	mfd: atmel-flexcom: Use .resume_noirq
	media: rcar-csi2: Correct the selection of hsfreqrange
	media: imx-pxp: Initialize the spinlock prior to using it
	media: si470x-i2c: fix possible memory leak in si470x_i2c_probe()
	media: mtk-vcodec: call v4l2_m2m_ctx_release first when file is released
	media: coda: fix CODA960 JPEG encoder buffer overflow
	media: venus: pm_helpers: Control core power domain manually
	media: venus: core, venc, vdec: Fix probe dependency error
	media: venus: core: Fix a potential NULL pointer dereference in an error handling path
	media: venus: core: Fix a resource leak in the error handling path of 'venus_probe()'
	thermal/drivers/imx: Implement runtime PM support
	netfilter: bridge: add support for pppoe filtering
	arm64: dts: qcom: msm8916: fix MMC controller aliases
	cgroup: Trace event cgroup id fields should be u64
	ACPI: EC: Rework flushing of EC work while suspended to idle
	thermal/drivers/imx8mm: Enable ADC when enabling monitor
	drm/amdgpu: Fix a NULL pointer dereference in amdgpu_connector_lcd_native_mode()
	drm/radeon/radeon_kms: Fix a NULL pointer dereference in radeon_driver_open_kms()
	arm64: dts: ti: k3-j7200: Fix the L2 cache sets
	arm64: dts: ti: k3-j721e: Fix the L2 cache sets
	arm64: dts: ti: k3-j7200: Correct the d-cache-sets info
	tty: serial: uartlite: allow 64 bit address
	serial: amba-pl011: do not request memory region twice
	floppy: Fix hang in watchdog when disk is ejected
	staging: rtl8192e: return error code from rtllib_softmac_init()
	staging: rtl8192e: rtllib_module: fix error handle case in alloc_rtllib()
	Bluetooth: btmtksdio: fix resume failure
	sched/fair: Fix detection of per-CPU kthreads waking a task
	sched/fair: Fix per-CPU kthread and wakee stacking for asym CPU capacity
	bpf: Adjust BTF log size limit.
	bpf: Disallow BPF_LOG_KERNEL log level for bpf(BPF_BTF_LOAD)
	bpf: Remove config check to enable bpf support for branch records
	arm64: lib: Annotate {clear, copy}_page() as position-independent
	arm64: clear_page() shouldn't use DC ZVA when DCZID_EL0.DZP == 1
	media: dib8000: Fix a memleak in dib8000_init()
	media: saa7146: mxb: Fix a NULL pointer dereference in mxb_attach()
	media: si2157: Fix "warm" tuner state detection
	wireless: iwlwifi: Fix a double free in iwl_txq_dyn_alloc_dma
	sched/rt: Try to restart rt period timer when rt runtime exceeded
	drm/msm/dp: displayPort driver need algorithm rational
	rcu/exp: Mark current CPU as exp-QS in IPI loop second pass
	mwifiex: Fix possible ABBA deadlock
	xfrm: fix a small bug in xfrm_sa_len()
	x86/uaccess: Move variable into switch case statement
	selftests: clone3: clone3: add case CLONE3_ARGS_NO_TEST
	selftests: harness: avoid false negatives if test has no ASSERTs
	crypto: stm32 - Fix last sparse warning in stm32_cryp_check_ctr_counter
	crypto: stm32/cryp - fix CTR counter carry
	crypto: stm32/cryp - fix xts and race condition in crypto_engine requests
	crypto: stm32/cryp - check early input data
	crypto: stm32/cryp - fix double pm exit
	crypto: stm32/cryp - fix lrw chaining mode
	crypto: stm32/cryp - fix bugs and crash in tests
	crypto: stm32 - Revert broken pm_runtime_resume_and_get changes
	ath11k: Fix deleting uninitialized kernel timer during fragment cache flush
	ARM: dts: gemini: NAS4220-B: fis-index-block with 128 KiB sectors
	media: dw2102: Fix use after free
	media: msi001: fix possible null-ptr-deref in msi001_probe()
	media: coda/imx-vdoa: Handle dma_set_coherent_mask error codes
	ath11k: Fix a NULL pointer dereference in ath11k_mac_op_hw_scan()
	arm64: dts: qcom: c630: Fix soundcard setup
	arm64: dts: qcom: ipq6018: Fix gpio-ranges property
	drm/msm/dpu: fix safe status debugfs file
	drm/bridge: ti-sn65dsi86: Set max register for regmap
	drm/tegra: vic: Fix DMA API misuse
	media: hantro: Fix probe func error path
	xfrm: interface with if_id 0 should return error
	xfrm: state and policy should fail if XFRMA_IF_ID 0
	ARM: 9159/1: decompressor: Avoid UNPREDICTABLE NOP encoding
	usb: ftdi-elan: fix memory leak on device disconnect
	arm64: dts: marvell: cn9130: add GPIO and SPI aliases
	arm64: dts: marvell: cn9130: enable CP0 GPIO controllers
	ARM: dts: armada-38x: Add generic compatible to UART nodes
	iwlwifi: mvm: fix 32-bit build in FTM
	iwlwifi: mvm: test roc running status bits before removing the sta
	mmc: meson-mx-sdhc: add IRQ check
	mmc: meson-mx-sdio: add IRQ check
	selinux: fix potential memleak in selinux_add_opt()
	um: fix ndelay/udelay defines
	um: virtio_uml: Fix time-travel external time propagation
	Bluetooth: L2CAP: Fix using wrong mode
	bpftool: Enable line buffering for stdout
	backlight: qcom-wled: Validate enabled string indices in DT
	backlight: qcom-wled: Pass number of elements to read to read_u32_array
	backlight: qcom-wled: Fix off-by-one maximum with default num_strings
	backlight: qcom-wled: Override default length with qcom,enabled-strings
	backlight: qcom-wled: Use cpu_to_le16 macro to perform conversion
	backlight: qcom-wled: Respect enabled-strings in set_brightness
	software node: fix wrong node passed to find nargs_prop
	Bluetooth: hci_qca: Stop IBS timer during BT OFF
	x86/boot/compressed: Move CLANG_FLAGS to beginning of KBUILD_CFLAGS
	hwmon: (mr75203) fix wrong power-up delay value
	x86/mce/inject: Avoid out-of-bounds write when setting flags
	ACPI: scan: Create platform device for BCM4752 and LNV4752 ACPI nodes
	pcmcia: rsrc_nonstatic: Fix a NULL pointer dereference in __nonstatic_find_io_region()
	pcmcia: rsrc_nonstatic: Fix a NULL pointer dereference in nonstatic_find_mem_region()
	power: reset: mt6397: Check for null res pointer
	netfilter: ipt_CLUSTERIP: fix refcount leak in clusterip_tg_check()
	bpf: Don't promote bogus looking registers after null check.
	bpf: Fix SO_RCVBUF/SO_SNDBUF handling in _bpf_setsockopt().
	netfilter: nft_set_pipapo: allocate pcpu scratch maps on clone
	ppp: ensure minimum packet size in ppp_write()
	rocker: fix a sleeping in atomic bug
	staging: greybus: audio: Check null pointer
	fsl/fman: Check for null pointer after calling devm_ioremap
	Bluetooth: hci_bcm: Check for error irq
	Bluetooth: hci_qca: Fix NULL vs IS_ERR_OR_NULL check in qca_serdev_probe
	usb: dwc3: qcom: Fix NULL vs IS_ERR checking in dwc3_qcom_probe
	HID: hid-uclogic-params: Invalid parameter check in uclogic_params_init
	HID: hid-uclogic-params: Invalid parameter check in uclogic_params_get_str_desc
	HID: hid-uclogic-params: Invalid parameter check in uclogic_params_huion_init
	HID: hid-uclogic-params: Invalid parameter check in uclogic_params_frame_init_v1_buttonpad
	debugfs: lockdown: Allow reading debugfs files that are not world readable
	net/mlx5e: Fix page DMA map/unmap attributes
	net/mlx5e: Don't block routes with nexthop objects in SW
	Revert "net/mlx5e: Block offload of outer header csum for UDP tunnels"
	net/mlx5: Set command entry semaphore up once got index free
	lib/mpi: Add the return value check of kcalloc()
	Bluetooth: L2CAP: uninitialized variables in l2cap_sock_setsockopt()
	spi: spi-meson-spifc: Add missing pm_runtime_disable() in meson_spifc_probe
	ax25: uninitialized variable in ax25_setsockopt()
	netrom: fix api breakage in nr_setsockopt()
	regmap: Call regmap_debugfs_exit() prior to _init()
	can: mcp251xfd: add missing newline to printed strings
	tpm: add request_locality before write TPM_INT_ENABLE
	tpm_tis: Fix an error handling path in 'tpm_tis_core_init()'
	can: softing: softing_startstop(): fix set but not used variable warning
	can: xilinx_can: xcan_probe(): check for error irq
	pcmcia: fix setting of kthread task states
	iwlwifi: mvm: Use div_s64 instead of do_div in iwl_mvm_ftm_rtt_smoothing()
	net: mcs7830: handle usb read errors properly
	ext4: avoid trim error on fs with small groups
	ALSA: jack: Add missing rwsem around snd_ctl_remove() calls
	ALSA: PCM: Add missing rwsem around snd_ctl_remove() calls
	ALSA: hda: Add missing rwsem around snd_ctl_remove() calls
	RDMA/bnxt_re: Scan the whole bitmap when checking if "disabling RCFW with pending cmd-bit"
	RDMA/hns: Validate the pkey index
	scsi: pm80xx: Update WARN_ON check in pm8001_mpi_build_cmd()
	clk: imx8mn: Fix imx8mn_clko1_sels
	powerpc/prom_init: Fix improper check of prom_getprop()
	ASoC: uniphier: drop selecting non-existing SND_SOC_UNIPHIER_AIO_DMA
	dt-bindings: thermal: Fix definition of cooling-maps contribution property
	powerpc/64s: Convert some cpu_setup() and cpu_restore() functions to C
	powerpc/perf: MMCR0 control for PMU registers under PMCC=00
	powerpc/perf: move perf irq/nmi handling details into traps.c
	powerpc/irq: Add helper to set regs->softe
	powerpc/perf: Fix PMU callbacks to clear pending PMI before resetting an overflown PMC
	powerpc/32s: Fix shift-out-of-bounds in KASAN init
	clocksource: Reduce clocksource-skew threshold
	clocksource: Avoid accidental unstable marking of clocksources
	ALSA: oss: fix compile error when OSS_DEBUG is enabled
	ALSA: usb-audio: Drop superfluous '0' in Presonus Studio 1810c's ID
	char/mwave: Adjust io port register size
	binder: fix handling of error during copy
	openrisc: Add clone3 ABI wrapper
	iommu/io-pgtable-arm: Fix table descriptor paddr formatting
	scsi: ufs: Fix race conditions related to driver data
	RDMA/qedr: Fix reporting max_{send/recv}_wr attrs
	PCI/MSI: Fix pci_irq_vector()/pci_irq_get_affinity()
	powerpc/powermac: Add additional missing lockdep_register_key()
	RDMA/core: Let ib_find_gid() continue search even after empty entry
	RDMA/cma: Let cma_resolve_ib_dev() continue search even after empty entry
	ASoC: rt5663: Handle device_property_read_u32_array error codes
	of: unittest: fix warning on PowerPC frame size warning
	of: unittest: 64 bit dma address test requires arch support
	clk: stm32: Fix ltdc's clock turn off by clk_disable_unused() after system enter shell
	mips: add SYS_HAS_CPU_MIPS64_R5 config for MIPS Release 5 support
	mips: fix Kconfig reference to PHYS_ADDR_T_64BIT
	dmaengine: pxa/mmp: stop referencing config->slave_id
	iommu/amd: Remove iommu_init_ga()
	iommu/amd: Restore GA log/tail pointer on host resume
	ASoC: Intel: catpt: Test dmaengine_submit() result before moving on
	iommu/iova: Fix race between FQ timeout and teardown
	scsi: block: pm: Always set request queue runtime active in blk_post_runtime_resume()
	phy: uniphier-usb3ss: fix unintended writing zeros to PHY register
	ASoC: mediatek: Check for error clk pointer
	ASoC: samsung: idma: Check of ioremap return value
	misc: lattice-ecp3-config: Fix task hung when firmware load failed
	counter: stm32-lptimer-cnt: remove iio counter abi
	arm64: tegra: Fix Tegra194 HDA {clock,reset}-names ordering
	arm64: tegra: Remove non existent Tegra194 reset
	mips: lantiq: add support for clk_set_parent()
	mips: bcm63xx: add support for clk_set_parent()
	powerpc/xive: Add missing null check after calling kmalloc
	ASoC: fsl_mqs: fix MODULE_ALIAS
	RDMA/cxgb4: Set queue pair state when being queried
	ASoC: fsl_asrc: refine the check of available clock divider
	clk: bm1880: remove kfrees on static allocations
	of: base: Fix phandle argument length mismatch error message
	ARM: dts: omap3-n900: Fix lp5523 for multi color
	Bluetooth: Fix debugfs entry leak in hci_register_dev()
	fs: dlm: filter user dlm messages for kernel locks
	drm/lima: fix warning when CONFIG_DEBUG_SG=y & CONFIG_DMA_API_DEBUG=y
	selftests/bpf: Fix bpf_object leak in skb_ctx selftest
	ar5523: Fix null-ptr-deref with unexpected WDCMSG_TARGET_START reply
	drm/bridge: dw-hdmi: handle ELD when DRM_BRIDGE_ATTACH_NO_CONNECTOR
	drm/nouveau/pmu/gm200-: avoid touching PMU outside of DEVINIT/PREOS/ACR
	media: atomisp: fix try_fmt logic
	media: atomisp: set per-device's default mode
	media: atomisp-ov2680: Fix ov2680_set_fmt() clobbering the exposure
	ARM: shmobile: rcar-gen2: Add missing of_node_put()
	batman-adv: allow netlink usage in unprivileged containers
	media: atomisp: handle errors at sh_css_create_isp_params()
	ath11k: Fix crash caused by uninitialized TX ring
	usb: gadget: f_fs: Use stream_open() for endpoint files
	drm: panel-orientation-quirks: Add quirk for the Lenovo Yoga Book X91F/L
	HID: apple: Do not reset quirks when the Fn key is not found
	media: b2c2: Add missing check in flexcop_pci_isr:
	EDAC/synopsys: Use the quirk for version instead of ddr version
	ARM: imx: rename DEBUG_IMX21_IMX27_UART to DEBUG_IMX27_UART
	drm/amd/display: check top_pipe_to_program pointer
	drm/amdgpu/display: set vblank_disable_immediate for DC
	soc: ti: pruss: fix referenced node in error message
	mlxsw: pci: Add shutdown method in PCI driver
	drm/bridge: megachips: Ensure both bridges are probed before registration
	tty: serial: imx: disable UCR4_OREN in .stop_rx() instead of .shutdown()
	gpiolib: acpi: Do not set the IRQ type if the IRQ is already in use
	HSI: core: Fix return freed object in hsi_new_client
	crypto: jitter - consider 32 LSB for APT
	mwifiex: Fix skb_over_panic in mwifiex_usb_recv()
	rsi: Fix use-after-free in rsi_rx_done_handler()
	rsi: Fix out-of-bounds read in rsi_read_pkt()
	ath11k: Avoid NULL ptr access during mgmt tx cleanup
	media: venus: avoid calling core_clk_setrate() concurrently during concurrent video sessions
	ACPI / x86: Drop PWM2 device on Lenovo Yoga Book from always present table
	ACPI: Change acpi_device_always_present() into acpi_device_override_status()
	ACPI / x86: Allow specifying acpi_device_override_status() quirks by path
	ACPI / x86: Add not-present quirk for the PCI0.SDHB.BRC1 device on the GPD win
	arm64: dts: ti: j7200-main: Fix 'dtbs_check' serdes_ln_ctrl node
	usb: uhci: add aspeed ast2600 uhci support
	floppy: Add max size check for user space request
	x86/mm: Flush global TLB when switching to trampoline page-table
	drm: rcar-du: Fix CRTC timings when CMM is used
	media: uvcvideo: Increase UVC_CTRL_CONTROL_TIMEOUT to 5 seconds.
	media: rcar-vin: Update format alignment constraints
	media: saa7146: hexium_orion: Fix a NULL pointer dereference in hexium_attach()
	media: m920x: don't use stack on USB reads
	thunderbolt: Runtime PM activate both ends of the device link
	iwlwifi: mvm: synchronize with FW after multicast commands
	iwlwifi: mvm: avoid clearing a just saved session protection id
	ath11k: avoid deadlock by change ieee80211_queue_work for regd_update_work
	ath10k: Fix tx hanging
	net-sysfs: update the queue counts in the unregistration path
	net: phy: prefer 1000baseT over 1000baseKX
	gpio: aspeed: Convert aspeed_gpio.lock to raw_spinlock
	selftests/ftrace: make kprobe profile testcase description unique
	ath11k: Avoid false DEADLOCK warning reported by lockdep
	x86/mce: Allow instrumentation during task work queueing
	x86/mce: Mark mce_panic() noinstr
	x86/mce: Mark mce_end() noinstr
	x86/mce: Mark mce_read_aux() noinstr
	net: bonding: debug: avoid printing debug logs when bond is not notifying peers
	bpf: Do not WARN in bpf_warn_invalid_xdp_action()
	HID: quirks: Allow inverting the absolute X/Y values
	media: igorplugusb: receiver overflow should be reported
	media: saa7146: hexium_gemini: Fix a NULL pointer dereference in hexium_attach()
	mmc: core: Fixup storing of OCR for MMC_QUIRK_NONSTD_SDIO
	audit: ensure userspace is penalized the same as the kernel when under pressure
	arm64: dts: ls1028a-qds: move rtc node to the correct i2c bus
	arm64: tegra: Adjust length of CCPLEX cluster MMIO region
	PM: runtime: Add safety net to supplier device release
	cpufreq: Fix initialization of min and max frequency QoS requests
	usb: hub: Add delay for SuperSpeed hub resume to let links transit to U0
	ath9k: Fix out-of-bound memcpy in ath9k_hif_usb_rx_stream
	rtw88: 8822c: update rx settings to prevent potential hw deadlock
	PM: AVS: qcom-cpr: Use div64_ul instead of do_div
	iwlwifi: fix leaks/bad data after failed firmware load
	iwlwifi: remove module loading failure message
	iwlwifi: mvm: Fix calculation of frame length
	iwlwifi: pcie: make sure prph_info is set when treating wakeup IRQ
	um: registers: Rename function names to avoid conflicts and build problems
	ath11k: Fix napi related hang
	Bluetooth: vhci: Set HCI_QUIRK_VALID_LE_STATES
	xfrm: rate limit SA mapping change message to user space
	drm/etnaviv: consider completed fence seqno in hang check
	jffs2: GC deadlock reading a page that is used in jffs2_write_begin()
	ACPICA: actypes.h: Expand the ACPI_ACCESS_ definitions
	ACPICA: Utilities: Avoid deleting the same object twice in a row
	ACPICA: Executer: Fix the REFCLASS_REFOF case in acpi_ex_opcode_1A_0T_1R()
	ACPICA: Fix wrong interpretation of PCC address
	ACPICA: Hardware: Do not flush CPU cache when entering S4 and S5
	drm/amdgpu: fixup bad vram size on gmc v8
	amdgpu/pm: Make sysfs pm attributes as read-only for VFs
	ACPI: battery: Add the ThinkPad "Not Charging" quirk
	btrfs: remove BUG_ON() in find_parent_nodes()
	btrfs: remove BUG_ON(!eie) in find_parent_nodes
	net: mdio: Demote probed message to debug print
	mac80211: allow non-standard VHT MCS-10/11
	dm btree: add a defensive bounds check to insert_at()
	dm space map common: add bounds check to sm_ll_lookup_bitmap()
	mlxsw: pci: Avoid flow control for EMAD packets
	net: phy: marvell: configure RGMII delays for 88E1118
	net: gemini: allow any RGMII interface mode
	regulator: qcom_smd: Align probe function with rpmh-regulator
	serial: pl010: Drop CR register reset on set_termios
	serial: core: Keep mctrl register state and cached copy in sync
	random: do not throw away excess input to crng_fast_load
	parisc: Avoid calling faulthandler_disabled() twice
	x86/kbuild: Enable CONFIG_KALLSYMS_ALL=y in the defconfigs
	powerpc/6xx: add missing of_node_put
	powerpc/powernv: add missing of_node_put
	powerpc/cell: add missing of_node_put
	powerpc/btext: add missing of_node_put
	powerpc/watchdog: Fix missed watchdog reset due to memory ordering race
	i2c: i801: Don't silently correct invalid transfer size
	powerpc/smp: Move setup_profiling_timer() under CONFIG_PROFILING
	i2c: mpc: Correct I2C reset procedure
	clk: meson: gxbb: Fix the SDM_EN bit for MPLL0 on GXBB
	powerpc/powermac: Add missing lockdep_register_key()
	KVM: PPC: Book3S: Suppress warnings when allocating too big memory slots
	KVM: PPC: Book3S: Suppress failed alloc warning in H_COPY_TOFROM_GUEST
	w1: Misuse of get_user()/put_user() reported by sparse
	nvmem: core: set size for sysfs bin file
	dm: fix alloc_dax error handling in alloc_dev
	scsi: lpfc: Trigger SLI4 firmware dump before doing driver cleanup
	ALSA: seq: Set upper limit of processed events
	MIPS: Loongson64: Use three arguments for slti
	powerpc/40x: Map 32Mbytes of memory at startup
	selftests/powerpc/spectre_v2: Return skip code when miss_percent is high
	powerpc: handle kdump appropriately with crash_kexec_post_notifiers option
	powerpc/fadump: Fix inaccurate CPU state info in vmcore generated with panic
	udf: Fix error handling in udf_new_inode()
	MIPS: OCTEON: add put_device() after of_find_device_by_node()
	irqchip/gic-v4: Disable redistributors' view of the VPE table at boot time
	i2c: designware-pci: Fix to change data types of hcnt and lcnt parameters
	MIPS: Octeon: Fix build errors using clang
	scsi: sr: Don't use GFP_DMA
	ASoC: mediatek: mt8173: fix device_node leak
	ASoC: mediatek: mt8183: fix device_node leak
	phy: mediatek: Fix missing check in mtk_mipi_tx_probe
	rpmsg: core: Clean up resources on announce_create failure.
	crypto: omap-aes - Fix broken pm_runtime_and_get() usage
	crypto: stm32/crc32 - Fix kernel BUG triggered in probe()
	crypto: caam - replace this_cpu_ptr with raw_cpu_ptr
	ubifs: Error path in ubifs_remount_rw() seems to wrongly free write buffers
	tpm: fix NPE on probe for missing device
	spi: uniphier: Fix a bug that doesn't point to private data correctly
	xen/gntdev: fix unmap notification order
	fuse: Pass correct lend value to filemap_write_and_wait_range()
	serial: Fix incorrect rs485 polarity on uart open
	cputime, cpuacct: Include guest time in user time in cpuacct.stat
	tracing/kprobes: 'nmissed' not showed correctly for kretprobe
	iwlwifi: mvm: Increase the scan timeout guard to 30 seconds
	s390/mm: fix 2KB pgtable release race
	device property: Fix fwnode_graph_devcon_match() fwnode leak
	drm/etnaviv: limit submit sizes
	drm/nouveau/kms/nv04: use vzalloc for nv04_display
	drm/bridge: analogix_dp: Make PSR-exit block less
	parisc: Fix lpa and lpa_user defines
	powerpc/64s/radix: Fix huge vmap false positive
	PCI: xgene: Fix IB window setup
	PCI: pciehp: Use down_read/write_nested(reset_lock) to fix lockdep errors
	PCI: pci-bridge-emul: Make expansion ROM Base Address register read-only
	PCI: pci-bridge-emul: Properly mark reserved PCIe bits in PCI config space
	PCI: pci-bridge-emul: Fix definitions of reserved bits
	PCI: pci-bridge-emul: Correctly set PCIe capabilities
	PCI: pci-bridge-emul: Set PCI_STATUS_CAP_LIST for PCIe device
	xfrm: fix policy lookup for ipv6 gre packets
	btrfs: fix deadlock between quota enable and other quota operations
	btrfs: check the root node for uptodate before returning it
	btrfs: respect the max size in the header when activating swap file
	ext4: make sure to reset inode lockdep class when quota enabling fails
	ext4: make sure quota gets properly shutdown on error
	ext4: fix a possible ABBA deadlock due to busy PA
	ext4: initialize err_blk before calling __ext4_get_inode_loc
	ext4: fix fast commit may miss tracking range for FALLOC_FL_ZERO_RANGE
	ext4: set csum seed in tmp inode while migrating to extents
	ext4: Fix BUG_ON in ext4_bread when write quota data
	ext4: use ext4_ext_remove_space() for fast commit replay delete range
	ext4: fast commit may miss tracking unwritten range during ftruncate
	ext4: destroy ext4_fc_dentry_cachep kmemcache on module removal
	ext4: fix null-ptr-deref in '__ext4_journal_ensure_credits'
	ext4: don't use the orphan list when migrating an inode
	drm/radeon: fix error handling in radeon_driver_open_kms
	of: base: Improve argument length mismatch error
	firmware: Update Kconfig help text for Google firmware
	can: mcp251xfd: mcp251xfd_tef_obj_read(): fix typo in error message
	media: rcar-csi2: Optimize the selection PHTW register
	drm/vc4: hdmi: Make sure the device is powered with CEC
	media: correct MEDIA_TEST_SUPPORT help text
	Documentation: dmaengine: Correctly describe dmatest with channel unset
	Documentation: ACPI: Fix data node reference documentation
	Documentation: refer to config RANDOMIZE_BASE for kernel address-space randomization
	Documentation: fix firewire.rst ABI file path error
	Bluetooth: hci_sync: Fix not setting adv set duration
	scsi: core: Show SCMD_LAST in text form
	dmaengine: uniphier-xdmac: Fix type of address variables
	RDMA/hns: Modify the mapping attribute of doorbell to device
	RDMA/rxe: Fix a typo in opcode name
	dmaengine: stm32-mdma: fix STM32_MDMA_CTBR_TSEL_MASK
	Revert "net/mlx5: Add retry mechanism to the command entry index allocation"
	powerpc/cell: Fix clang -Wimplicit-fallthrough warning
	powerpc/fsl/dts: Enable WA for erratum A-009885 on fman3l MDIO buses
	block: Fix fsync always failed if once failed
	bpftool: Remove inclusion of utilities.mak from Makefiles
	xdp: check prog type before updating BPF link
	perf evsel: Override attr->sample_period for non-libpfm4 events
	ipv4: update fib_info_cnt under spinlock protection
	ipv4: avoid quadratic behavior in netns dismantle
	net/fsl: xgmac_mdio: Add workaround for erratum A-009885
	net/fsl: xgmac_mdio: Fix incorrect iounmap when removing module
	parisc: pdc_stable: Fix memory leak in pdcs_register_pathentries
	f2fs: compress: fix potential deadlock of compress file
	f2fs: fix to reserve space for IO align feature
	af_unix: annote lockless accesses to unix_tot_inflight & gc_in_progress
	clk: Emit a stern warning with writable debugfs enabled
	clk: si5341: Fix clock HW provider cleanup
	net/smc: Fix hung_task when removing SMC-R devices
	net: axienet: increase reset timeout
	net: axienet: Wait for PhyRstCmplt after core reset
	net: axienet: reset core on initialization prior to MDIO access
	net: axienet: add missing memory barriers
	net: axienet: limit minimum TX ring size
	net: axienet: Fix TX ring slot available check
	net: axienet: fix number of TX ring slots for available check
	net: axienet: fix for TX busy handling
	net: axienet: increase default TX ring size to 128
	HID: vivaldi: fix handling devices not using numbered reports
	rtc: pxa: fix null pointer dereference
	vdpa/mlx5: Fix wrong configuration of virtio_version_1_0
	virtio_ring: mark ring unused on error
	taskstats: Cleanup the use of task->exit_code
	inet: frags: annotate races around fqdir->dead and fqdir->high_thresh
	netns: add schedule point in ops_exit_list()
	xfrm: Don't accidentally set RTO_ONLINK in decode_session4()
	gre: Don't accidentally set RTO_ONLINK in gre_fill_metadata_dst()
	libcxgb: Don't accidentally set RTO_ONLINK in cxgb_find_route()
	perf script: Fix hex dump character output
	dmaengine: at_xdmac: Don't start transactions at tx_submit level
	dmaengine: at_xdmac: Start transfer for cyclic channels in issue_pending
	dmaengine: at_xdmac: Print debug message after realeasing the lock
	dmaengine: at_xdmac: Fix concurrency over xfers_list
	dmaengine: at_xdmac: Fix lld view setting
	dmaengine: at_xdmac: Fix at_xdmac_lld struct definition
	perf probe: Fix ppc64 'perf probe add events failed' case
	devlink: Remove misleading internal_flags from health reporter dump
	arm64: dts: qcom: msm8996: drop not documented adreno properties
	net: bonding: fix bond_xmit_broadcast return value error bug
	net_sched: restore "mpu xxx" handling
	bcmgenet: add WOL IRQ check
	net: ethernet: mtk_eth_soc: fix error checking in mtk_mac_config()
	net: sfp: fix high power modules without diagnostic monitoring
	net: mscc: ocelot: fix using match before it is set
	dt-bindings: display: meson-dw-hdmi: add missing sound-name-prefix property
	dt-bindings: display: meson-vpu: Add missing amlogic,canvas property
	dt-bindings: watchdog: Require samsung,syscon-phandle for Exynos7
	scripts/dtc: dtx_diff: remove broken example from help text
	lib82596: Fix IRQ check in sni_82596_probe
	mm/hmm.c: allow VM_MIXEDMAP to work with hmm_range_fault
	lib/test_meminit: destroy cache in kmem_cache_alloc_bulk() test
	mtd: nand: bbt: Fix corner case in bad block table handling
	ath10k: Fix the MTU size on QCA9377 SDIO
	scripts: sphinx-pre-install: add required ctex dependency
	scripts: sphinx-pre-install: Fix ctex support on Debian
	Linux 5.10.94

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I857f2417c899508815a1ba13d1285fd400a1f133
2022-01-27 11:49:22 +01:00

// SPDX-License-Identifier: GPL-2.0
/*
* Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
* policies)
*/
#include "sched.h"
#include "pelt.h"
#include <trace/hooks/sched.h>
int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
struct rt_bandwidth def_rt_bandwidth;
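/*
* Bandwidth replenishment timer. It fires once per rt_period, passes the
* number of elapsed periods to do_sched_rt_period_timer() so per-CPU
* runtime can be refilled and throttled runqueues unthrottled, and keeps
* re-arming itself until every runqueue it serves is idle.
*/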
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
struct rt_bandwidth *rt_b =
container_of(timer, struct rt_bandwidth, rt_period_timer);
int idle = 0;
int overrun;
raw_spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
overrun = hrtimer_forward_now(timer, rt_b->rt_period);
if (!overrun)
break;
raw_spin_unlock(&rt_b->rt_runtime_lock);
idle = do_sched_rt_period_timer(rt_b, overrun);
raw_spin_lock(&rt_b->rt_runtime_lock);
}
if (idle)
rt_b->rt_period_active = 0;
raw_spin_unlock(&rt_b->rt_runtime_lock);
return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
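/*
* Set up an rt_bandwidth control block: record the period and runtime and
* prepare the (initially inactive) replenishment hrtimer.
*/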
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
rt_b->rt_period = ns_to_ktime(period);
rt_b->rt_runtime = runtime;
raw_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_HARD);
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
raw_spin_lock(&rt_b->rt_runtime_lock);
if (!rt_b->rt_period_active) {
rt_b->rt_period_active = 1;
/*
* SCHED_DEADLINE updates the bandwidth, as a runaway
* RT task with a DL task could hog a CPU. But DL does
* not reset the period. If a deadline task was running
* without an RT task running, it can cause RT tasks to
* throttle when they start up. Kick the timer right away
* to update the period.
*/
hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
hrtimer_start_expires(&rt_b->rt_period_timer,
HRTIMER_MODE_ABS_PINNED_HARD);
}
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
return;
do_start_rt_bandwidth(rt_b);
}
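/*
* Initialize a per-CPU RT runqueue: empty priority array, bitmap delimiter
* for bit searches, cleared SMP push/overload state and zeroed bandwidth
* accounting.
*/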
void init_rt_rq(struct rt_rq *rt_rq)
{
struct rt_prio_array *array;
int i;
array = &rt_rq->active;
for (i = 0; i < MAX_RT_PRIO; i++) {
INIT_LIST_HEAD(array->queue + i);
__clear_bit(i, array->bitmap);
}
/* delimiter for bitsearch: */
__set_bit(MAX_RT_PRIO, array->bitmap);
#if defined CONFIG_SMP
rt_rq->highest_prio.curr = MAX_RT_PRIO;
rt_rq->highest_prio.next = MAX_RT_PRIO;
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
/* We start in dequeued state, because no RT tasks are queued */
rt_rq->rt_queued = 0;
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
hrtimer_cancel(&rt_b->rt_period_timer);
}
#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
return container_of(rt_se, struct task_struct, rt);
}
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return rt_rq->rq;
}
static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
return rt_se->rt_rq;
}
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = rt_se->rt_rq;
return rt_rq->rq;
}
void free_rt_sched_group(struct task_group *tg)
{
int i;
if (tg->rt_se)
destroy_rt_bandwidth(&tg->rt_bandwidth);
for_each_possible_cpu(i) {
if (tg->rt_rq)
kfree(tg->rt_rq[i]);
if (tg->rt_se)
kfree(tg->rt_se[i]);
}
kfree(tg->rt_rq);
kfree(tg->rt_se);
}
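/*
* Wire one CPU's rt_rq/rt_se pair into a task group: point them at the CPU's
* rq and at each other, and link the entity to its parent's runqueue (or to
* the root rt_rq when there is no parent).
*/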
void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
struct sched_rt_entity *rt_se, int cpu,
struct sched_rt_entity *parent)
{
struct rq *rq = cpu_rq(cpu);
rt_rq->highest_prio.curr = MAX_RT_PRIO;
rt_rq->rt_nr_boosted = 0;
rt_rq->rq = rq;
rt_rq->tg = tg;
tg->rt_rq[cpu] = rt_rq;
tg->rt_se[cpu] = rt_se;
if (!rt_se)
return;
if (!parent)
rt_se->rt_rq = &rq->rt;
else
rt_se->rt_rq = parent->my_q;
rt_se->my_q = rt_rq;
rt_se->parent = parent;
INIT_LIST_HEAD(&rt_se->run_list);
}
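/*
* Allocate and initialize the per-CPU rt_rq and rt_se arrays for a task
* group, inheriting the default bandwidth period. Returns 1 on success,
* 0 on allocation failure.
*/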
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
struct rt_rq *rt_rq;
struct sched_rt_entity *rt_se;
int i;
tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
if (!tg->rt_rq)
goto err;
tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
if (!tg->rt_se)
goto err;
init_rt_bandwidth(&tg->rt_bandwidth,
ktime_to_ns(def_rt_bandwidth.rt_period), 0);
for_each_possible_cpu(i) {
rt_rq = kzalloc_node(sizeof(struct rt_rq),
GFP_KERNEL, cpu_to_node(i));
if (!rt_rq)
goto err;
rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
GFP_KERNEL, cpu_to_node(i));
if (!rt_se)
goto err_free_rq;
init_rt_rq(rt_rq);
rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
}
return 1;
err_free_rq:
kfree(rt_rq);
err:
return 0;
}
#else /* CONFIG_RT_GROUP_SCHED */
#define rt_entity_is_task(rt_se) (1)
static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
return container_of(rt_se, struct task_struct, rt);
}
static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
return container_of(rt_rq, struct rq, rt);
}
static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
struct task_struct *p = rt_task_of(rt_se);
return task_rq(p);
}
static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
struct rq *rq = rq_of_rt_se(rt_se);
return &rq->rt;
}
void free_rt_sched_group(struct task_group *tg) { }
int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_SMP
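/*
* SMP balancing: a runqueue is marked "RT overloaded" in its root domain
* (rto_mask/rto_count) once it has more than one runnable RT task of which
* at least one can migrate, so that other CPUs know there may be work to
* pull.
*/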
static void pull_rt_task(struct rq *this_rq);
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
/* Try to pull RT tasks here if we lower this rq's prio */
return rq->rt.highest_prio.curr > prev->prio;
}
static inline int rt_overloaded(struct rq *rq)
{
return atomic_read(&rq->rd->rto_count);
}
static inline void rt_set_overload(struct rq *rq)
{
if (!rq->online)
return;
cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
/*
* Make sure the mask is visible before we set
* the overload count. That is checked to determine
* if we should look at the mask. It would be a shame
* if we looked at the mask, but the mask was not
* updated yet.
*
* Matched by the barrier in pull_rt_task().
*/
smp_wmb();
atomic_inc(&rq->rd->rto_count);
}
static inline void rt_clear_overload(struct rq *rq)
{
if (!rq->online)
return;
/* the order here really doesn't matter */
atomic_dec(&rq->rd->rto_count);
cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}
static void update_rt_migration(struct rt_rq *rt_rq)
{
if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
if (!rt_rq->overloaded) {
rt_set_overload(rq_of_rt_rq(rt_rq));
rt_rq->overloaded = 1;
}
} else if (rt_rq->overloaded) {
rt_clear_overload(rq_of_rt_rq(rt_rq));
rt_rq->overloaded = 0;
}
}
static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
struct task_struct *p;
if (!rt_entity_is_task(rt_se))
return;
p = rt_task_of(rt_se);
rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq->rt_nr_total++;
if (p->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory++;
update_rt_migration(rt_rq);
}
static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
struct task_struct *p;
if (!rt_entity_is_task(rt_se))
return;
p = rt_task_of(rt_se);
rt_rq = &rq_of_rt_rq(rt_rq)->rt;
rt_rq->rt_nr_total--;
if (p->nr_cpus_allowed > 1)
rt_rq->rt_nr_migratory--;
update_rt_migration(rt_rq);
}
static inline int has_pushable_tasks(struct rq *rq)
{
return !plist_head_empty(&rq->rt.pushable_tasks);
}
static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);
static inline void rt_queue_push_tasks(struct rq *rq)
{
if (!has_pushable_tasks(rq))
return;
queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}
static inline void rt_queue_pull_task(struct rq *rq)
{
queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}
static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
plist_node_init(&p->pushable_tasks, p->prio);
plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
/* Update the highest prio pushable task */
if (p->prio < rq->rt.highest_prio.next)
rq->rt.highest_prio.next = p->prio;
}
static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
/* Update the new highest prio pushable task */
if (has_pushable_tasks(rq)) {
p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);
rq->rt.highest_prio.next = p->prio;
} else
rq->rt.highest_prio.next = MAX_RT_PRIO;
}
#else
static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}
static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}
static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}
static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
return false;
}
static inline void pull_rt_task(struct rq *this_rq)
{
}
static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
return rt_se->on_rq;
}
#ifdef CONFIG_UCLAMP_TASK
/*
* Verify the fitness of task @p to run on @cpu taking into account the uclamp
* settings.
*
* This check is only important for heterogeneous systems where the uclamp_min
* value is higher than the capacity of a @cpu. For non-heterogeneous systems
* this function will always return true.
*
* The function will return true if the capacity of the @cpu is >= the
* uclamp_min and false otherwise.
*
* Note that uclamp_min will be clamped to uclamp_max if uclamp_min
* > uclamp_max.
*/
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
unsigned int min_cap;
unsigned int max_cap;
unsigned int cpu_cap;
/* Only heterogeneous systems can benefit from this check */
if (!static_branch_unlikely(&sched_asym_cpucapacity))
return true;
min_cap = uclamp_eff_value(p, UCLAMP_MIN);
max_cap = uclamp_eff_value(p, UCLAMP_MAX);
cpu_cap = capacity_orig_of(cpu);
return cpu_cap >= min(min_cap, max_cap);
}
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
return true;
}
#endif
#ifdef CONFIG_RT_GROUP_SCHED
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
if (!rt_rq->tg)
return RUNTIME_INF;
return rt_rq->rt_runtime;
}
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}
typedef struct task_group *rt_rq_iter_t;
static inline struct task_group *next_task_group(struct task_group *tg)
{
do {
tg = list_entry_rcu(tg->list.next,
typeof(struct task_group), list);
} while (&tg->list != &task_groups && task_group_is_autogroup(tg));
if (&tg->list == &task_groups)
tg = NULL;
return tg;
}
#define for_each_rt_rq(rt_rq, iter, rq) \
for (iter = container_of(&task_groups, typeof(*iter), list); \
(iter = next_task_group(iter)) && \
(rt_rq = iter->rt_rq[cpu_of(rq)]);)
#define for_each_sched_rt_entity(rt_se) \
for (; rt_se; rt_se = rt_se->parent)
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
return rt_se->my_q;
}
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
struct rq *rq = rq_of_rt_rq(rt_rq);
struct sched_rt_entity *rt_se;
int cpu = cpu_of(rq);
rt_se = rt_rq->tg->rt_se[cpu];
if (rt_rq->rt_nr_running) {
if (!rt_se)
enqueue_top_rt_rq(rt_rq);
else if (!on_rt_rq(rt_se))
enqueue_rt_entity(rt_se, 0);
if (rt_rq->highest_prio.curr < curr->prio)
resched_curr(rq);
}
}
static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
struct sched_rt_entity *rt_se;
int cpu = cpu_of(rq_of_rt_rq(rt_rq));
rt_se = rt_rq->tg->rt_se[cpu];
if (!rt_se) {
dequeue_top_rt_rq(rt_rq);
/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
}
else if (on_rt_rq(rt_se))
dequeue_rt_entity(rt_se, 0);
}
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}
static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
struct rt_rq *rt_rq = group_rt_rq(rt_se);
struct task_struct *p;
if (rt_rq)
return !!rt_rq->rt_nr_boosted;
p = rt_task_of(rt_se);
return p->prio != p->normal_prio;
}
#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
return cpu_online_mask;
}
#endif
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
return &rt_rq->tg->rt_bandwidth;
}
#else /* !CONFIG_RT_GROUP_SCHED */
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
return rt_rq->rt_runtime;
}
static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
return ktime_to_ns(def_rt_bandwidth.rt_period);
}
typedef struct rt_rq *rt_rq_iter_t;
#define for_each_rt_rq(rt_rq, iter, rq) \
for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
#define for_each_sched_rt_entity(rt_se) \
for (; rt_se; rt_se = NULL)
static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
return NULL;
}
static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
if (!rt_rq->rt_nr_running)
return;
enqueue_top_rt_rq(rt_rq);
resched_curr(rq);
}
static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
dequeue_top_rt_rq(rt_rq);
}
static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
return rt_rq->rt_throttled;
}
static inline const struct cpumask *sched_rt_period_mask(void)
{
return cpu_online_mask;
}
static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
return &cpu_rq(cpu)->rt;
}
static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
return &def_rt_bandwidth;
}
#endif /* CONFIG_RT_GROUP_SCHED */
bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
return (hrtimer_active(&rt_b->rt_period_timer) ||
rt_rq->rt_time < rt_b->rt_runtime);
}
#ifdef CONFIG_SMP
/*
* We ran out of runtime, see if we can borrow some from our neighbours.
*/
static void do_balance_runtime(struct rt_rq *rt_rq)
{
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
int i, weight;
u64 rt_period;
weight = cpumask_weight(rd->span);
raw_spin_lock(&rt_b->rt_runtime_lock);
rt_period = ktime_to_ns(rt_b->rt_period);
for_each_cpu(i, rd->span) {
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
s64 diff;
if (iter == rt_rq)
continue;
raw_spin_lock(&iter->rt_runtime_lock);
/*
* Either all rqs have inf runtime and there's nothing to steal
* or __disable_runtime() below sets a specific rq to inf to
* indicate it's been disabled and disallow stealing.
*/
if (iter->rt_runtime == RUNTIME_INF)
goto next;
/*
* From runqueues with spare time, take 1/n part of their
* spare time, but no more than our period.
*/
diff = iter->rt_runtime - iter->rt_time;
if (diff > 0) {
diff = div_u64((u64)diff, weight);
if (rt_rq->rt_runtime + diff > rt_period)
diff = rt_period - rt_rq->rt_runtime;
iter->rt_runtime -= diff;
rt_rq->rt_runtime += diff;
if (rt_rq->rt_runtime == rt_period) {
raw_spin_unlock(&iter->rt_runtime_lock);
break;
}
}
next:
raw_spin_unlock(&iter->rt_runtime_lock);
}
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
/*
* Ensure this RQ takes back all the runtime it lent to its neighbours.
*/
static void __disable_runtime(struct rq *rq)
{
struct root_domain *rd = rq->rd;
rt_rq_iter_t iter;
struct rt_rq *rt_rq;
if (unlikely(!scheduler_running))
return;
for_each_rt_rq(rt_rq, iter, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
s64 want;
int i;
raw_spin_lock(&rt_b->rt_runtime_lock);
raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* Either we're all inf and nobody needs to borrow, or we're
* already disabled and thus have nothing to do, or we have
* exactly the right amount of runtime to take out.
*/
if (rt_rq->rt_runtime == RUNTIME_INF ||
rt_rq->rt_runtime == rt_b->rt_runtime)
goto balanced;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
/*
* Calculate the difference between what we started out with
* and what we currently have; that's the amount of runtime
* we lent and now have to reclaim.
*/
want = rt_b->rt_runtime - rt_rq->rt_runtime;
/*
* Greedy reclaim, take back as much as we can.
*/
for_each_cpu(i, rd->span) {
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
s64 diff;
/*
* Can't reclaim from ourselves or disabled runqueues.
*/
if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
continue;
raw_spin_lock(&iter->rt_runtime_lock);
if (want > 0) {
diff = min_t(s64, iter->rt_runtime, want);
iter->rt_runtime -= diff;
want -= diff;
} else {
iter->rt_runtime -= want;
want -= want;
}
raw_spin_unlock(&iter->rt_runtime_lock);
if (!want)
break;
}
raw_spin_lock(&rt_rq->rt_runtime_lock);
/*
* We cannot be left wanting - that would mean some runtime
* leaked out of the system.
*/
BUG_ON(want);
balanced:
/*
* Disable all the borrow logic by pretending we have inf
* runtime - in which case borrowing doesn't make sense.
*/
rt_rq->rt_runtime = RUNTIME_INF;
rt_rq->rt_throttled = 0;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
raw_spin_unlock(&rt_b->rt_runtime_lock);
/* Make rt_rq available for pick_next_task() */
sched_rt_rq_enqueue(rt_rq);
}
}
static void __enable_runtime(struct rq *rq)
{
rt_rq_iter_t iter;
struct rt_rq *rt_rq;
if (unlikely(!scheduler_running))
return;
/*
* Reset each runqueue's bandwidth settings
*/
for_each_rt_rq(rt_rq, iter, rq) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
raw_spin_lock(&rt_b->rt_runtime_lock);
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_b->rt_runtime;
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
}
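/*
* Borrow runtime only when this runqueue has overrun its budget and the
* RT_RUNTIME_SHARE feature is enabled; rt_runtime_lock is dropped around
* do_balance_runtime(), which takes the bandwidth lock and other CPUs'
* rt_runtime_locks.
*/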
static void balance_runtime(struct rt_rq *rt_rq)
{
if (!sched_feat(RT_RUNTIME_SHARE))
return;
if (rt_rq->rt_time > rt_rq->rt_runtime) {
raw_spin_unlock(&rt_rq->rt_runtime_lock);
do_balance_runtime(rt_rq);
raw_spin_lock(&rt_rq->rt_runtime_lock);
}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */
static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
int i, idle = 1, throttled = 0;
const struct cpumask *span;
span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
/*
* FIXME: isolated CPUs should really leave the root task group,
* whether they are isolcpus or were isolated via cpusets, lest
* the timer run on a CPU which does not service all runqueues,
* potentially leaving other CPUs indefinitely throttled. If
* isolation is really required, the user will turn the throttle
* off to kill the perturbations it causes anyway. Meanwhile,
* this maintains functionality for boot and/or troubleshooting.
*/
if (rt_b == &root_task_group.rt_bandwidth)
span = cpu_online_mask;
#endif
for_each_cpu(i, span) {
int enqueue = 0;
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
struct rq *rq = rq_of_rt_rq(rt_rq);
int skip;
/*
* When span == cpu_online_mask, taking each rq->lock
* can be time-consuming. Try to avoid it when possible.
*/
raw_spin_lock(&rt_rq->rt_runtime_lock);
if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
rt_rq->rt_runtime = rt_b->rt_runtime;
skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
if (skip)
continue;
raw_spin_lock(&rq->lock);
update_rq_clock(rq);
if (rt_rq->rt_time) {
u64 runtime;
raw_spin_lock(&rt_rq->rt_runtime_lock);
if (rt_rq->rt_throttled)
balance_runtime(rt_rq);
runtime = rt_rq->rt_runtime;
rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
rt_rq->rt_throttled = 0;
enqueue = 1;
/*
* When we're idle and a woken (rt) task is
* throttled, check_preempt_curr() will set
* skip_update and the time between the wakeup
* and this unthrottle will get accounted as
* 'runtime'.
*/
if (rt_rq->rt_nr_running && rq->curr == rq->idle)
rq_clock_cancel_skipupdate(rq);
}
if (rt_rq->rt_time || rt_rq->rt_nr_running)
idle = 0;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
} else if (rt_rq->rt_nr_running) {
idle = 0;
if (!rt_rq_throttled(rt_rq))
enqueue = 1;
}
if (rt_rq->rt_throttled)
throttled = 1;
if (enqueue)
sched_rt_rq_enqueue(rt_rq);
raw_spin_unlock(&rq->lock);
}
if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
return 1;
return idle;
}
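/*
* Effective priority of an entity: the highest queued priority of the group
* it represents, or the task's own priority for task entities.
*/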
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
struct rt_rq *rt_rq = group_rt_rq(rt_se);
if (rt_rq)
return rt_rq->highest_prio.curr;
#endif
return rt_task_of(rt_se)->prio;
}
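/*
* Check whether @rt_rq has used up its runtime for the current period.
* We first try to borrow runtime from other CPUs; if the runqueue is still
* over budget it is marked throttled and dequeued. Returns 1 when the
* runqueue ends up throttled, 0 otherwise.
*/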
static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
u64 runtime = sched_rt_runtime(rt_rq);
if (rt_rq->rt_throttled)
return rt_rq_throttled(rt_rq);
if (runtime >= sched_rt_period(rt_rq))
return 0;
balance_runtime(rt_rq);
runtime = sched_rt_runtime(rt_rq);
if (runtime == RUNTIME_INF)
return 0;
if (rt_rq->rt_time > runtime) {
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
/*
* Don't actually throttle groups that have no runtime assigned
* but accrue some time due to boosting.
*/
if (likely(rt_b->rt_runtime)) {
rt_rq->rt_throttled = 1;
printk_deferred_once("sched: RT throttling activated\n");
trace_android_vh_dump_throttled_rt_tasks(
raw_smp_processor_id(),
rq_clock(rq_of_rt_rq(rt_rq)),
sched_rt_period(rt_rq),
runtime,
hrtimer_get_expires_ns(&rt_b->rt_period_timer));
} else {
/*
* In case we accrued some time anyway, make it go away;
* replenishment is a joke, since it will replenish us
* with exactly 0 ns.
*/
rt_rq->rt_time = 0;
}
if (rt_rq_throttled(rt_rq)) {
sched_rt_rq_dequeue(rt_rq);
return 1;
}
}
return 0;
}
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
*/
static void update_curr_rt(struct rq *rq)
{
struct task_struct *curr = rq->curr;
struct sched_rt_entity *rt_se = &curr->rt;
u64 delta_exec;
u64 now;
if (curr->sched_class != &rt_sched_class)
return;
now = rq_clock_task(rq);
delta_exec = now - curr->se.exec_start;
if (unlikely((s64)delta_exec <= 0))
return;
schedstat_set(curr->se.statistics.exec_max,
max(curr->se.statistics.exec_max, delta_exec));
curr->se.sum_exec_runtime += delta_exec;
account_group_exec_runtime(curr, delta_exec);
curr->se.exec_start = now;
cgroup_account_cputime(curr, delta_exec);
trace_android_vh_sched_stat_runtime_rt(curr, delta_exec);
if (!rt_bandwidth_enabled())
return;
for_each_sched_rt_entity(rt_se) {
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
int exceeded;
if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_time += delta_exec;
exceeded = sched_rt_runtime_exceeded(rt_rq);
if (exceeded)
resched_curr(rq);
raw_spin_unlock(&rt_rq->rt_runtime_lock);
if (exceeded)
do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
}
}
}
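/*
* The top-level rt_rq contributes its rt_nr_running to rq->nr_running.
* dequeue_top_rt_rq()/enqueue_top_rt_rq() remove and re-add that
* contribution as the RT runqueue is throttled, emptied or becomes
* runnable again.
*/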
static void
dequeue_top_rt_rq(struct rt_rq *rt_rq)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
BUG_ON(&rq->rt != rt_rq);
if (!rt_rq->rt_queued)
return;
BUG_ON(!rq->nr_running);
sub_nr_running(rq, rt_rq->rt_nr_running);
rt_rq->rt_queued = 0;
}
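/*
 * Attach the top-level rt_rq to its rq's accounting, unless it is throttled
 * or empty, and kick cpufreq so frequency selection sees the new RT load.
 */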
static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
BUG_ON(&rq->rt != rt_rq);
if (rt_rq->rt_queued)
return;
if (rt_rq_throttled(rt_rq))
return;
if (rt_rq->rt_nr_running) {
add_nr_running(rq, rt_rq->rt_nr_running);
rt_rq->rt_queued = 1;
}
/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
cpufreq_update_util(rq, 0);
}
#if defined CONFIG_SMP
static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Change rq's cpupri only if rt_rq is the top queue.
*/
if (&rq->rt != rt_rq)
return;
#endif
if (rq->online && prio < prev_prio)
cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}
static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Change rq's cpupri only if rt_rq is the top queue.
*/
if (&rq->rt != rt_rq)
return;
#endif
if (rq->online && rt_rq->highest_prio.curr != prev_prio)
cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}
#else /* CONFIG_SMP */
static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
#endif /* CONFIG_SMP */
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
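/*
 * Track rt_rq->highest_prio.curr as entities of priority @prio are added
 * and removed; on SMP the result is propagated to cpupri via
 * inc_rt_prio_smp()/dec_rt_prio_smp().
 */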
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
int prev_prio = rt_rq->highest_prio.curr;
if (prio < prev_prio)
rt_rq->highest_prio.curr = prio;
inc_rt_prio_smp(rt_rq, prio, prev_prio);
}
static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
int prev_prio = rt_rq->highest_prio.curr;
if (rt_rq->rt_nr_running) {
WARN_ON(prio < prev_prio);
/*
* This may have been our highest task, and therefore
* we may have some recomputation to do
*/
if (prio == prev_prio) {
struct rt_prio_array *array = &rt_rq->active;
rt_rq->highest_prio.curr =
sched_find_first_bit(array->bitmap);
}
} else
rt_rq->highest_prio.curr = MAX_RT_PRIO;
dec_rt_prio_smp(rt_rq, prio, prev_prio);
}
#else
static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
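/*
 * Group accounting: keep rt_nr_boosted in sync for PI-boosted entities and
 * make sure the group's bandwidth timer is running.
 */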
static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
if (rt_se_boosted(rt_se))
rt_rq->rt_nr_boosted++;
if (rt_rq->tg)
start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}
static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
if (rt_se_boosted(rt_se))
rt_rq->rt_nr_boosted--;
WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}
#else /* CONFIG_RT_GROUP_SCHED */
static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
start_rt_bandwidth(&def_rt_bandwidth);
}
static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
#endif /* CONFIG_RT_GROUP_SCHED */
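/*
 * Number of runnable tasks an rt entity contributes: the group's
 * rt_nr_running for a group entity, or 1 for a task entity.
 */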
static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
struct rt_rq *group_rq = group_rt_rq(rt_se);
if (group_rq)
return group_rq->rt_nr_running;
else
return 1;
}
static inline
unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
{
struct rt_rq *group_rq = group_rt_rq(rt_se);
struct task_struct *tsk;
if (group_rq)
return group_rq->rr_nr_running;
tsk = rt_task_of(rt_se);
return (tsk->policy == SCHED_RR) ? 1 : 0;
}
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
int prio = rt_se_prio(rt_se);
WARN_ON(!rt_prio(prio));
rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
inc_rt_prio(rt_rq, prio);
inc_rt_migration(rt_se, rt_rq);
inc_rt_group(rt_se, rt_rq);
}
static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
WARN_ON(!rt_prio(rt_se_prio(rt_se)));
WARN_ON(!rt_rq->rt_nr_running);
rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
dec_rt_prio(rt_rq, rt_se_prio(rt_se));
dec_rt_migration(rt_se, rt_rq);
dec_rt_group(rt_se, rt_rq);
}
/*
* Change rt_se->run_list location unless SAVE && !MOVE
*
* assumes ENQUEUE/DEQUEUE flags match
*/
static inline bool move_entity(unsigned int flags)
{
if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
return false;
return true;
}
static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
{
list_del_init(&rt_se->run_list);
if (list_empty(array->queue + rt_se_prio(rt_se)))
__clear_bit(rt_se_prio(rt_se), array->bitmap);
rt_se->on_list = 0;
}
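/*
 * Put an rt entity on the priority array of its rt_rq (at the head or tail
 * depending on ENQUEUE_HEAD) and update the queue accounting; throttled or
 * empty group entities are not enqueued.
 */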
static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
struct rt_rq *group_rq = group_rt_rq(rt_se);
struct list_head *queue = array->queue + rt_se_prio(rt_se);
/*
* Don't enqueue the group if it's throttled, or when empty.
* The latter is a consequence of the former when a child group
* gets throttled and the current group doesn't have any other
* active members.
*/
if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
if (rt_se->on_list)
__delist_rt_entity(rt_se, array);
return;
}
if (move_entity(flags)) {
WARN_ON_ONCE(rt_se->on_list);
if (flags & ENQUEUE_HEAD)
list_add(&rt_se->run_list, queue);
else
list_add_tail(&rt_se->run_list, queue);
__set_bit(rt_se_prio(rt_se), array->bitmap);
rt_se->on_list = 1;
}
rt_se->on_rq = 1;
inc_rt_tasks(rt_se, rt_rq);
}
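/*
 * Take an rt entity off the priority array of its rt_rq and update the
 * queue accounting.
 */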
static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
if (move_entity(flags)) {
WARN_ON_ONCE(!rt_se->on_list);
__delist_rt_entity(rt_se, array);
}
rt_se->on_rq = 0;
dec_rt_tasks(rt_se, rt_rq);
}
/*
* Because the prio of an upper entry depends on the lower
* entries, we must remove entries top-down.
*/
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct sched_rt_entity *back = NULL;
for_each_sched_rt_entity(rt_se) {
rt_se->back = back;
back = rt_se;
}
dequeue_top_rt_rq(rt_rq_of_se(back));
for (rt_se = back; rt_se; rt_se = rt_se->back) {
if (on_rt_rq(rt_se))
__dequeue_rt_entity(rt_se, flags);
}
}
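/*
 * Enqueue @rt_se and all of its ancestors: the stack is first dequeued
 * top-down (see dequeue_rt_stack()) and then re-enqueued bottom-up,
 * finishing by re-attaching the top-level rt_rq to the rq.
 */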
static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rq *rq = rq_of_rt_se(rt_se);
dequeue_rt_stack(rt_se, flags);
for_each_sched_rt_entity(rt_se)
__enqueue_rt_entity(rt_se, flags);
enqueue_top_rt_rq(&rq->rt);
}
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
struct rq *rq = rq_of_rt_se(rt_se);
dequeue_rt_stack(rt_se, flags);
for_each_sched_rt_entity(rt_se) {
struct rt_rq *rt_rq = group_rt_rq(rt_se);
if (rt_rq && rt_rq->rt_nr_running)
__enqueue_rt_entity(rt_se, flags);
}
enqueue_top_rt_rq(&rq->rt);
}
#ifdef CONFIG_SMP
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
bool sync)
{
/*
* If the waker is CFS, then an RT sync wakeup would preempt the waker
* and force it to run for a likely small time after the RT wakee is
* done. So, only honor RT sync wakeups from RT wakers.
*/
return sync && task_has_rt_policy(rq->curr) &&
p->prio <= rq->rt.highest_prio.next &&
rq->rt.rt_nr_running <= 2;
}
#else
static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
bool sync)
{
return false;
}
#endif
/*
* Adding/removing a task to/from a priority array:
*/
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);
if (flags & ENQUEUE_WAKEUP)
rt_se->timeout = 0;
enqueue_rt_entity(rt_se, flags);
if (!task_current(rq, p) && p->nr_cpus_allowed > 1 &&
!should_honor_rt_sync(rq, p, sync))
enqueue_pushable_task(rq, p);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
update_curr_rt(rq);
dequeue_rt_entity(rt_se, flags);
dequeue_pushable_task(rq, p);
}
/*
* Put task to the head or the end of the run list without the overhead of
* dequeue followed by enqueue.
*/
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
if (on_rt_rq(rt_se)) {
struct rt_prio_array *array = &rt_rq->active;
struct list_head *queue = array->queue + rt_se_prio(rt_se);
if (head)
list_move(&rt_se->run_list, queue);
else
list_move_tail(&rt_se->run_list, queue);
}
}
static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
struct sched_rt_entity *rt_se = &p->rt;
struct rt_rq *rt_rq;
for_each_sched_rt_entity(rt_se) {
rt_rq = rt_rq_of_se(rt_se);
requeue_rt_entity(rt_rq, rt_se, head);
}
}
static void yield_task_rt(struct rq *rq)
{
requeue_task_rt(rq, rq->curr, 0);
}
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);
#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
/*
* Return whether the task on the given cpu is currently non-preemptible
* while handling a potentially long softint, or if the task is likely
* to block preemptions soon because it is a ksoftirq thread that is
* handling slow softints.
*/
bool
task_may_not_preempt(struct task_struct *task, int cpu)
{
__u32 softirqs = per_cpu(active_softirqs, cpu) |
__IRQ_STAT(cpu, __softirq_pending);
struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
return ((softirqs & LONG_SOFTIRQ_MASK) &&
(task == cpu_ksoftirqd ||
task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
}
EXPORT_SYMBOL_GPL(task_may_not_preempt);
#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
struct task_struct *curr;
struct rq *rq;
struct rq *this_cpu_rq;
bool test;
int target_cpu = -1;
bool may_not_preempt;
bool sync = !!(flags & WF_SYNC);
int this_cpu;
trace_android_rvh_select_task_rq_rt(p, cpu, sd_flag,
flags, &target_cpu);
if (target_cpu >= 0)
return target_cpu;
/* For anything but wake ups, just return the task_cpu */
if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
goto out;
rq = cpu_rq(cpu);
rcu_read_lock();
curr = READ_ONCE(rq->curr); /* unlocked access */
this_cpu = smp_processor_id();
this_cpu_rq = cpu_rq(this_cpu);
/*
* If the current task on @p's runqueue is a softirq task,
* it may run without preemption for a time that is
* ill-suited for a waiting RT task. Therefore, try to
* wake this RT task on another runqueue.
*
* Also, if the current task on @p's runqueue is an RT task, then
* try to see if we can wake this RT task up on another
* runqueue. Otherwise simply start this RT task
* on its current runqueue.
*
* We want to avoid overloading runqueues. If the woken
* task is a higher priority, then it will stay on this CPU
* and the lower prio task should be moved to another CPU.
* Even though this will probably make the lower prio task
* lose its cache, we do not want to bounce a higher task
* around just because it gave up its CPU, perhaps for a
* lock?
*
* For equal prio tasks, we just let the scheduler sort it out.
*
* Otherwise, just let it ride on the affined RQ and the
* post-schedule router will push the preempted task away
*
* This test is optimistic; if we get it wrong the load-balancer
* will have to sort it out.
*
* We take into account the capacity of the CPU to ensure it fits the
* requirement of the task - which is only important on heterogeneous
* systems like big.LITTLE.
*/
may_not_preempt = task_may_not_preempt(curr, cpu);
test = (curr && (may_not_preempt ||
(unlikely(rt_task(curr)) &&
(curr->nr_cpus_allowed < 2 || curr->prio <= p->prio))));
/*
* Respect the sync flag as long as the task can run on this CPU.
*/
if (should_honor_rt_sync(this_cpu_rq, p, sync) &&
cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
cpu = this_cpu;
goto out_unlock;
}
if (test || !rt_task_fits_capacity(p, cpu)) {
int target = find_lowest_rq(p);
/*
* Bail out if we were forcing a migration to find a better
* fitting CPU but our search failed.
*/
if (!test && target != -1 && !rt_task_fits_capacity(p, target))
goto out_unlock;
/*
* If cpu is non-preemptible, prefer remote cpu
* even if it's running a higher-prio task.
* Otherwise: Don't bother moving it if the destination CPU is
* not running a lower priority task.
*/
if (target != -1 &&
(may_not_preempt ||
p->prio < cpu_rq(target)->rt.highest_prio.curr))
cpu = target;
}
out_unlock:
rcu_read_unlock();
out:
return cpu;
}
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
/*
* Current can't be migrated, useless to reschedule,
* let's hope p can move out.
*/
if (rq->curr->nr_cpus_allowed == 1 ||
!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
return;
/*
* p is migratable, so let's not schedule it and
* see if it is pushed or pulled somewhere else.
*/
if (p->nr_cpus_allowed != 1 &&
cpupri_find(&rq->rd->cpupri, p, NULL))
return;
/*
* There appear to be other CPUs that can accept
* the current task but none can run 'p', so lets reschedule
* to try and push the current task away:
*/
requeue_task_rt(rq, p, 1);
resched_curr(rq);
}
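/*
 * The RT class' ->balance() callback: if @p is not queued on the rt_rq and
 * a pull might be needed, pull RT tasks from other runqueues; returns
 * whether a stop, deadline or RT task is runnable on this rq.
 */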
static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
int done = 0;
/*
* This is OK, because current is on_cpu, which avoids it being
* picked for load-balance and preemption/IRQs are still
* disabled avoiding further scheduler activity on it and we've
* not yet started the picking loop.
*/
rq_unpin_lock(rq, rf);
trace_android_rvh_sched_balance_rt(rq, p, &done);
if (!done)
pull_rt_task(rq);
rq_repin_lock(rq, rf);
}
return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
}
#endif /* CONFIG_SMP */
/*
* Preempt the current task with a newly woken task if needed:
*/
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
if (p->prio < rq->curr->prio) {
resched_curr(rq);
return;
}
#ifdef CONFIG_SMP
/*
* If:
*
* - the newly woken task is of equal priority to the current task
* - the newly woken task is non-migratable while current is migratable
* - current will be preempted on the next reschedule
*
* we should check to see if current can readily move to a different
* cpu. If so, we will reschedule to allow the push logic to try
* to move current somewhere else, making room for our non-migratable
* task.
*/
if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
check_preempt_equal_prio(rq, p);
#endif
}
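/*
 * Make @p the rq's running RT task: stamp its exec_start, take it off the
 * pushable list and, for the first pick, refresh the RT PELT signal (if the
 * previous task was not RT) and queue a push of the remaining RT tasks.
 */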
static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
{
p->se.exec_start = rq_clock_task(rq);
/* The running task is never eligible for pushing */
dequeue_pushable_task(rq, p);
if (!first)
return;
/*
* If prev task was rt, put_prev_task() has already updated the
* utilization. We only care about the case where we start to schedule
* an rt task.
*/
if (rq->curr->sched_class != &rt_sched_class)
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
rt_queue_push_tasks(rq);
}
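/*
 * Pick the first entity on the highest-priority non-empty queue of @rt_rq.
 */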
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
struct rt_rq *rt_rq)
{
struct rt_prio_array *array = &rt_rq->active;
struct sched_rt_entity *next = NULL;
struct list_head *queue;
int idx;
idx = sched_find_first_bit(array->bitmap);
BUG_ON(idx >= MAX_RT_PRIO);
queue = array->queue + idx;
next = list_entry(queue->next, struct sched_rt_entity, run_list);
return next;
}
static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
struct sched_rt_entity *rt_se;
struct rt_rq *rt_rq = &rq->rt;
do {
rt_se = pick_next_rt_entity(rq, rt_rq);
BUG_ON(!rt_se);
rt_rq = group_rt_rq(rt_se);
} while (rt_rq);
return rt_task_of(rt_se);
}
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
struct task_struct *p;
if (!sched_rt_runnable(rq))
return NULL;
p = _pick_next_task_rt(rq);
set_next_task_rt(rq, p, true);
return p;
}
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
update_curr_rt(rq);
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
/*
* The previous task needs to be made eligible for pushing
* if it is still active
*/
if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
}
#ifdef CONFIG_SMP
/* Only try algorithms three times */
#define RT_MAX_TRIES 3
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
cpumask_test_cpu(cpu, p->cpus_ptr))
return 1;
return 0;
}
/*
* Return the highest-priority pushable task on this rq that can run
* on @cpu, or NULL if there is none.
*/
struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
struct plist_head *head = &rq->rt.pushable_tasks;
struct task_struct *p;
if (!has_pushable_tasks(rq))
return NULL;
plist_for_each_entry(p, head, pushable_tasks) {
if (pick_rt_task(rq, p, cpu))
return p;
}
return NULL;
}
EXPORT_SYMBOL_GPL(pick_highest_pushable_task);
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
static int find_lowest_rq(struct task_struct *task)
{
struct sched_domain *sd;
struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
int this_cpu = smp_processor_id();
int cpu = -1;
int ret;
/* Make sure the mask is initialized first */
if (unlikely(!lowest_mask))
return -1;
if (task->nr_cpus_allowed == 1)
return -1; /* No other targets possible */
/*
* If we're on an asym system, ensure we consider the different capacities
* of the CPUs when searching for the lowest_mask.
*/
if (static_branch_unlikely(&sched_asym_cpucapacity)) {
ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
task, lowest_mask,
rt_task_fits_capacity);
} else {
ret = cpupri_find(&task_rq(task)->rd->cpupri,
task, lowest_mask);
}
trace_android_rvh_find_lowest_rq(task, lowest_mask, ret, &cpu);
if (cpu >= 0)
return cpu;
if (!ret)
return -1; /* No targets found */
cpu = task_cpu(task);
/*
* At this point we have built a mask of CPUs representing the
* lowest priority tasks in the system. Now we want to elect
* the best one based on our affinity and topology.
*
* We prioritize the last CPU that the task executed on since
* it is most likely cache-hot in that location.
*/
if (cpumask_test_cpu(cpu, lowest_mask))
return cpu;
/*
* Otherwise, we consult the sched_domains span maps to figure
* out which CPU is logically closest to our hot cache data.
*/
if (!cpumask_test_cpu(this_cpu, lowest_mask))
this_cpu = -1; /* Skip this_cpu opt if not among lowest */
rcu_read_lock();
for_each_domain(cpu, sd) {
if (sd->flags & SD_WAKE_AFFINE) {
int best_cpu;
/*
* "this_cpu" is cheaper to preempt than a
* remote processor.
*/
if (this_cpu != -1 &&
cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
rcu_read_unlock();
return this_cpu;
}
best_cpu = cpumask_first_and(lowest_mask,
sched_domain_span(sd));
if (best_cpu < nr_cpu_ids) {
rcu_read_unlock();
return best_cpu;
}
}
}
rcu_read_unlock();
/*
* And finally, if there were no matches within the domains
* just give the caller *something* to work with from the compatible
* locations.
*/
if (this_cpu != -1)
return this_cpu;
cpu = cpumask_any(lowest_mask);
if (cpu < nr_cpu_ids)
return cpu;
return -1;
}
/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
struct rq *lowest_rq = NULL;
int tries;
int cpu;
for (tries = 0; tries < RT_MAX_TRIES; tries++) {
cpu = find_lowest_rq(task);
if ((cpu == -1) || (cpu == rq->cpu))
break;
lowest_rq = cpu_rq(cpu);
if (lowest_rq->rt.highest_prio.curr <= task->prio) {
/*
* Target rq has tasks of equal or higher priority,
* retrying does not release any lock and is unlikely
* to yield a different result.
*/
lowest_rq = NULL;
break;
}
/* if the prio of this runqueue changed, try again */
if (double_lock_balance(rq, lowest_rq)) {
/*
* We had to unlock the run queue. In
* the meantime, the task could have
* migrated already or had its affinity changed.
* Also make sure that it wasn't scheduled on its rq.
*/
if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
task_running(rq, task) ||
!rt_task(task) ||
!task_on_rq_queued(task))) {
double_unlock_balance(rq, lowest_rq);
lowest_rq = NULL;
break;
}
}
/* If this rq is still suitable use it. */
if (lowest_rq->rt.highest_prio.curr > task->prio)
break;
/* try again */
double_unlock_balance(rq, lowest_rq);
lowest_rq = NULL;
}
return lowest_rq;
}
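/*
 * Return the next task eligible for pushing on this rq, or NULL when there
 * is nothing to push.
 */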
static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
struct task_struct *p;
if (!has_pushable_tasks(rq))
return NULL;
p = plist_first_entry(&rq->rt.pushable_tasks,
struct task_struct, pushable_tasks);
BUG_ON(rq->cpu != task_cpu(p));
BUG_ON(task_current(rq, p));
BUG_ON(p->nr_cpus_allowed <= 1);
BUG_ON(!task_on_rq_queued(p));
BUG_ON(!rt_task(p));
return p;
}
/*
* If the current CPU has more than one RT task, see if the non-running
* task can migrate over to a CPU that is running a task
* of lesser priority.
*/
static int push_rt_task(struct rq *rq)
{
struct task_struct *next_task;
struct rq *lowest_rq;
int ret = 0;
if (!rq->rt.overloaded)
return 0;
next_task = pick_next_pushable_task(rq);
if (!next_task)
return 0;
retry:
if (WARN_ON(next_task == rq->curr))
return 0;
/*
* It's possible that the next_task slipped in of
* higher priority than current. If that's the case
* just reschedule current.
*/
if (unlikely(next_task->prio < rq->curr->prio)) {
resched_curr(rq);
return 0;
}
/* We might release rq lock */
get_task_struct(next_task);
/* find_lock_lowest_rq locks the rq if found */
lowest_rq = find_lock_lowest_rq(next_task, rq);
if (!lowest_rq) {
struct task_struct *task;
/*
* find_lock_lowest_rq releases rq->lock
* so it is possible that next_task has migrated.
*
* We need to make sure that the task is still on the same
* run-queue and is also still the next task eligible for
* pushing.
*/
task = pick_next_pushable_task(rq);
if (task == next_task) {
/*
* The task hasn't migrated, and is still the next
* eligible task, but we failed to find a run-queue
* to push it to. Do not retry in this case, since
* other CPUs will pull from us when ready.
*/
goto out;
}
if (!task)
/* No more tasks, just exit */
goto out;
/*
* Something has shifted, try again.
*/
put_task_struct(next_task);
next_task = task;
goto retry;
}
deactivate_task(rq, next_task, 0);
set_task_cpu(next_task, lowest_rq->cpu);
activate_task(lowest_rq, next_task, 0);
ret = 1;
resched_curr(lowest_rq);
double_unlock_balance(rq, lowest_rq);
out:
put_task_struct(next_task);
return ret;
}
static void push_rt_tasks(struct rq *rq)
{
/* push_rt_task will return true if it moved an RT */
while (push_rt_task(rq))
;
}
#ifdef HAVE_RT_PUSH_IPI
/*
* When a high priority task schedules out from a CPU and a lower priority
* task is scheduled in, a check is made to see if there's any RT tasks
* on other CPUs that are waiting to run because a higher priority RT task
* is currently running on its CPU. In this case, the CPU with multiple RT
* tasks queued on it (overloaded) needs to be notified that a CPU has opened
* up that may be able to run one of its non-running queued RT tasks.
*
* All CPUs with overloaded RT tasks need to be notified as there is currently
* no way to know which of these CPUs have the highest priority task waiting
* to run. Instead of trying to take a spinlock on each of these CPUs,
* which has been shown to cause large latencies on machines with many
* CPUs, an IPI is sent to the CPUs to have them push off the overloaded
* RT tasks that are waiting to run.
*
* Just sending an IPI to each of the CPUs is also an issue, as on large
* CPU-count machines, this can cause an IPI storm on a CPU, especially
* if it's the only CPU with multiple RT tasks queued, and a large number
* of CPUs scheduling a lower priority task at the same time.
*
* Each root domain has its own irq work function that can iterate over
* all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
* tasks must be checked whether one or many CPUs are lowering
* their priority, there's a single irq work iterator that will try to
* push off RT tasks that are waiting to run.
*
* When a CPU schedules a lower priority task, it will kick off the
* irq work iterator that will jump to each CPU with overloaded RT tasks.
* As it only takes the first CPU that schedules a lower priority task
* to start the process, the rto_start variable is incremented and if
* the atomic result is one, then that CPU will try to take the rto_lock.
* This prevents high contention on the lock as the process handles all
* CPUs scheduling lower priority tasks.
*
* All CPUs that are scheduling a lower priority task will increment the
* rto_loop_next variable. This will make sure that the irq work iterator
* checks all RT overloaded CPUs whenever a CPU schedules a new lower
* priority task, even if the iterator is in the middle of a scan. Incrementing
* the rto_loop_next will cause the iterator to perform another scan.
*
*/
static int rto_next_cpu(struct root_domain *rd)
{
int next;
int cpu;
/*
* When starting the IPI RT pushing, the rto_cpu is set to -1,
* rto_next_cpu() will simply return the first CPU found in
* the rto_mask.
*
* If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
* will return the next CPU found in the rto_mask.
*
* If there are no more CPUs left in the rto_mask, then a check is made
* against rto_loop and rto_loop_next. rto_loop is only updated with
* the rto_lock held, but any CPU may increment the rto_loop_next
* without any locking.
*/
for (;;) {
/* When rto_cpu is -1 this acts like cpumask_first() */
cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
rd->rto_cpu = cpu;
if (cpu < nr_cpu_ids)
return cpu;
rd->rto_cpu = -1;
/*
* ACQUIRE ensures we see the @rto_mask changes
* made prior to the @next value observed.
*
* Matches WMB in rt_set_overload().
*/
next = atomic_read_acquire(&rd->rto_loop_next);
if (rd->rto_loop == next)
break;
rd->rto_loop = next;
}
return -1;
}
static inline bool rto_start_trylock(atomic_t *v)
{
return !atomic_cmpxchg_acquire(v, 0, 1);
}
static inline void rto_start_unlock(atomic_t *v)
{
atomic_set_release(v, 0);
}
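/*
 * Start (or keep alive) the IPI-driven push loop: bump rto_loop_next so a
 * running iteration re-scans, and if no iteration is active, queue the
 * irq work on the first RT-overloaded CPU.
 */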
static void tell_cpu_to_push(struct rq *rq)
{
int cpu = -1;
/* Keep the loop going if the IPI is currently active */
atomic_inc(&rq->rd->rto_loop_next);
/* Only one CPU can initiate a loop at a time */
if (!rto_start_trylock(&rq->rd->rto_loop_start))
return;
raw_spin_lock(&rq->rd->rto_lock);
/*
* The rto_cpu is updated under the lock, if it has a valid CPU
* then the IPI is still running and will continue due to the
* update to loop_next, and nothing needs to be done here.
* Otherwise it is finishing up and an IPI needs to be sent.
*/
if (rq->rd->rto_cpu < 0)
cpu = rto_next_cpu(rq->rd);
raw_spin_unlock(&rq->rd->rto_lock);
rto_start_unlock(&rq->rd->rto_loop_start);
if (cpu >= 0) {
/* Make sure the rd does not get freed while pushing */
sched_get_rd(rq->rd);
irq_work_queue_on(&rq->rd->rto_push_work, cpu);
}
}
/* Called from hardirq context */
void rto_push_irq_work_func(struct irq_work *work)
{
struct root_domain *rd =
container_of(work, struct root_domain, rto_push_work);
struct rq *rq;
int cpu;
rq = this_rq();
/*
* We do not need to grab the lock to check for has_pushable_tasks.
* When it gets updated, a check is made if a push is possible.
*/
if (has_pushable_tasks(rq)) {
raw_spin_lock(&rq->lock);
push_rt_tasks(rq);
raw_spin_unlock(&rq->lock);
}
raw_spin_lock(&rd->rto_lock);
/* Pass the IPI to the next rt overloaded queue */
cpu = rto_next_cpu(rd);
raw_spin_unlock(&rd->rto_lock);
if (cpu < 0) {
sched_put_rd(rd);
return;
}
/* Try the next RT overloaded CPU */
irq_work_queue_on(&rd->rto_push_work, cpu);
}
#endif /* HAVE_RT_PUSH_IPI */
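/*
 * Pull a runnable RT task of higher priority than our highest queued one
 * from another overloaded runqueue (or, with RT_PUSH_IPI, ask the
 * overloaded CPUs to push to us).
 */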
static void pull_rt_task(struct rq *this_rq)
{
int this_cpu = this_rq->cpu, cpu;
bool resched = false;
struct task_struct *p;
struct rq *src_rq;
int rt_overload_count = rt_overloaded(this_rq);
if (likely(!rt_overload_count))
return;
/*
* Match the barrier from rt_set_overload(); this guarantees that if we
* see overloaded we must also see the rto_mask bit.
*/
smp_rmb();
/* If we are the only overloaded CPU do nothing */
if (rt_overload_count == 1 &&
cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
return;
#ifdef HAVE_RT_PUSH_IPI
if (sched_feat(RT_PUSH_IPI)) {
tell_cpu_to_push(this_rq);
return;
}
#endif
for_each_cpu(cpu, this_rq->rd->rto_mask) {
if (this_cpu == cpu)
continue;
src_rq = cpu_rq(cpu);
/*
* Don't bother taking the src_rq->lock if the next highest
* task is known to be lower-priority than our current task.
* This may look racy, but if this value is about to go
* logically higher, the src_rq will push this task away.
* And if it's going logically lower, we do not care
*/
if (src_rq->rt.highest_prio.next >=
this_rq->rt.highest_prio.curr)
continue;
/*
* We can potentially drop this_rq's lock in
* double_lock_balance, and another CPU could
* alter this_rq
*/
double_lock_balance(this_rq, src_rq);
/*
* We can only pull a task that is pushable
* on its rq, and no others.
*/
p = pick_highest_pushable_task(src_rq, this_cpu);
/*
* Do we have an RT task that preempts
* the to-be-scheduled task?
*/
if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
WARN_ON(p == src_rq->curr);
WARN_ON(!task_on_rq_queued(p));
/*
* There's a chance that p is higher in priority
* than what's currently running on its CPU.
* This is just that p is waking up and hasn't
* had a chance to schedule. We only pull
* p if it is lower in priority than the
* current task on the run queue
*/
if (p->prio < src_rq->curr->prio)
goto skip;
resched = true;
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
activate_task(this_rq, p, 0);
/*
* We continue with the search, just in
* case there's an even higher prio task
* in another runqueue. (low likelihood
* but possible)
*/
}
skip:
double_unlock_balance(this_rq, src_rq);
}
if (resched)
resched_curr(this_rq);
}
/*
* If we are not running and we are not going to reschedule soon, we should
* try to push tasks away now
*/
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
bool need_to_push = !task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
p->nr_cpus_allowed > 1 &&
(dl_task(rq->curr) || rt_task(rq->curr)) &&
(rq->curr->nr_cpus_allowed < 2 ||
rq->curr->prio <= p->prio);
if (need_to_push)
push_rt_tasks(rq);
}
/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
if (rq->rt.overloaded)
rt_set_overload(rq);
__enable_runtime(rq);
cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}
/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
if (rq->rt.overloaded)
rt_clear_overload(rq);
__disable_runtime(rq);
cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}
/*
* When switching away from the rt queue, we put ourselves in a position
* where we might want to pull RT tasks from other runqueues.
*/
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
/*
* If there are other RT tasks then we will reschedule
* and the scheduling of the other RT tasks will handle
* the balancing. But if we are the last RT task
* we may need to handle the pulling of RT tasks
* now.
*/
if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
return;
rt_queue_pull_task(rq);
}
void __init init_sched_rt_class(void)
{
unsigned int i;
for_each_possible_cpu(i) {
zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
GFP_KERNEL, cpu_to_node(i));
}
}
#endif /* CONFIG_SMP */
/*
* When switching a task to RT, we may overload the runqueue
* with RT tasks. In this case we try to push them off to
* other runqueues.
*/
static void switched_to_rt(struct rq *rq, struct task_struct *p)
{
/*
* If we are running, update the avg_rt tracking, as the running time
* will from now on be accounted into the latter.
*/
if (task_current(rq, p)) {
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
return;
}
/*
* If we are not running we may need to preempt the current
* running task. If that current running task is also an RT task
* then see if we can move to another run queue.
*/
if (task_on_rq_queued(p)) {
#ifdef CONFIG_SMP
if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
rt_queue_push_tasks(rq);
#endif /* CONFIG_SMP */
if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
resched_curr(rq);
}
}
/*
* Priority of the task has changed. This may cause
* us to initiate a push or pull.
*/
static void
prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
{
if (!task_on_rq_queued(p))
return;
if (rq->curr == p) {
#ifdef CONFIG_SMP
/*
* If our priority decreases while running, we
* may need to pull tasks to this runqueue.
*/
if (oldprio < p->prio)
rt_queue_pull_task(rq);
/*
* If there's a higher priority task waiting to run
* then reschedule.
*/
if (p->prio > rq->rt.highest_prio.curr)
resched_curr(rq);
#else
/* For UP simply resched on drop of prio */
if (oldprio < p->prio)
resched_curr(rq);
#endif /* CONFIG_SMP */
} else {
/*
* This task is not running, but if its priority
* is higher than that of the current running task
* then reschedule.
*/
if (p->prio < rq->curr->prio)
resched_curr(rq);
}
}
#ifdef CONFIG_POSIX_TIMERS
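/*
 * Enforce RLIMIT_RTTIME: count the ticks this RT task has run and fire the
 * posix cputimer watchdog once the soft limit is exceeded.
 */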
static void watchdog(struct rq *rq, struct task_struct *p)
{
unsigned long soft, hard;
/* max may change after cur was read; this will be fixed next tick */
soft = task_rlimit(p, RLIMIT_RTTIME);
hard = task_rlimit_max(p, RLIMIT_RTTIME);
if (soft != RLIM_INFINITY) {
unsigned long next;
if (p->rt.watchdog_stamp != jiffies) {
p->rt.timeout++;
p->rt.watchdog_stamp = jiffies;
}
next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
if (p->rt.timeout > next) {
posix_cputimers_rt_watchdog(&p->posix_cputimers,
p->se.sum_exec_runtime);
}
}
}
#else
static inline void watchdog(struct rq *rq, struct task_struct *p) { }
#endif
/*
* scheduler tick hitting a task of our scheduling class.
*
* NOTE: This function can be called remotely by the tick offload that
* goes along full dynticks. Therefore no local assumption can be made
* and everything must be accessed through the @rq and @curr passed in
* parameters.
*/
static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
struct sched_rt_entity *rt_se = &p->rt;
update_curr_rt(rq);
update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
watchdog(rq, p);
/*
* RR tasks need a special form of timeslice management.
* FIFO tasks have no timeslices.
*/
if (p->policy != SCHED_RR)
return;
if (--p->rt.time_slice)
return;
p->rt.time_slice = sched_rr_timeslice;
/*
* Requeue to the end of queue if we (and all of our ancestors) are not
* the only element on the queue
*/
for_each_sched_rt_entity(rt_se) {
if (rt_se->run_list.prev != rt_se->run_list.next) {
requeue_task_rt(rq, p, 0);
resched_curr(rq);
return;
}
}
}
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
/*
* Time slice is 0 for SCHED_FIFO tasks
*/
if (task->policy == SCHED_RR)
return sched_rr_timeslice;
else
return 0;
}
const struct sched_class rt_sched_class
__section("__rt_sched_class") = {
.enqueue_task = enqueue_task_rt,
.dequeue_task = dequeue_task_rt,
.yield_task = yield_task_rt,
.check_preempt_curr = check_preempt_curr_rt,
.pick_next_task = pick_next_task_rt,
.put_prev_task = put_prev_task_rt,
.set_next_task = set_next_task_rt,
#ifdef CONFIG_SMP
.balance = balance_rt,
.select_task_rq = select_task_rq_rt,
.set_cpus_allowed = set_cpus_allowed_common,
.rq_online = rq_online_rt,
.rq_offline = rq_offline_rt,
.task_woken = task_woken_rt,
.switched_from = switched_from_rt,
#endif
.task_tick = task_tick_rt,
.get_rr_interval = get_rr_interval_rt,
.prio_changed = prio_changed_rt,
.switched_to = switched_to_rt,
.update_curr = update_curr_rt,
#ifdef CONFIG_UCLAMP_TASK
.uclamp_enabled = 1,
#endif
};
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Ensure that the real time constraints are schedulable.
*/
static DEFINE_MUTEX(rt_constraints_mutex);
static inline int tg_has_rt_tasks(struct task_group *tg)
{
struct task_struct *task;
struct css_task_iter it;
int ret = 0;
/*
* Autogroups do not have RT tasks; see autogroup_create().
*/
if (task_group_is_autogroup(tg))
return 0;
css_task_iter_start(&tg->css, 0, &it);
while (!ret && (task = css_task_iter_next(&it)))
ret |= rt_task(task);
css_task_iter_end(&it);
return ret;
}
struct rt_schedulable_data {
struct task_group *tg;
u64 rt_period;
u64 rt_runtime;
};
static int tg_rt_schedulable(struct task_group *tg, void *data)
{
struct rt_schedulable_data *d = data;
struct task_group *child;
unsigned long total, sum = 0;
u64 period, runtime;
period = ktime_to_ns(tg->rt_bandwidth.rt_period);
runtime = tg->rt_bandwidth.rt_runtime;
if (tg == d->tg) {
period = d->rt_period;
runtime = d->rt_runtime;
}
/*
* Cannot have more runtime than the period.
*/
if (runtime > period && runtime != RUNTIME_INF)
return -EINVAL;
/*
* Ensure we don't starve existing RT tasks if runtime turns zero.
*/
if (rt_bandwidth_enabled() && !runtime &&
tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
return -EBUSY;
total = to_ratio(period, runtime);
/*
* Nobody can have more than the global setting allows.
*/
if (total > to_ratio(global_rt_period(), global_rt_runtime()))
return -EINVAL;
/*
* The sum of our children's runtime should not exceed our own.
*/
list_for_each_entry_rcu(child, &tg->children, siblings) {
period = ktime_to_ns(child->rt_bandwidth.rt_period);
runtime = child->rt_bandwidth.rt_runtime;
if (child == d->tg) {
period = d->rt_period;
runtime = d->rt_runtime;
}
sum += to_ratio(period, runtime);
}
if (sum > total)
return -EINVAL;
return 0;
}
static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
int ret;
struct rt_schedulable_data data = {
.tg = tg,
.rt_period = period,
.rt_runtime = runtime,
};
rcu_read_lock();
ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
rcu_read_unlock();
return ret;
}
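/*
 * Apply a new period/runtime pair to a task group after checking, via
 * __rt_schedulable(), that the whole hierarchy remains schedulable, then
 * propagate the new runtime to every per-CPU rt_rq of the group.
 */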
static int tg_set_rt_bandwidth(struct task_group *tg,
u64 rt_period, u64 rt_runtime)
{
int i, err = 0;
/*
* Disallowing the root group RT runtime is BAD, it would disallow the
* kernel from creating (and/or operating) RT threads.
*/
if (tg == &root_task_group && rt_runtime == 0)
return -EINVAL;
/* No period doesn't make any sense. */
if (rt_period == 0)
return -EINVAL;
/*
* Bound quota to defend quota against overflow during bandwidth shift.
*/
if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
return -EINVAL;
mutex_lock(&rt_constraints_mutex);
err = __rt_schedulable(tg, rt_period, rt_runtime);
if (err)
goto unlock;
raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
tg->rt_bandwidth.rt_runtime = rt_runtime;
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = tg->rt_rq[i];
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_runtime;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
mutex_unlock(&rt_constraints_mutex);
return err;
}
int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
u64 rt_runtime, rt_period;
rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
if (rt_runtime_us < 0)
rt_runtime = RUNTIME_INF;
else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
return -EINVAL;
return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}
long sched_group_rt_runtime(struct task_group *tg)
{
u64 rt_runtime_us;
if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
return -1;
rt_runtime_us = tg->rt_bandwidth.rt_runtime;
do_div(rt_runtime_us, NSEC_PER_USEC);
return rt_runtime_us;
}
int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
{
u64 rt_runtime, rt_period;
if (rt_period_us > U64_MAX / NSEC_PER_USEC)
return -EINVAL;
rt_period = rt_period_us * NSEC_PER_USEC;
rt_runtime = tg->rt_bandwidth.rt_runtime;
return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
}
long sched_group_rt_period(struct task_group *tg)
{
u64 rt_period_us;
rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
do_div(rt_period_us, NSEC_PER_USEC);
return rt_period_us;
}
static int sched_rt_global_constraints(void)
{
int ret = 0;
mutex_lock(&rt_constraints_mutex);
ret = __rt_schedulable(NULL, 0, 0);
mutex_unlock(&rt_constraints_mutex);
return ret;
}
int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
/* Don't accept realtime tasks when there is no way for them to run */
if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
return 0;
return 1;
}
#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
unsigned long flags;
int i;
raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = global_rt_runtime();
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */
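/*
 * Validate the global sysctl values: the period must be positive and the
 * runtime must not exceed the period nor the maximum supported runtime.
 */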
static int sched_rt_global_validate(void)
{
if (sysctl_sched_rt_period <= 0)
return -EINVAL;
if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
((u64)sysctl_sched_rt_runtime *
NSEC_PER_USEC > max_rt_runtime)))
return -EINVAL;
return 0;
}
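/* Copy the validated global sysctl values into the default rt_bandwidth. */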
static void sched_rt_do_global(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
def_rt_bandwidth.rt_runtime = global_rt_runtime();
def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
}
int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int old_period, old_runtime;
static DEFINE_MUTEX(mutex);
int ret;
mutex_lock(&mutex);
old_period = sysctl_sched_rt_period;
old_runtime = sysctl_sched_rt_runtime;
ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (!ret && write) {
ret = sched_rt_global_validate();
if (ret)
goto undo;
ret = sched_dl_global_validate();
if (ret)
goto undo;
ret = sched_rt_global_constraints();
if (ret)
goto undo;
sched_rt_do_global();
sched_dl_do_global();
}
if (0) {
undo:
sysctl_sched_rt_period = old_period;
sysctl_sched_rt_runtime = old_runtime;
}
mutex_unlock(&mutex);
return ret;
}
int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
int ret;
static DEFINE_MUTEX(mutex);
mutex_lock(&mutex);
ret = proc_dointvec(table, write, buffer, lenp, ppos);
/*
* Make sure that internally we keep jiffies.
* Also, writing zero resets the timeslice to default:
*/
if (!ret && write) {
sched_rr_timeslice =
sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
msecs_to_jiffies(sysctl_sched_rr_timeslice);
}
mutex_unlock(&mutex);
return ret;
}
#ifdef CONFIG_SCHED_DEBUG
void print_rt_stats(struct seq_file *m, int cpu)
{
rt_rq_iter_t iter;
struct rt_rq *rt_rq;
rcu_read_lock();
for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
print_rt_rq(m, cpu, rt_rq);
rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */