android_kernel_xiaomi_sm8450/fs/eventpoll.c
Greg Kroah-Hartman 19057a6a6b Merge 5.10.4 into android12-5.10
Changes in 5.10.4
	hwmon: (k10temp) Remove support for displaying voltage and current on Zen CPUs
	drm/gma500: fix double free of gma_connector
	iio: adc: at91_adc: add Kconfig dep on the OF symbol and remove of_match_ptr()
	drm/aspeed: Fix Kconfig warning & subsequent build errors
	drm/mcde: Fix handling of platform_get_irq() error
	drm/tve200: Fix handling of platform_get_irq() error
	arm64: dts: renesas: hihope-rzg2-ex: Drop rxc-skew-ps from ethernet-phy node
	arm64: dts: renesas: cat875: Remove rxc-skew-ps from ethernet-phy node
	soc: renesas: rmobile-sysc: Fix some leaks in rmobile_init_pm_domains()
	soc: mediatek: Check if power domains can be powered on at boot time
	arm64: dts: mediatek: mt8183: fix gce incorrect mbox-cells value
	arm64: dts: ipq6018: update the reserved-memory node
	arm64: dts: qcom: sc7180: Fix one forgotten interconnect reference
	soc: qcom: geni: More properly switch to DMA mode
	Revert "i2c: i2c-qcom-geni: Fix DMA transfer race"
	RDMA/bnxt_re: Set queue pair state when being queried
	rtc: pcf2127: fix pcf2127_nvmem_read/write() returns
	RDMA/bnxt_re: Fix entry size during SRQ create
	selinux: fix error initialization in inode_doinit_with_dentry()
	ARM: dts: aspeed-g6: Fix the GPIO memory size
	ARM: dts: aspeed: s2600wf: Fix VGA memory region location
	RDMA/core: Fix error return in _ib_modify_qp()
	RDMA/rxe: Compute PSN windows correctly
	x86/mm/ident_map: Check for errors from ident_pud_init()
	ARM: p2v: fix handling of LPAE translation in BE mode
	RDMA/rtrs-clt: Remove destroy_con_cq_qp in case route resolving failed
	RDMA/rtrs-clt: Missing error from rtrs_rdma_conn_established
	RDMA/rtrs-srv: Don't guard the whole __alloc_srv with srv_mutex
	x86/apic: Fix x2apic enablement without interrupt remapping
	ASoC: qcom: fix unsigned int bitwidth compared to less than zero
	sched/deadline: Fix sched_dl_global_validate()
	sched: Reenable interrupts in do_sched_yield()
	drm/amdgpu: fix incorrect enum type
	crypto: talitos - Endianess in current_desc_hdr()
	crypto: talitos - Fix return type of current_desc_hdr()
	crypto: inside-secure - Fix sizeof() mismatch
	ASoC: sun4i-i2s: Fix lrck_period computation for I2S justified mode
	drm/msm: Add missing stub definition
	ARM: dts: aspeed: tiogapass: Remove vuart
	drm/amdgpu: fix build_coefficients() argument
	powerpc/64: Set up a kernel stack for secondaries before cpu_restore()
	spi: img-spfi: fix reference leak in img_spfi_resume
	f2fs: call f2fs_get_meta_page_retry for nat page
	RDMA/mlx5: Fix corruption of reg_pages in mlx5_ib_rereg_user_mr()
	perf test: Use generic event for expand_libpfm_events()
	drm/msm/dp: DisplayPort PHY compliance tests fixup
	drm/msm/dsi_pll_7nm: restore VCO rate during restore_state
	drm/msm/dsi_pll_10nm: restore VCO rate during restore_state
	drm/msm/dpu: fix clock scaling on non-sc7180 board
	spi: spi-mem: fix reference leak in spi_mem_access_start
	scsi: aacraid: Improve compat_ioctl handlers
	pinctrl: core: Add missing #ifdef CONFIG_GPIOLIB
	ASoC: pcm: DRAIN support reactivation
	drm/bridge: tpd12s015: Fix irq registering in tpd12s015_probe
	crypto: arm64/poly1305-neon - reorder PAC authentication with SP update
	crypto: arm/aes-neonbs - fix usage of cbc(aes) fallback
	crypto: caam - fix printing on xts fallback allocation error path
	selinux: fix inode_doinit_with_dentry() LABEL_INVALID error handling
	nl80211/cfg80211: fix potential infinite loop
	spi: stm32: fix reference leak in stm32_spi_resume
	bpf: Fix tests for local_storage
	x86/mce: Correct the detection of invalid notifier priorities
	drm/edid: Fix uninitialized variable in drm_cvt_modes()
	ath11k: Initialize complete alpha2 for regulatory change
	ath11k: Fix number of rules in filtered ETSI regdomain
	ath11k: fix wmi init configuration
	brcmfmac: Fix memory leak for unpaired brcmf_{alloc/free}
	arm64: dts: exynos: Include common syscon restart/poweroff for Exynos7
	arm64: dts: exynos: Correct psci compatible used on Exynos7
	drm/panel: simple: Add flags to boe_nv133fhm_n61
	Bluetooth: Fix null pointer dereference in hci_event_packet()
	Bluetooth: Fix: LL PRivacy BLE device fails to connect
	Bluetooth: hci_h5: fix memory leak in h5_close
	spi: stm32-qspi: fix reference leak in stm32 qspi operations
	spi: spi-ti-qspi: fix reference leak in ti_qspi_setup
	spi: mt7621: fix missing clk_disable_unprepare() on error in mt7621_spi_probe
	spi: tegra20-slink: fix reference leak in slink ops of tegra20
	spi: tegra20-sflash: fix reference leak in tegra_sflash_resume
	spi: tegra114: fix reference leak in tegra spi ops
	spi: bcm63xx-hsspi: fix missing clk_disable_unprepare() on error in bcm63xx_hsspi_resume
	spi: imx: fix reference leak in two imx operations
	ASoC: qcom: common: Fix refcounting in qcom_snd_parse_of()
	ath11k: Handle errors if peer creation fails
	mwifiex: fix mwifiex_shutdown_sw() causing sw reset failure
	drm/msm/a6xx: Clear shadow on suspend
	drm/msm/a5xx: Clear shadow on suspend
	firmware: tegra: fix strncpy()/strncat() confusion
	drm/msm/dp: return correct connection status after suspend
	drm/msm/dp: skip checking LINK_STATUS_UPDATED bit
	drm/msm/dp: do not notify audio subsystem if sink doesn't support audio
	selftests/run_kselftest.sh: fix dry-run typo
	selftest/bpf: Add missed ip6ip6 test back
	ASoC: wm8994: Fix PM disable depth imbalance on error
	ASoC: wm8998: Fix PM disable depth imbalance on error
	spi: sprd: fix reference leak in sprd_spi_remove
	virtiofs fix leak in setup
	ASoC: arizona: Fix a wrong free in wm8997_probe
	RDMa/mthca: Work around -Wenum-conversion warning
	ASoC: SOF: Intel: fix Kconfig dependency for SND_INTEL_DSP_CONFIG
	arm64: dts: ti: k3-am65*/j721e*: Fix unit address format error for dss node
	MIPS: BCM47XX: fix kconfig dependency bug for BCM47XX_BCMA
	drm/amdgpu: fix compute queue priority if num_kcq is less than 4
	soc: ti: omap-prm: Do not check rstst bit on deassert if already deasserted
	crypto: Kconfig - CRYPTO_MANAGER_EXTRA_TESTS requires the manager
	crypto: qat - fix status check in qat_hal_put_rel_rd_xfer()
	firmware: arm_scmi: Fix missing destroy_workqueue()
	drm/udl: Fix missing error code in udl_handle_damage()
	staging: greybus: codecs: Fix reference counter leak in error handling
	staging: gasket: interrupt: fix the missed eventfd_ctx_put() in gasket_interrupt.c
	scripts: kernel-doc: Restore anonymous enum parsing
	drm/amdkfd: Put ACPI table after using it
	ionic: use mc sync for multicast filters
	ionic: flatten calls to ionic_lif_rx_mode
	ionic: change set_rx_mode from_ndo to can_sleep
	media: tm6000: Fix sizeof() mismatches
	media: platform: add missing put_device() call in mtk_jpeg_clk_init()
	media: mtk-vcodec: add missing put_device() call in mtk_vcodec_init_dec_pm()
	media: mtk-vcodec: add missing put_device() call in mtk_vcodec_release_dec_pm()
	media: mtk-vcodec: add missing put_device() call in mtk_vcodec_init_enc_pm()
	media: v4l2-fwnode: Return -EINVAL for invalid bus-type
	media: v4l2-fwnode: v4l2_fwnode_endpoint_parse caller must init vep argument
	media: ov5640: fix support of BT656 bus mode
	media: staging: rkisp1: cap: fix runtime PM imbalance on error
	media: cedrus: fix reference leak in cedrus_start_streaming
	media: platform: add missing put_device() call in mtk_jpeg_probe() and mtk_jpeg_remove()
	media: venus: core: change clk enable and disable order in resume and suspend
	media: venus: core: vote for video-mem path
	media: venus: core: vote with average bandwidth and peak bandwidth as zero
	RDMA/cma: Add missing error handling of listen_id
	ASoC: meson: fix COMPILE_TEST error
	spi: dw: fix build error by selecting MULTIPLEXER
	scsi: core: Fix VPD LUN ID designator priorities
	media: venus: put dummy vote on video-mem path after last session release
	media: solo6x10: fix missing snd_card_free in error handling case
	video: fbdev: atmel_lcdfb: fix return error code in atmel_lcdfb_of_init()
	mmc: sdhci: tegra: fix wrong unit with busy_timeout
	drm/omap: dmm_tiler: fix return error code in omap_dmm_probe()
	drm/meson: Free RDMA resources after tearing down DRM
	drm/meson: Unbind all connectors on module removal
	drm/meson: dw-hdmi: Register a callback to disable the regulator
	drm/meson: dw-hdmi: Ensure that clocks are enabled before touching the TOP registers
	ASoC: intel: SND_SOC_INTEL_KEEMBAY should depend on ARCH_KEEMBAY
	iommu/vt-d: include conditionally on CONFIG_INTEL_IOMMU_SVM
	Input: ads7846 - fix race that causes missing releases
	Input: ads7846 - fix integer overflow on Rt calculation
	Input: ads7846 - fix unaligned access on 7845
	bus: mhi: core: Remove double locking from mhi_driver_remove()
	bus: mhi: core: Fix null pointer access when parsing MHI configuration
	usb/max3421: fix return error code in max3421_probe()
	spi: mxs: fix reference leak in mxs_spi_probe
	selftests/bpf: Fix broken riscv build
	powerpc: Avoid broken GCC __attribute__((optimize))
	powerpc/feature: Fix CPU_FTRS_ALWAYS by removing CPU_FTRS_GENERIC_32
	ARM: dts: tacoma: Fix node vs reg mismatch for flash memory
	Revert "powerpc/pseries/hotplug-cpu: Remove double free in error path"
	powerpc/powernv/sriov: fix unsigned int win compared to less than zero
	mfd: htc-i2cpld: Add the missed i2c_put_adapter() in htcpld_register_chip_i2c()
	mfd: MFD_SL28CPLD should depend on ARCH_LAYERSCAPE
	mfd: stmfx: Fix dev_err_probe() call in stmfx_chip_init()
	mfd: cpcap: Fix interrupt regression with regmap clear_ack
	EDAC/mce_amd: Use struct cpuinfo_x86.cpu_die_id for AMD NodeId
	scsi: ufs: Avoid to call REQ_CLKS_OFF to CLKS_OFF
	scsi: ufs: Fix clkgating on/off
	rcu: Allow rcu_irq_enter_check_tick() from NMI
	rcu,ftrace: Fix ftrace recursion
	rcu/tree: Defer kvfree_rcu() allocation to a clean context
	crypto: crypto4xx - Replace bitwise OR with logical OR in crypto4xx_build_pd
	crypto: omap-aes - Fix PM disable depth imbalance in omap_aes_probe
	crypto: sun8i-ce - fix two error path's memory leak
	spi: fix resource leak for drivers without .remove callback
	drm/meson: dw-hdmi: Disable clocks on driver teardown
	drm/meson: dw-hdmi: Enable the iahb clock early enough
	PCI: Disable MSI for Pericom PCIe-USB adapter
	PCI: brcmstb: Initialize "tmp" before use
	soc: ti: knav_qmss: fix reference leak in knav_queue_probe
	soc: ti: Fix reference imbalance in knav_dma_probe
	drivers: soc: ti: knav_qmss_queue: Fix error return code in knav_queue_probe
	soc: qcom: initialize local variable
	arm64: dts: qcom: sm8250: correct compatible for sm8250-mtp
	arm64: dts: qcom: msm8916-samsung-a2015: Disable muic i2c pin bias
	Input: omap4-keypad - fix runtime PM error handling
	clk: meson: Kconfig: fix dependency for G12A
	staging: mfd: hi6421-spmi-pmic: fix error return code in hi6421_spmi_pmic_probe()
	ath11k: Fix the rx_filter flag setting for peer rssi stats
	RDMA/cxgb4: Validate the number of CQEs
	soundwire: Fix DEBUG_LOCKS_WARN_ON for uninitialized attribute
	pinctrl: sunxi: fix irq bank map for the Allwinner A100 pin controller
	memstick: fix a double-free bug in memstick_check
	ARM: dts: at91: sam9x60: add pincontrol for USB Host
	ARM: dts: at91: sama5d4_xplained: add pincontrol for USB Host
	ARM: dts: at91: sama5d3_xplained: add pincontrol for USB Host
	mmc: pxamci: Fix error return code in pxamci_probe
	brcmfmac: fix error return code in brcmf_cfg80211_connect()
	orinoco: Move context allocation after processing the skb
	qtnfmac: fix error return code in qtnf_pcie_probe()
	rsi: fix error return code in rsi_reset_card()
	cw1200: fix missing destroy_workqueue() on error in cw1200_init_common
	dmaengine: mv_xor_v2: Fix error return code in mv_xor_v2_probe()
	arm64: dts: qcom: sdm845: Limit ipa iommu streams
	leds: netxbig: add missing put_device() call in netxbig_leds_get_of_pdata()
	leds: lp50xx: Fix an error handling path in 'lp50xx_probe_dt()'
	leds: turris-omnia: check for LED_COLOR_ID_RGB instead LED_COLOR_ID_MULTI
	arm64: tegra: Fix DT binding for IO High Voltage entry
	RDMA/cma: Fix deadlock on &lock in rdma_cma_listen_on_all() error unwind
	soundwire: qcom: Fix build failure when slimbus is module
	drm/imx/dcss: fix rotations for Vivante tiled formats
	media: siano: fix memory leak of debugfs members in smsdvb_hotplug
	platform/x86: mlx-platform: Remove PSU EEPROM from default platform configuration
	platform/x86: mlx-platform: Remove PSU EEPROM from MSN274x platform configuration
	arm64: dts: qcom: sc7180: limit IPA iommu streams
	RDMA/hns: Only record vlan info for HIP08
	RDMA/hns: Fix missing fields in address vector
	RDMA/hns: Avoid setting loopback indicator when smac is same as dmac
	serial: 8250-mtk: Fix reference leak in mtk8250_probe
	samples: bpf: Fix lwt_len_hist reusing previous BPF map
	media: imx214: Fix stop streaming
	mips: cdmm: fix use-after-free in mips_cdmm_bus_discover
	media: max2175: fix max2175_set_csm_mode() error code
	slimbus: qcom-ngd-ctrl: Avoid sending power requests without QMI
	RDMA/core: Track device memory MRs
	drm/mediatek: Use correct aliases name for ovl
	HSI: omap_ssi: Don't jump to free ID in ssi_add_controller()
	ARM: dts: Remove non-existent i2c1 from 98dx3236
	arm64: dts: armada-3720-turris-mox: update ethernet-phy handle name
	power: supply: bq25890: Use the correct range for IILIM register
	arm64: dts: rockchip: Set dr_mode to "host" for OTG on rk3328-roc-cc
	power: supply: max17042_battery: Fix current_{avg,now} hiding with no current sense
	power: supply: axp288_charger: Fix HP Pavilion x2 10 DMI matching
	power: supply: bq24190_charger: fix reference leak
	genirq/irqdomain: Don't try to free an interrupt that has no mapping
	arm64: dts: ls1028a: fix ENETC PTP clock input
	arm64: dts: ls1028a: fix FlexSPI clock input
	arm64: dts: freescale: sl28: combine SPI MTD partitions
	phy: tegra: xusb: Fix usb_phy device driver field
	arm64: dts: qcom: c630: Polish i2c-hid devices
	arm64: dts: qcom: c630: Fix pinctrl pins properties
	PCI: Bounds-check command-line resource alignment requests
	PCI: Fix overflow in command-line resource alignment requests
	PCI: iproc: Fix out-of-bound array accesses
	PCI: iproc: Invalidate correct PAXB inbound windows
	arm64: dts: meson: fix spi-max-frequency on Khadas VIM2
	arm64: dts: meson-sm1: fix typo in opp table
	soc: amlogic: canvas: add missing put_device() call in meson_canvas_get()
	scsi: hisi_sas: Fix up probe error handling for v3 hw
	scsi: pm80xx: Do not sleep in atomic context
	spi: spi-fsl-dspi: Use max_native_cs instead of num_chipselect to set SPI_MCR
	ARM: dts: at91: at91sam9rl: fix ADC triggers
	RDMA/hns: Fix 0-length sge calculation error
	RDMA/hns: Bugfix for calculation of extended sge
	mailbox: arm_mhu_db: Fix mhu_db_shutdown by replacing kfree with devm_kfree
	soundwire: master: use pm_runtime_set_active() on add
	platform/x86: dell-smbios-base: Fix error return code in dell_smbios_init
	ASoC: Intel: Boards: tgl_max98373: update TDM slot_width
	media: max9271: Fix GPIO enable/disable
	media: rdacm20: Enable GPIO1 explicitly
	media: i2c: imx219: Selection compliance fixes
	ath11k: Don't cast ath11k_skb_cb to ieee80211_tx_info.control
	ath11k: Reset ath11k_skb_cb before setting new flags
	ath11k: Fix an error handling path
	ath10k: Fix the parsing error in service available event
	ath10k: Fix an error handling path
	ath10k: Release some resources in an error handling path
	SUNRPC: rpc_wake_up() should wake up tasks in the correct order
	NFSv4.2: condition READDIR's mask for security label based on LSM state
	SUNRPC: xprt_load_transport() needs to support the netid "rdma6"
	NFSv4: Fix the alignment of page data in the getdeviceinfo reply
	net: sunrpc: Fix 'snprintf' return value check in 'do_xprt_debugfs'
	lockd: don't use interval-based rebinding over TCP
	NFS: switch nfsiod to be an UNBOUND workqueue.
	selftests/seccomp: Update kernel config
	vfio-pci: Use io_remap_pfn_range() for PCI IO memory
	hwmon: (ina3221) Fix PM usage counter unbalance in ina3221_write_enable
	f2fs: fix double free of unicode map
	media: tvp5150: Fix wrong return value of tvp5150_parse_dt()
	media: saa7146: fix array overflow in vidioc_s_audio()
	powerpc/perf: Fix crash with is_sier_available when pmu is not set
	powerpc/64: Fix an EMIT_BUG_ENTRY in head_64.S
	powerpc/xmon: Fix build failure for 8xx
	powerpc/perf: Fix to update radix_scope_qual in power10
	powerpc/perf: Update the PMU group constraints for l2l3 events in power10
	powerpc/perf: Fix the PMU group constraints for threshold events in power10
	clocksource/drivers/orion: Add missing clk_disable_unprepare() on error path
	clocksource/drivers/cadence_ttc: Fix memory leak in ttc_setup_clockevent()
	clocksource/drivers/ingenic: Fix section mismatch
	clocksource/drivers/riscv: Make RISCV_TIMER depends on RISCV_SBI
	arm64: mte: fix prctl(PR_GET_TAGGED_ADDR_CTRL) if TCF0=NONE
	iio: hrtimer-trigger: Mark hrtimer to expire in hard interrupt context
	libbpf: Sanitise map names before pinning
	ARM: dts: at91: sam9x60ek: remove bypass property
	ARM: dts: at91: sama5d2: map securam as device
	scripts: kernel-doc: fix parsing function-like typedefs
	bpf: Fix bpf_put_raw_tracepoint()'s use of __module_address()
	selftests/bpf: Fix invalid use of strncat in test_sockmap
	pinctrl: falcon: add missing put_device() call in pinctrl_falcon_probe()
	soc: rockchip: io-domain: Fix error return code in rockchip_iodomain_probe()
	arm64: dts: rockchip: Fix UART pull-ups on rk3328
	memstick: r592: Fix error return in r592_probe()
	MIPS: Don't round up kernel sections size for memblock_add()
	mt76: mt7663s: fix a possible ple quota underflow
	mt76: mt7915: set fops_sta_stats.owner to THIS_MODULE
	mt76: set fops_tx_stats.owner to THIS_MODULE
	mt76: dma: fix possible deadlock running mt76_dma_cleanup
	net/mlx5: Properly convey driver version to firmware
	mt76: fix memory leak if device probing fails
	mt76: fix tkip configuration for mt7615/7663 devices
	ASoC: jz4740-i2s: add missed checks for clk_get()
	ASoC: q6afe-clocks: Add missing parent clock rate
	dm ioctl: fix error return code in target_message
	ASoC: cros_ec_codec: fix uninitialized memory read
	ASoC: atmel: mchp-spdifrx needs COMMON_CLK
	ASoC: qcom: fix QDSP6 dependencies, attempt #3
	phy: mediatek: allow compile-testing the hdmi phy
	phy: renesas: rcar-gen3-usb2: disable runtime pm in case of failure
	memory: ti-emif-sram: only build for ARMv7
	memory: jz4780_nemc: Fix potential NULL dereference in jz4780_nemc_probe()
	drm/msm: a5xx: Make preemption reset case reentrant
	drm/msm: add IOMMU_SUPPORT dependency
	clocksource/drivers/arm_arch_timer: Use stable count reader in erratum sne
	clocksource/drivers/arm_arch_timer: Correct fault programming of CNTKCTL_EL1.EVNTI
	cpufreq: ap806: Add missing MODULE_DEVICE_TABLE
	cpufreq: highbank: Add missing MODULE_DEVICE_TABLE
	cpufreq: mediatek: Add missing MODULE_DEVICE_TABLE
	cpufreq: qcom: Add missing MODULE_DEVICE_TABLE
	cpufreq: st: Add missing MODULE_DEVICE_TABLE
	cpufreq: sun50i: Add missing MODULE_DEVICE_TABLE
	cpufreq: loongson1: Add missing MODULE_ALIAS
	cpufreq: scpi: Add missing MODULE_ALIAS
	cpufreq: vexpress-spc: Add missing MODULE_ALIAS
	cpufreq: imx: fix NVMEM_IMX_OCOTP dependency
	macintosh/adb-iop: Always wait for reply message from IOP
	macintosh/adb-iop: Send correct poll command
	staging: bcm2835: fix vchiq_mmal dependencies
	staging: greybus: audio: Fix possible leak free widgets in gbaudio_dapm_free_controls
	spi: dw: Fix error return code in dw_spi_bt1_probe()
	Bluetooth: btusb: Add the missed release_firmware() in btusb_mtk_setup_firmware()
	Bluetooth: btmtksdio: Add the missed release_firmware() in mtk_setup_firmware()
	Bluetooth: sco: Fix crash when using BT_SNDMTU/BT_RCVMTU option
	block/rnbd-clt: Dynamically alloc buffer for pathname & blk_symlink_name
	block/rnbd: fix a null pointer dereference on dev->blk_symlink_name
	Bluetooth: btusb: Fix detection of some fake CSR controllers with a bcdDevice val of 0x0134
	platform/x86: intel-vbtn: Fix SW_TABLET_MODE always reporting 1 on some HP x360 models
	adm8211: fix error return code in adm8211_probe()
	mtd: spi-nor: sst: fix BPn bits for the SST25VF064C
	mtd: spi-nor: ignore errors in spi_nor_unlock_all()
	mtd: spi-nor: atmel: remove global protection flag
	mtd: spi-nor: atmel: fix unlock_all() for AT25FS010/040
	arm64: dts: meson: g12b: odroid-n2: fix PHY deassert timing requirements
	arm64: dts: meson: fix PHY deassert timing requirements
	ARM: dts: meson: fix PHY deassert timing requirements
	arm64: dts: meson: g12a: x96-max: fix PHY deassert timing requirements
	arm64: dts: meson: g12b: w400: fix PHY deassert timing requirements
	clk: fsl-sai: fix memory leak
	scsi: qedi: Fix missing destroy_workqueue() on error in __qedi_probe
	scsi: pm80xx: Fix error return in pm8001_pci_probe()
	scsi: iscsi: Fix inappropriate use of put_device()
	seq_buf: Avoid type mismatch for seq_buf_init
	scsi: fnic: Fix error return code in fnic_probe()
	platform/x86: mlx-platform: Fix item counter assignment for MSN2700, MSN24xx systems
	platform/x86: mlx-platform: Fix item counter assignment for MSN2700/ComEx system
	ARM: 9030/1: entry: omit FP emulation for UND exceptions taken in kernel mode
	powerpc/pseries/hibernation: drop pseries_suspend_begin() from suspend ops
	powerpc/pseries/hibernation: remove redundant cacheinfo update
	powerpc/powermac: Fix low_sleep_handler with CONFIG_VMAP_STACK
	drm/mediatek: avoid dereferencing a null hdmi_phy on an error message
	ASoC: amd: change clk_get() to devm_clk_get() and add missed checks
	coresight: remove broken __exit annotations
	ASoC: max98390: Fix error codes in max98390_dsm_init()
	powerpc/mm: sanity_check_fault() should work for all, not only BOOK3S
	usb: ehci-omap: Fix PM disable depth umbalance in ehci_hcd_omap_probe
	usb: oxu210hp-hcd: Fix memory leak in oxu_create
	speakup: fix uninitialized flush_lock
	nfsd: Fix message level for normal termination
	NFSD: Fix 5 seconds delay when doing inter server copy
	nfs_common: need lock during iterate through the list
	x86/kprobes: Restore BTF if the single-stepping is cancelled
	scsi: qla2xxx: Fix FW initialization error on big endian machines
	scsi: qla2xxx: Fix N2N and NVMe connect retry failure
	platform/chrome: cros_ec_spi: Don't overwrite spi::mode
	misc: pci_endpoint_test: fix return value of error branch
	bus: fsl-mc: add back accidentally dropped error check
	bus: fsl-mc: fix error return code in fsl_mc_object_allocate()
	fsi: Aspeed: Add mutex to protect HW access
	s390/cio: fix use-after-free in ccw_device_destroy_console
	iwlwifi: dbg-tlv: fix old length in is_trig_data_contained()
	iwlwifi: mvm: hook up missing RX handlers
	erofs: avoid using generic_block_bmap
	clk: renesas: r8a779a0: Fix R and OSC clocks
	can: m_can: m_can_config_endisable(): remove double clearing of clock stop request bit
	powerpc/sstep: Emulate prefixed instructions only when CPU_FTR_ARCH_31 is set
	powerpc/sstep: Cover new VSX instructions under CONFIG_VSX
	slimbus: qcom: fix potential NULL dereference in qcom_slim_prg_slew()
	ALSA: hda/hdmi: fix silent stream for first playback to DP
	RDMA/core: Do not indicate device ready when device enablement fails
	RDMA/uverbs: Fix incorrect variable type
	remoteproc/mediatek: change MT8192 CFG register base
	remoteproc/mtk_scp: surround DT device IDs with CONFIG_OF
	remoteproc: q6v5-mss: fix error handling in q6v5_pds_enable
	remoteproc: qcom: fix reference leak in adsp_start
	remoteproc: qcom: pas: fix error handling in adsp_pds_enable
	remoteproc: k3-dsp: Fix return value check in k3_dsp_rproc_of_get_memories()
	remoteproc: qcom: Fix potential NULL dereference in adsp_init_mmio()
	remoteproc/mediatek: unprepare clk if scp_before_load fails
	clk: qcom: gcc-sc7180: Use floor ops for sdcc clks
	clk: tegra: Fix duplicated SE clock entry
	mtd: rawnand: gpmi: fix reference count leak in gpmi ops
	mtd: rawnand: meson: Fix a resource leak in init
	mtd: rawnand: gpmi: Fix the random DMA timeout issue
	samples/bpf: Fix possible hang in xdpsock with multiple threads
	fs: Handle I_DONTCACHE in iput_final() instead of generic_drop_inode()
	extcon: max77693: Fix modalias string
	crypto: atmel-i2c - select CONFIG_BITREVERSE
	mac80211: don't set set TDLS STA bandwidth wider than possible
	mac80211: fix a mistake check for rx_stats update
	ASoC: wm_adsp: remove "ctl" from list on error in wm_adsp_create_control()
	irqchip/alpine-msi: Fix freeing of interrupts on allocation error path
	irqchip/ti-sci-inta: Fix printing of inta id on probe success
	irqchip/ti-sci-intr: Fix freeing of irqs
	dmaengine: ti: k3-udma: Correct normal channel offset when uchan_cnt is not 0
	RDMA/hns: Limit the length of data copied between kernel and userspace
	RDMA/hns: Normalization the judgment of some features
	RDMA/hns: Do shift on traffic class when using RoCEv2
	gpiolib: irq hooks: fix recursion in gpiochip_irq_unmask
	ath11k: Fix incorrect tlvs in scan start command
	irqchip/qcom-pdc: Fix phantom irq when changing between rising/falling
	watchdog: armada_37xx: Add missing dependency on HAS_IOMEM
	watchdog: sirfsoc: Add missing dependency on HAS_IOMEM
	watchdog: sprd: remove watchdog disable from resume fail path
	watchdog: sprd: check busy bit before new loading rather than after that
	watchdog: Fix potential dereferencing of null pointer
	ubifs: Fix error return code in ubifs_init_authentication()
	um: Monitor error events in IRQ controller
	um: tty: Fix handling of close in tty lines
	um: chan_xterm: Fix fd leak
	sunrpc: fix xs_read_xdr_buf for partial pages receive
	RDMA/mlx5: Fix MR cache memory leak
	RDMA/cma: Don't overwrite sgid_attr after device is released
	nfc: s3fwrn5: Release the nfc firmware
	drm: mxsfb: Silence -EPROBE_DEFER while waiting for bridge
	powerpc/perf: Fix Threshold Event Counter Multiplier width for P10
	powerpc/ps3: use dma_mapping_error()
	perf test: Fix metric parsing test
	drm/amdgpu: fix regression in vbios reservation handling on headless
	mm/gup: reorganize internal_get_user_pages_fast()
	mm/gup: prevent gup_fast from racing with COW during fork
	mm/gup: combine put_compound_head() and unpin_user_page()
	mm: memcg/slab: fix return of child memcg objcg for root memcg
	mm: memcg/slab: fix use after free in obj_cgroup_charge
	mm/rmap: always do TTU_IGNORE_ACCESS
	sparc: fix handling of page table constructor failure
	mm/vmalloc: Fix unlock order in s_stop()
	mm/vmalloc.c: fix kasan shadow poisoning size
	mm,memory_failure: always pin the page in madvise_inject_error
	hugetlb: fix an error code in hugetlb_reserve_pages()
	mm: don't wake kswapd prematurely when watermark boosting is disabled
	proc: fix lookup in /proc/net subdirectories after setns(2)
	checkpatch: fix unescaped left brace
	s390/test_unwind: fix CALL_ON_STACK tests
	lan743x: fix rx_napi_poll/interrupt ping-pong
	ice, xsk: clear the status bits for the next_to_use descriptor
	i40e, xsk: clear the status bits for the next_to_use descriptor
	net: dsa: qca: ar9331: fix sleeping function called from invalid context bug
	dpaa2-eth: fix the size of the mapped SGT buffer
	net: bcmgenet: Fix a resource leak in an error handling path in the probe functin
	net: mscc: ocelot: Fix a resource leak in the error handling path of the probe function
	net: allwinner: Fix some resources leak in the error handling path of the probe and in the remove function
	block/rnbd-clt: Get rid of warning regarding size argument in strlcpy
	block/rnbd-clt: Fix possible memleak
	NFS/pNFS: Fix a typo in ff_layout_resend_pnfs_read()
	net: korina: fix return value
	devlink: use _BITUL() macro instead of BIT() in the UAPI header
	libnvdimm/label: Return -ENXIO for no slot in __blk_label_update
	powerpc/32s: Fix cleanup_cpu_mmu_context() compile bug
	watchdog: qcom: Avoid context switch in restart handler
	watchdog: coh901327: add COMMON_CLK dependency
	clk: ti: Fix memleak in ti_fapll_synth_setup
	pwm: zx: Add missing cleanup in error path
	pwm: lp3943: Dynamically allocate PWM chip base
	pwm: imx27: Fix overflow for bigger periods
	pwm: sun4i: Remove erroneous else branch
	io_uring: cancel only requests of current task
	tools build: Add missing libcap to test-all.bin target
	perf record: Fix memory leak when using '--user-regs=?' to list registers
	qlcnic: Fix error code in probe
	nfp: move indirect block cleanup to flower app stop callback
	vdpa/mlx5: Use write memory barrier after updating CQ index
	virtio_ring: Cut and paste bugs in vring_create_virtqueue_packed()
	virtio_net: Fix error code in probe()
	virtio_ring: Fix two use after free bugs
	vhost scsi: fix error return code in vhost_scsi_set_endpoint()
	epoll: check for events when removing a timed out thread from the wait queue
	clk: bcm: dvp: Add MODULE_DEVICE_TABLE()
	clk: at91: sama7g5: fix compilation error
	clk: at91: sam9x60: remove atmel,osc-bypass support
	clk: s2mps11: Fix a resource leak in error handling paths in the probe function
	clk: sunxi-ng: Make sure divider tables have sentinel
	clk: vc5: Use "idt,voltage-microvolt" instead of "idt,voltage-microvolts"
	kconfig: fix return value of do_error_if()
	powerpc/boot: Fix build of dts/fsl
	powerpc/smp: Add __init to init_big_cores()
	ARM: 9044/1: vfp: use undef hook for VFP support detection
	ARM: 9036/1: uncompress: Fix dbgadtb size parameter name
	perf probe: Fix memory leak when synthesizing SDT probes
	io_uring: fix racy IOPOLL flush overflow
	io_uring: cancel reqs shouldn't kill overflow list
	Smack: Handle io_uring kernel thread privileges
	proc mountinfo: make splice available again
	io_uring: fix io_cqring_events()'s noflush
	io_uring: fix racy IOPOLL completions
	io_uring: always let io_iopoll_complete() complete polled io
	vfio/pci: Move dummy_resources_list init in vfio_pci_probe()
	vfio/pci/nvlink2: Do not attempt NPU2 setup on POWER8NVL NPU
	media: gspca: Fix memory leak in probe
	io_uring: fix io_wqe->work_list corruption
	io_uring: fix 0-iov read buffer select
	io_uring: hold uring_lock while completing failed polled io in io_wq_submit_work()
	io_uring: fix ignoring xa_store errors
	io_uring: fix double io_uring free
	io_uring: make ctx cancel on exit targeted to actual ctx
	media: sunxi-cir: ensure IR is handled when it is continuous
	media: netup_unidvb: Don't leak SPI master in probe error path
	media: ipu3-cio2: Remove traces of returned buffers
	media: ipu3-cio2: Return actual subdev format
	media: ipu3-cio2: Serialise access to pad format
	media: ipu3-cio2: Validate mbus format in setting subdev format
	media: ipu3-cio2: Make the field on subdev format V4L2_FIELD_NONE
	Input: cyapa_gen6 - fix out-of-bounds stack access
	ALSA: hda/ca0132 - Change Input Source enum strings.
	ACPI: NFIT: Fix input validation of bus-family
	PM: ACPI: PCI: Drop acpi_pm_set_bridge_wakeup()
	Revert "ACPI / resources: Use AE_CTRL_TERMINATE to terminate resources walks"
	ACPI: PNP: compare the string length in the matching_id()
	ALSA: hda: Fix regressions on clear and reconfig sysfs
	ALSA: hda/ca0132 - Fix AE-5 rear headphone pincfg.
	ALSA: hda/realtek: make bass spk volume adjustable on a yoga laptop
	ALSA: hda/realtek - Enable headset mic of ASUS X430UN with ALC256
	ALSA: hda/realtek - Enable headset mic of ASUS Q524UQK with ALC255
	ALSA: hda/realtek - Add supported for more Lenovo ALC285 Headset Button
	ALSA: pcm: oss: Fix a few more UBSAN fixes
	ALSA/hda: apply jack fixup for the Acer Veriton N4640G/N6640G/N2510G
	ALSA: hda/realtek: Add quirk for MSI-GP73
	ALSA: hda/realtek: Apply jack fixup for Quanta NL3
	ALSA: hda/realtek: Remove dummy lineout on Acer TravelMate P648/P658
	ALSA: hda/realtek - Supported Dell fixed type headset
	ALSA: usb-audio: Add VID to support native DSD reproduction on FiiO devices
	ALSA: usb-audio: Disable sample read check if firmware doesn't give back
	ALSA: usb-audio: Add alias entry for ASUS PRIME TRX40 PRO-S
	ALSA: core: memalloc: add page alignment for iram
	s390/smp: perform initial CPU reset also for SMT siblings
	s390/kexec_file: fix diag308 subcode when loading crash kernel
	s390/idle: add missing mt_cycles calculation
	s390/idle: fix accounting with machine checks
	s390/dasd: fix hanging device offline processing
	s390/dasd: prevent inconsistent LCU device data
	s390/dasd: fix list corruption of pavgroup group list
	s390/dasd: fix list corruption of lcu list
	binder: add flag to clear buffer on txn complete
	ASoC: cx2072x: Fix doubly definitions of Playback and Capture streams
	ASoC: AMD Renoir - add DMI table to avoid the ACP mic probe (broken BIOS)
	ASoC: AMD Raven/Renoir - fix the PCI probe (PCI revision)
	staging: comedi: mf6x4: Fix AI end-of-conversion detection
	z3fold: simplify freeing slots
	z3fold: stricter locking and more careful reclaim
	perf/x86/intel: Add event constraint for CYCLE_ACTIVITY.STALLS_MEM_ANY
	perf/x86/intel: Fix rtm_abort_event encoding on Ice Lake
	perf/x86/intel/lbr: Fix the return type of get_lbr_cycles()
	powerpc/perf: Exclude kernel samples while counting events in user space.
	cpufreq: intel_pstate: Use most recent guaranteed performance values
	crypto: ecdh - avoid unaligned accesses in ecdh_set_secret()
	crypto: arm/aes-ce - work around Cortex-A57/A72 silion errata
	m68k: Fix WARNING splat in pmac_zilog driver
	Documentation: seqlock: s/LOCKTYPE/LOCKNAME/g
	EDAC/i10nm: Use readl() to access MMIO registers
	EDAC/amd64: Fix PCI component registration
	cpuset: fix race between hotplug work and later CPU offline
	dyndbg: fix use before null check
	USB: serial: mos7720: fix parallel-port state restore
	USB: serial: digi_acceleport: fix write-wakeup deadlocks
	USB: serial: keyspan_pda: fix dropped unthrottle interrupts
	USB: serial: keyspan_pda: fix write deadlock
	USB: serial: keyspan_pda: fix stalled writes
	USB: serial: keyspan_pda: fix write-wakeup use-after-free
	USB: serial: keyspan_pda: fix tx-unthrottle use-after-free
	USB: serial: keyspan_pda: fix write unthrottling
	btrfs: do not shorten unpin len for caching block groups
	btrfs: update last_byte_to_unpin in switch_commit_roots
	btrfs: fix race when defragmenting leads to unnecessary IO
	ext4: fix an IS_ERR() vs NULL check
	ext4: fix a memory leak of ext4_free_data
	ext4: fix deadlock with fs freezing and EA inodes
	ext4: don't remount read-only with errors=continue on reboot
	RISC-V: Fix usage of memblock_enforce_memory_limit
	arm64: dts: ti: k3-am65: mark dss as dma-coherent
	arm64: dts: marvell: keep SMMU disabled by default for Armada 7040 and 8040
	KVM: arm64: Introduce handling of AArch32 TTBCR2 traps
	KVM: x86: reinstate vendor-agnostic check on SPEC_CTRL cpuid bits
	KVM: SVM: Remove the call to sev_platform_status() during setup
	iommu/arm-smmu: Allow implementation specific write_s2cr
	iommu/arm-smmu-qcom: Read back stream mappings
	iommu/arm-smmu-qcom: Implement S2CR quirk
	ARM: dts: pandaboard: fix pinmux for gpio user button of Pandaboard ES
	ARM: dts: at91: sama5d2: fix CAN message ram offset and size
	ARM: tegra: Populate OPP table for Tegra20 Ventana
	xprtrdma: Fix XDRBUF_SPARSE_PAGES support
	powerpc/32: Fix vmap stack - Properly set r1 before activating MMU on syscall too
	powerpc: Fix incorrect stw{, ux, u, x} instructions in __set_pte_at
	powerpc/rtas: Fix typo of ibm,open-errinjct in RTAS filter
	powerpc/bitops: Fix possible undefined behaviour with fls() and fls64()
	powerpc/feature: Add CPU_FTR_NOEXECUTE to G2_LE
	powerpc/xmon: Change printk() to pr_cont()
	powerpc/8xx: Fix early debug when SMC1 is relocated
	powerpc/mm: Fix verification of MMU_FTR_TYPE_44x
	powerpc/powernv/npu: Do not attempt NPU2 setup on POWER8NVL NPU
	powerpc/powernv/memtrace: Don't leak kernel memory to user space
	powerpc/powernv/memtrace: Fix crashing the kernel when enabling concurrently
	ovl: make ioctl() safe
	ima: Don't modify file descriptor mode on the fly
	um: Remove use of asprinf in umid.c
	um: Fix time-travel mode
	ceph: fix race in concurrent __ceph_remove_cap invocations
	SMB3: avoid confusing warning message on mount to Azure
	SMB3.1.1: remove confusing mount warning when no SPNEGO info on negprot rsp
	SMB3.1.1: do not log warning message if server doesn't populate salt
	ubifs: wbuf: Don't leak kernel memory to flash
	jffs2: Fix GC exit abnormally
	jffs2: Fix ignoring mounting options problem during remounting
	fsnotify: generalize handle_inode_event()
	inotify: convert to handle_inode_event() interface
	fsnotify: fix events reported to watching parent and child
	jfs: Fix array index bounds check in dbAdjTree
	drm/panfrost: Fix job timeout handling
	drm/panfrost: Move the GPU reset bits outside the timeout handler
	platform/x86: mlx-platform: remove an unused variable
	drm/amdgpu: only set DP subconnector type on DP and eDP connectors
	drm/amd/display: Fix memory leaks in S3 resume
	drm/dp_aux_dev: check aux_dev before use in drm_dp_aux_dev_get_by_minor()
	drm/i915: Fix mismatch between misplaced vma check and vma insert
	iio: ad_sigma_delta: Don't put SPI transfer buffer on the stack
	spi: pxa2xx: Fix use-after-free on unbind
	spi: spi-sh: Fix use-after-free on unbind
	spi: atmel-quadspi: Fix use-after-free on unbind
	spi: spi-mtk-nor: Don't leak SPI master in probe error path
	spi: ar934x: Don't leak SPI master in probe error path
	spi: davinci: Fix use-after-free on unbind
	spi: fsl: fix use of spisel_boot signal on MPC8309
	spi: gpio: Don't leak SPI master in probe error path
	spi: mxic: Don't leak SPI master in probe error path
	spi: npcm-fiu: Disable clock in probe error path
	spi: pic32: Don't leak DMA channels in probe error path
	spi: rb4xx: Don't leak SPI master in probe error path
	spi: rpc-if: Fix use-after-free on unbind
	spi: sc18is602: Don't leak SPI master in probe error path
	spi: spi-geni-qcom: Fix use-after-free on unbind
	spi: spi-qcom-qspi: Fix use-after-free on unbind
	spi: st-ssc4: Fix unbalanced pm_runtime_disable() in probe error path
	spi: synquacer: Disable clock in probe error path
	spi: mt7621: Disable clock in probe error path
	spi: mt7621: Don't leak SPI master in probe error path
	spi: atmel-quadspi: Disable clock in probe error path
	spi: atmel-quadspi: Fix AHB memory accesses
	soc: qcom: smp2p: Safely acquire spinlock without IRQs
	mtd: spinand: Fix OOB read
	mtd: parser: cmdline: Fix parsing of part-names with colons
	mtd: core: Fix refcounting for unpartitioned MTDs
	mtd: rawnand: qcom: Fix DMA sync on FLASH_STATUS register read
	mtd: rawnand: meson: fix meson_nfc_dma_buffer_release() arguments
	scsi: qla2xxx: Fix crash during driver load on big endian machines
	scsi: lpfc: Fix invalid sleeping context in lpfc_sli4_nvmet_alloc()
	scsi: lpfc: Fix scheduling call while in softirq context in lpfc_unreg_rpi
	scsi: lpfc: Re-fix use after free in lpfc_rq_buf_free()
	openat2: reject RESOLVE_BENEATH|RESOLVE_IN_ROOT
	iio: buffer: Fix demux update
	iio: adc: rockchip_saradc: fix missing clk_disable_unprepare() on error in rockchip_saradc_resume
	iio: imu: st_lsm6dsx: fix edge-trigger interrupts
	iio:light:rpr0521: Fix timestamp alignment and prevent data leak.
	iio:light:st_uvis25: Fix timestamp alignment and prevent data leak.
	iio:magnetometer:mag3110: Fix alignment and data leak issues.
	iio:pressure:mpl3115: Force alignment of buffer
	iio:imu:bmi160: Fix too large a buffer.
	iio:imu:bmi160: Fix alignment and data leak issues
	iio:adc:ti-ads124s08: Fix buffer being too long.
	iio:adc:ti-ads124s08: Fix alignment and data leak issues.
	md/cluster: block reshape with remote resync job
	md/cluster: fix deadlock when node is doing resync job
	pinctrl: sunxi: Always call chained_irq_{enter, exit} in sunxi_pinctrl_irq_handler
	clk: ingenic: Fix divider calculation with div tables
	clk: mvebu: a3700: fix the XTAL MODE pin to MPP1_9
	clk: tegra: Do not return 0 on failure
	counter: microchip-tcb-capture: Fix CMR value check
	device-dax/core: Fix memory leak when rmmod dax.ko
	dma-buf/dma-resv: Respect num_fences when initializing the shared fence list.
	driver: core: Fix list corruption after device_del()
	xen-blkback: set ring->xenblkd to NULL after kthread_stop()
	xen/xenbus: Allow watches discard events before queueing
	xen/xenbus: Add 'will_handle' callback support in xenbus_watch_path()
	xen/xenbus/xen_bus_type: Support will_handle watch callback
	xen/xenbus: Count pending messages for each watch
	xenbus/xenbus_backend: Disallow pending watch messages
	memory: jz4780_nemc: Fix an error pointer vs NULL check in probe()
	memory: renesas-rpc-if: Fix a node reference leak in rpcif_probe()
	memory: renesas-rpc-if: Return correct value to the caller of rpcif_manual_xfer()
	memory: renesas-rpc-if: Fix unbalanced pm_runtime_enable in rpcif_{enable,disable}_rpm
	libnvdimm/namespace: Fix reaping of invalidated block-window-namespace labels
	platform/x86: intel-vbtn: Allow switch events on Acer Switch Alpha 12
	tracing: Disable ftrace selftests when any tracer is running
	mt76: add back the SUPPORTS_REORDERING_BUFFER flag
	of: fix linker-section match-table corruption
	PCI: Fix pci_slot_release() NULL pointer dereference
	regulator: axp20x: Fix DLDO2 voltage control register mask for AXP22x
	remoteproc: sysmon: Ensure remote notification ordering
	thermal/drivers/cpufreq_cooling: Update cpufreq_state only if state has changed
	rtc: ep93xx: Fix NULL pointer dereference in ep93xx_rtc_read_time
	Revert: "ring-buffer: Remove HAVE_64BIT_ALIGNED_ACCESS"
	null_blk: Fix zone size initialization
	null_blk: Fail zone append to conventional zones
	drm/edid: fix objtool warning in drm_cvt_modes()
	x86/CPU/AMD: Save AMD NodeId as cpu_die_id
	Linux 5.10.4

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I25209e79d8b9faf5382087955a29b7404bdefe38
2020-12-30 12:47:03 +01:00


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * fs/eventpoll.c (Efficient event retrieval implementation)
 * Copyright (C) 2001,...,2009 Davide Libenzi
 *
 * Davide Libenzi <davidel@xmailserver.org>
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/rculist.h>
#include <net/busy_poll.h>
/*
 * LOCKING:
 * There are three levels of locking required by epoll:
 *
 * 1) epmutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (rwlock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a rwlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * Then we also need a global mutex to serialize eventpoll_release_file()
 * and ep_free().
 * This mutex is acquired by ep_free() during the epoll file
 * cleanup path, and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is also acquired when inserting an epoll fd onto another epoll
 * fd. We do this so that we walk the epoll tree and ensure that this
 * insertion does not create a cycle of epoll file descriptors, which
 * could lead to deadlock. We need a global mutex to prevent two
 * simultaneous inserts (A into B and B into A) from racing and
 * constructing a cycle without either insert observing that it is
 * going to.
 * It is necessary to acquire multiple "ep->mtx"es at once in the
 * case when one epoll fd is added to another. In this case, we
 * always acquire the locks in the order of nesting (i.e. after
 * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
 * before e2->mtx). Since we disallow cycles of epoll file
 * descriptors, this ensures that the mutexes are well-ordered. In
 * order to communicate this nesting to lockdep, when walking a tree
 * of epoll file descriptors, we use the current recursion depth as
 * the lockdep subkey.
 * It is possible to drop the "ep->mtx" and to use the global
 * mutex "epmutex" (together with "ep->lock") to have it working,
 * but having "ep->mtx" will make the interface more scalable.
 * Events that require holding "epmutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" will guarantee
 * a better scalability.
 */
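/*
 * For illustration (not part of the original source): the nested case that
 * the ordering rules above describe arises from plain userspace code such
 * as the sketch below, which adds one epoll fd to another; error handling
 * is omitted and the fd names are hypothetical.
 *
 *	int e2 = epoll_create1(0);
 *	int e1 = epoll_create1(0);
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = e2 };
 *
 *	epoll_ctl(e1, EPOLL_CTL_ADD, e2, &ev);
 *	// From here on, events ready on e2 make e1 readable as well,
 *	// and the kernel must take e1->mtx before e2->mtx.
 */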
/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)
#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)
#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
				EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)
/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4
#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
#define EP_UNACTIVE_PTR ((void *) -1L)
#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
struct epoll_filefd {
	struct file *file;
	int fd;
} __packed;
/*
 * Structure used to track possible nested calls, for too deep recursions
 * and loop cycles.
 */
struct nested_call_node {
	struct list_head llink;
	void *cookie;
	void *ctx;
};
/*
 * This structure is used as a collector for nested calls, to check for
 * maximum recursion depth and loop cycles.
 */
struct nested_calls {
	struct list_head tasks_call_list;
	spinlock_t lock;
};
/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 * Avoid increasing the size of this struct, there can be many thousands
 * of these on a server and we do not want this to take another cache line.
 */
struct epitem {
	union {
		/* RB tree node links this structure to the eventpoll RB tree */
		struct rb_node rbn;
		/* Used to free the struct epitem */
		struct rcu_head rcu;
	};
	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;
	/*
	 * Works together with "struct eventpoll"->ovflist in keeping the
	 * single linked chain of items.
	 */
	struct epitem *next;
	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;
	/* Number of active wait queues attached to poll operations */
	int nwait;
	/* List containing poll wait queues */
	struct list_head pwqlist;
	/* The "container" of this item */
	struct eventpoll *ep;
	/* List header used to link this item to the "struct file" items list */
	struct list_head fllink;
	/* wakeup_source used when EPOLLWAKEUP is set */
	struct wakeup_source __rcu *ws;
	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;
};
/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/*
	 * This mutex is used to ensure that files are not removed
	 * while epoll is using them. This is held during the event
	 * collection loop, the file cleanup path, the epoll file exit
	 * code and the ctl operations.
	 */
	struct mutex mtx;
	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;
	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;
	/* List of ready file descriptors */
	struct list_head rdllist;
	/* Lock which protects rdllist and ovflist */
	rwlock_t lock;
	/* RB tree root used to store monitored fd structs */
	struct rb_root_cached rbr;
	/*
	 * This is a single linked list that chains all the "struct epitem"
	 * that had events happen while ready events were being transferred
	 * to userspace without holding ->lock.
	 */
	struct epitem *ovflist;
	/* wakeup_source used when ep_scan_ready_list is running */
	struct wakeup_source *ws;
	/* The user that created the eventpoll descriptor */
	struct user_struct *user;
	struct file *file;
	/* used to optimize loop detection check */
	u64 gen;
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* used to track busy poll napi_id */
	unsigned int napi_id;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* tracks wakeup nests for lockdep validation */
	u8 nests;
#endif
};
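/*
 * For illustration (a behavioural sketch, not the actual code from this
 * file): while the transfer loop walks the ready list with ->lock dropped,
 * ep_poll_callback() must not touch rdllist; conceptually each newly ready
 * item is instead pushed onto ->ovflist, chained through epitem->next:
 *
 *	epi->next = ep->ovflist;	// new head of the single linked chain
 *	ep->ovflist = epi;
 *
 * (The real path does this with atomic exchanges; see chain_epi_lockless()
 * later in the file.) Once the transfer finishes, the chain is spliced back
 * onto rdllist and ovflist is reset to EP_UNACTIVE_PTR, marking it inactive.
 */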
/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct list_head llink;
	/* The "base" pointer is set to the container "struct epitem" */
	struct epitem *base;
	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_entry_t wait;
	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};
/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};
/* Used by the ep_send_events() function as callback private data */
struct ep_send_events_data {
	int maxevents;
	struct epoll_event __user *events;
	int res;
};
/*
 * Configuration options available inside /proc/sys/fs/epoll/
 */
/* Maximum number of epoll watched descriptors, per user */
static long max_user_watches __read_mostly;
/*
 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
static DEFINE_MUTEX(epmutex);
static u64 loop_check_gen = 0;
/* Used to check for epoll file descriptor inclusion loops */
static struct nested_calls poll_loop_ncalls;
/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;
/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;
/*
 * List of files with newly added links, where we may need to limit the number
 * of emanating paths. Protected by the epmutex.
 */
static LIST_HEAD(tfile_check_list);
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
static long long_zero;
static long long_max = LONG_MAX;
struct ctl_table epoll_table[] = {
	{
		.procname = "max_user_watches",
		.data = &max_user_watches,
		.maxlen = sizeof(max_user_watches),
		.mode = 0644,
		.proc_handler = proc_doulongvec_minmax,
		.extra1 = &long_zero,
		.extra2 = &long_max,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
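/*
 * For illustration (not part of the original source): the limit registered
 * above surfaces to userspace and can be inspected or tuned at runtime,
 * e.g.:
 *
 *	# cat /proc/sys/fs/epoll/max_user_watches
 *	# echo 1000000 > /proc/sys/fs/epoll/max_user_watches
 *
 * The default is derived at init time from available low memory and the
 * per-watch cost (see EP_ITEM_COST above).
 */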
static const struct file_operations eventpoll_fops;
static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}
/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}
/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1 :
		(p1->file < p2->file ? -1 : p1->fd - p2->fd));
}
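/*
 * For illustration (hypothetical pointer values, not part of the original
 * source): the comparator orders keys first by file pointer and only then
 * by fd, so two fds referring to the same struct file (e.g. via dup())
 * still get distinct, well-ordered slots in the RB tree:
 *
 *	ep_cmp_ffd({ .file = 0xA0, .fd = 7 }, { .file = 0xB0, .fd = 3 }) == -1
 *	ep_cmp_ffd({ .file = 0xA0, .fd = 7 }, { .file = 0xA0, .fd = 3 }) == 4
 */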
/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct epitem *epi)
{
	return !list_empty(&epi->rdllink);
}
static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
{
	return container_of(p, struct eppoll_entry, wait);
}
/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}
/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
	return container_of(p, struct ep_pqueue, pt)->epi;
}
/* Initialize the poll safe wake up structure */
static void ep_nested_calls_init(struct nested_calls *ncalls)
{
	INIT_LIST_HEAD(&ncalls->tasks_call_list);
	spin_lock_init(&ncalls->lock);
}
/**
 * ep_events_available - Checks if ready events might be available.
 *
 * @ep: Pointer to the eventpoll context.
 *
 * Returns: a value different from zero if ready events are available,
 *          or zero otherwise.
 */
static inline int ep_events_available(struct eventpoll *ep)
{
	return !list_empty_careful(&ep->rdllist) ||
		READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
static bool ep_busy_loop_end(void *p, unsigned long start_time)
{
	struct eventpoll *ep = p;
	return ep_events_available(ep) || busy_loop_timeout(start_time);
}
/*
 * Busy poll if globally enabled, a supporting socket was found and there
 * are no events; the busy loop returns on need_resched() or once
 * ep_events_available() is true.
 *
 * We must do our busy polling with irqs enabled.
 */
static void ep_busy_loop(struct eventpoll *ep, int nonblock)
{
	unsigned int napi_id = READ_ONCE(ep->napi_id);
	if ((napi_id >= MIN_NAPI_ID) && net_busy_loop_on())
		napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep);
}
static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep)
{
	if (ep->napi_id)
		ep->napi_id = 0;
}
/*
 * Set epoll busy poll NAPI ID from sk.
 */
static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
	struct eventpoll *ep;
	unsigned int napi_id;
	struct socket *sock;
	struct sock *sk;
	int err;
	if (!net_busy_loop_on())
		return;
	sock = sock_from_file(epi->ffd.file, &err);
	if (!sock)
		return;
	sk = sock->sk;
	if (!sk)
		return;
	napi_id = READ_ONCE(sk->sk_napi_id);
	ep = epi->ep;
	/*
	 * Non-NAPI IDs can be rejected, and there is nothing to do if we
	 * already have this ID.
	 */
	if (napi_id < MIN_NAPI_ID || napi_id == ep->napi_id)
		return;
	/* record NAPI ID for use in next busy poll */
	ep->napi_id = napi_id;
}
#else
static inline void ep_busy_loop(struct eventpoll *ep, int nonblock)
{
}
static inline void ep_reset_busy_poll_napi_id(struct eventpoll *ep)
{
}
static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
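/*
 * For illustration (an assumption about typical usage, not part of the
 * original source): the busy-poll path above only engages when the global
 * knob is set and the monitored socket carries a valid NAPI ID, e.g. after:
 *
 *	# echo 50 > /proc/sys/net/core/busy_poll
 *
 * net_busy_loop_on() then returns true, and epoll_wait() spins on the NAPI
 * context of the last socket that posted an event instead of sleeping
 * immediately.
 */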
/**
 * ep_call_nested - Perform a bound (possibly) nested call, by checking
 *                  that the recursion limit is not exceeded, and that
 *                  the same nested call (by the meaning of same cookie) is
 *                  not re-entered.
 *
 * @ncalls: Pointer to the nested_calls structure to be used for this call.
 * @nproc: Nested call core function pointer.
 * @priv: Opaque data to be passed to the @nproc callback.
 * @cookie: Cookie to be used to identify this nested call.
 * @ctx: This instance context.
 *
 * Returns: the code returned by the @nproc callback, or -1 if
 *          the maximum recursion limit has been exceeded.
 */
static int ep_call_nested(struct nested_calls *ncalls,
			  int (*nproc)(void *, void *, int), void *priv,
			  void *cookie, void *ctx)
{
	int error, call_nests = 0;
	unsigned long flags;
	struct list_head *lsthead = &ncalls->tasks_call_list;
	struct nested_call_node *tncur;
	struct nested_call_node tnode;
	spin_lock_irqsave(&ncalls->lock, flags);
	/*
	 * Try to see if the current task is already inside this wakeup call.
	 * We use a list here, since the population inside this set is always
	 * very much limited.
	 */
	list_for_each_entry(tncur, lsthead, llink) {
		if (tncur->ctx == ctx &&
		    (tncur->cookie == cookie || ++call_nests > EP_MAX_NESTS)) {
			/*
			 * Oops ... loop detected or maximum nest level
			 * reached. We abort this wake by breaking the
			 * cycle itself.
			 */
			error = -1;
			goto out_unlock;
		}
	}
	/* Add the current task and cookie to the list */
	tnode.ctx = ctx;
	tnode.cookie = cookie;
	list_add(&tnode.llink, lsthead);
	spin_unlock_irqrestore(&ncalls->lock, flags);
	/* Call the nested function */
	error = (*nproc)(priv, cookie, call_nests);
	/* Remove the current task from the list */
	spin_lock_irqsave(&ncalls->lock, flags);
	list_del(&tnode.llink);
out_unlock:
	spin_unlock_irqrestore(&ncalls->lock, flags);
	return error;
}
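/*
 * For illustration (a hypothetical caller, not part of the original
 * source): a recursive walk is bounded like this, with the object being
 * walked serving as both @priv and @cookie and the current task as @ctx.
 * A return of -1 means the walk was cut off at EP_MAX_NESTS or a cookie
 * was revisited (i.e. a loop was detected).
 *
 *	static int my_check_proc(void *priv, void *cookie, int call_nests)
 *	{
 *		// may recurse into children via ep_call_nested() again
 *		return 0;
 *	}
 *
 *	err = ep_call_nested(&poll_loop_ncalls, my_check_proc,
 *			     ep, ep, current);
 */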
/*
 * As described in commit 0ccf831cb ("lockdep: annotate epoll"), the use of
 * wait queues used by epoll is done in a very controlled manner. Wake ups
 * can nest inside each other, but are never done with the same locking.
 * For example:
 *
 * dfd = socket(...);
 * efd1 = epoll_create();
 * efd2 = epoll_create();
 * epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
 * epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
 *
 * When a packet arrives to the device underneath "dfd", the net code will
 * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
 * callback wakeup entry on that queue, and the wake_up() performed by the
 * "dfd" net code will end up in ep_poll_callback(). At this point epoll
 * (efd1) notices that it may have some event ready, so it needs to wake up
 * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
 * that ends up in another wake_up(), after having checked the recursion
 * constraints. That is, no more than EP_MAX_NESTS, to avoid stack blasting.
 *
 * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
 * this special case of epoll.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
{
struct eventpoll *ep_src;
unsigned long flags;
u8 nests = 0;
/*
* To set the subclass or nesting level for spin_lock_irqsave_nested()
* it might be natural to create a per-cpu nest count. However, since
* we can recurse on ep->poll_wait.lock, and a non-raw spinlock can
* schedule() in the -rt kernel, the per-cpu variables are no longer
* protected. Thus, we are introducing a per-eventpoll nest field.
* If we are not being called from ep_poll_callback(), epi is NULL and
* we are at the first level of nesting, 0. Otherwise, we are being
* called from ep_poll_callback() and if a previous wakeup source is
* not an epoll file itself, we are at depth 1 since the wakeup source
* is depth 0. If the wakeup source is a previous epoll file in the
* wakeup chain then we use its nests value and record ours as
* nests + 1. The previous epoll file nests value is stable since it is
* already holding its own poll_wait.lock.
*/
if (epi) {
if (is_file_epoll(epi->ffd.file)) {
ep_src = epi->ffd.file->private_data;
nests = ep_src->nests;
} else {
nests = 1;
}
}
spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);
ep->nests = nests + 1;
wake_up_locked_poll(&ep->poll_wait, EPOLLIN);
ep->nests = 0;
spin_unlock_irqrestore(&ep->poll_wait.lock, flags);
}
#else
static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi)
{
wake_up_poll(&ep->poll_wait, EPOLLIN);
}
#endif
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
wait_queue_head_t *whead;
rcu_read_lock();
/*
* If it is cleared by POLLFREE, it should be rcu-safe.
* If we read NULL we need a barrier paired with
* smp_store_release() in ep_poll_callback(), otherwise
* we rely on whead->lock.
*/
whead = smp_load_acquire(&pwq->whead);
if (whead)
remove_wait_queue(whead, &pwq->wait);
rcu_read_unlock();
}
/*
* This function unregisters poll callbacks from the associated file
* descriptor. Must be called with "mtx" held (or "epmutex" if called from
* ep_free).
*/
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
struct list_head *lsthead = &epi->pwqlist;
struct eppoll_entry *pwq;
while (!list_empty(lsthead)) {
pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
list_del(&pwq->llink);
ep_remove_wait_queue(pwq);
kmem_cache_free(pwq_cache, pwq);
}
}
/* call only when ep->mtx is held */
static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
{
return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
}
/* call only when ep->mtx is held */
static inline void ep_pm_stay_awake(struct epitem *epi)
{
struct wakeup_source *ws = ep_wakeup_source(epi);
if (ws)
__pm_stay_awake(ws);
}
static inline bool ep_has_wakeup_source(struct epitem *epi)
{
return rcu_access_pointer(epi->ws) ? true : false;
}
/* call when ep->mtx cannot be held (ep_poll_callback) */
static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
{
struct wakeup_source *ws;
rcu_read_lock();
ws = rcu_dereference(epi->ws);
if (ws)
__pm_stay_awake(ws);
rcu_read_unlock();
}
/**
* ep_scan_ready_list - Scans the ready list in a way that makes it possible
* for the scan code to call f_op->poll(). Also allows for
* O(NumReady) performance.
*
* @ep: Pointer to the epoll private data structure.
* @sproc: Pointer to the scan callback.
* @priv: Private opaque data passed to the @sproc callback.
* @depth: The current depth of recursive f_op->poll calls.
* @ep_locked: caller already holds ep->mtx
*
* Returns: The same integer error code returned by the @sproc callback.
*/
static __poll_t ep_scan_ready_list(struct eventpoll *ep,
__poll_t (*sproc)(struct eventpoll *,
struct list_head *, void *),
void *priv, int depth, bool ep_locked)
{
__poll_t res;
struct epitem *epi, *nepi;
LIST_HEAD(txlist);
lockdep_assert_irqs_enabled();
/*
* We need to lock this because we could be hit by
* eventpoll_release_file() and epoll_ctl().
*/
if (!ep_locked)
mutex_lock_nested(&ep->mtx, depth);
/*
* Steal the ready list, and re-init the original one to the
* empty list. Also, set ep->ovflist to NULL so that events
* happening while looping without locks are not lost. We cannot
* have the poll callback queue directly on ep->rdllist,
* because we want the "sproc" callback to be able to do it
* in a lockless way.
*/
write_lock_irq(&ep->lock);
list_splice_init(&ep->rdllist, &txlist);
WRITE_ONCE(ep->ovflist, NULL);
write_unlock_irq(&ep->lock);
/*
* Now call the callback function.
*/
res = (*sproc)(ep, &txlist, priv);
write_lock_irq(&ep->lock);
/*
* During the time we spent inside the "sproc" callback, some
* other events might have been queued by the poll callback.
* We re-insert them inside the main ready-list here.
*/
for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL;
nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
/*
* We need to check if the item is already in the list.
* During the "sproc" callback execution time, items are
* queued into ->ovflist but the "txlist" might already
* contain them, and the list_splice() below takes care of them.
*/
if (!ep_is_linked(epi)) {
/*
* ->ovflist is LIFO, so we have to reverse it in order
* to keep it in FIFO order.
*/
list_add(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
}
}
/*
* We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
* releasing the lock, events will be queued in the normal way inside
* ep->rdllist.
*/
WRITE_ONCE(ep->ovflist, EP_UNACTIVE_PTR);
/*
* Quickly re-inject items left on "txlist".
*/
list_splice(&txlist, &ep->rdllist);
__pm_relax(ep->ws);
write_unlock_irq(&ep->lock);
if (!ep_locked)
mutex_unlock(&ep->mtx);
return res;
}
static void epi_rcu_free(struct rcu_head *head)
{
struct epitem *epi = container_of(head, struct epitem, rcu);
kmem_cache_free(epi_cache, epi);
}
/*
* Removes a "struct epitem" from the eventpoll RB tree and deallocates
* all the associated resources. Must be called with "mtx" held.
*/
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
struct file *file = epi->ffd.file;
lockdep_assert_irqs_enabled();
/*
* Removes poll wait queue hooks.
*/
ep_unregister_pollwait(ep, epi);
/* Remove the current item from the list of epoll hooks */
spin_lock(&file->f_lock);
list_del_rcu(&epi->fllink);
spin_unlock(&file->f_lock);
rb_erase_cached(&epi->rbn, &ep->rbr);
write_lock_irq(&ep->lock);
if (ep_is_linked(epi))
list_del_init(&epi->rdllink);
write_unlock_irq(&ep->lock);
wakeup_source_unregister(ep_wakeup_source(epi));
/*
* At this point it is safe to free the eventpoll item. Use the union
* field epi->rcu, since we are trying to minimize the size of
* 'struct epitem'. The 'rbn' field is no longer in use. Protected by
* ep->mtx. The rcu read side, reverse_path_check_proc(), does not make
* use of the rbn field.
*/
call_rcu(&epi->rcu, epi_rcu_free);
atomic_long_dec(&ep->user->epoll_watches);
return 0;
}
static void ep_free(struct eventpoll *ep)
{
struct rb_node *rbp;
struct epitem *epi;
/* We need to release all tasks waiting on this file */
if (waitqueue_active(&ep->poll_wait))
ep_poll_safewake(ep, NULL);
/*
* We need to lock this because we could be hit by
* eventpoll_release_file() while we're freeing the "struct eventpoll".
* We do not need to hold "ep->mtx" here because the epoll file
* is on the way to be removed and no one has references to it
* anymore. The only hit might come from eventpoll_release_file() but
* holding "epmutex" is sufficient here.
*/
mutex_lock(&epmutex);
/*
* Walks through the whole tree by unregistering poll callbacks.
*/
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn);
ep_unregister_pollwait(ep, epi);
cond_resched();
}
/*
* Walks through the whole tree by freeing each "struct epitem". At this
* point we are sure no poll callbacks will be lingering around, and also, by
* holding "epmutex", we can be sure that no file cleanup code will hit
* us during this operation. So we can avoid taking "ep->lock".
* We do not need to lock ep->mtx, either, we only do it to prevent
* a lockdep warning.
*/
mutex_lock(&ep->mtx);
while ((rbp = rb_first_cached(&ep->rbr)) != NULL) {
epi = rb_entry(rbp, struct epitem, rbn);
ep_remove(ep, epi);
cond_resched();
}
mutex_unlock(&ep->mtx);
mutex_unlock(&epmutex);
mutex_destroy(&ep->mtx);
free_uid(ep->user);
wakeup_source_unregister(ep->ws);
kfree(ep);
}
static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
struct eventpoll *ep = file->private_data;
if (ep)
ep_free(ep);
return 0;
}
static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
void *priv);
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
poll_table *pt);
/*
* Differs from ep_eventpoll_poll() in that internal callers already have
* the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
* is correctly annotated.
*/
static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
int depth)
{
struct eventpoll *ep;
bool locked;
pt->_key = epi->event.events;
if (!is_file_epoll(epi->ffd.file))
return vfs_poll(epi->ffd.file, pt) & epi->event.events;
ep = epi->ffd.file->private_data;
poll_wait(epi->ffd.file, &ep->poll_wait, pt);
locked = pt && (pt->_qproc == ep_ptable_queue_proc);
return ep_scan_ready_list(epi->ffd.file->private_data,
ep_read_events_proc, &depth, depth,
locked) & epi->event.events;
}
static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
void *priv)
{
struct epitem *epi, *tmp;
poll_table pt;
int depth = *(int *)priv;
init_poll_funcptr(&pt, NULL);
depth++;
list_for_each_entry_safe(epi, tmp, head, rdllink) {
if (ep_item_poll(epi, &pt, depth)) {
return EPOLLIN | EPOLLRDNORM;
} else {
/*
* Item has been dropped into the ready list by the poll
* callback, but it's not actually ready, as far as the
* caller-requested events go. We can remove it here.
*/
__pm_relax(ep_wakeup_source(epi));
list_del_init(&epi->rdllink);
}
}
return 0;
}
static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
{
struct eventpoll *ep = file->private_data;
int depth = 0;
/* Insert inside our poll wait queue */
poll_wait(file, &ep->poll_wait, wait);
/*
* Proceed to find out if wanted events are really available inside
* the ready list.
*/
return ep_scan_ready_list(ep, ep_read_events_proc,
&depth, depth, false);
}
#ifdef CONFIG_PROC_FS
static void ep_show_fdinfo(struct seq_file *m, struct file *f)
{
struct eventpoll *ep = f->private_data;
struct rb_node *rbp;
mutex_lock(&ep->mtx);
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
struct epitem *epi = rb_entry(rbp, struct epitem, rbn);
struct inode *inode = file_inode(epi->ffd.file);
seq_printf(m, "tfd: %8d events: %8x data: %16llx "
" pos:%lli ino:%lx sdev:%x\n",
epi->ffd.fd, epi->event.events,
(long long)epi->event.data,
(long long)epi->ffd.file->f_pos,
inode->i_ino, inode->i_sb->s_dev);
if (seq_has_overflowed(m))
break;
}
mutex_unlock(&ep->mtx);
}
#endif
/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
#ifdef CONFIG_PROC_FS
.show_fdinfo = ep_show_fdinfo,
#endif
.release = ep_eventpoll_release,
.poll = ep_eventpoll_poll,
.llseek = noop_llseek,
};
/*
* This is called from eventpoll_release() to unlink files from the eventpoll
* interface. We need this facility to correctly clean up files that are
* closed without being removed from the eventpoll interface.
*/
void eventpoll_release_file(struct file *file)
{
struct eventpoll *ep;
struct epitem *epi, *next;
/*
* We don't want to get "file->f_lock" because it is not
* necessary. It is not necessary because we're in the "struct file"
* cleanup path, and this means that no one is using this file anymore.
* So, for example, epoll_ctl() cannot hit here since if we reach this
* point, the file counter already went to zero and fget() would fail.
* The only hit might come from ep_free(), but holding the mutex
* will correctly serialize the operation. We do need to acquire
* "ep->mtx" after "epmutex" because ep_remove() requires it when called
* from anywhere but ep_free().
*
* Besides, ep_remove() acquires the lock, so we can't hold it here.
*/
mutex_lock(&epmutex);
list_for_each_entry_safe(epi, next, &file->f_ep_links, fllink) {
ep = epi->ep;
mutex_lock_nested(&ep->mtx, 0);
ep_remove(ep, epi);
mutex_unlock(&ep->mtx);
}
mutex_unlock(&epmutex);
}
static int ep_alloc(struct eventpoll **pep)
{
int error;
struct user_struct *user;
struct eventpoll *ep;
user = get_current_user();
error = -ENOMEM;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (unlikely(!ep))
goto free_uid;
mutex_init(&ep->mtx);
rwlock_init(&ep->lock);
init_waitqueue_head(&ep->wq);
init_waitqueue_head(&ep->poll_wait);
INIT_LIST_HEAD(&ep->rdllist);
ep->rbr = RB_ROOT_CACHED;
ep->ovflist = EP_UNACTIVE_PTR;
ep->user = user;
*pep = ep;
return 0;
free_uid:
free_uid(user);
return error;
}
/*
* Search the file inside the eventpoll tree. The RB tree operations
* are protected by the "mtx" mutex, and ep_find() must be called with
* "mtx" held.
*/
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
int kcmp;
struct rb_node *rbp;
struct epitem *epi, *epir = NULL;
struct epoll_filefd ffd;
ep_set_ffd(&ffd, file, fd);
for (rbp = ep->rbr.rb_root.rb_node; rbp; ) {
epi = rb_entry(rbp, struct epitem, rbn);
kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
if (kcmp > 0)
rbp = rbp->rb_right;
else if (kcmp < 0)
rbp = rbp->rb_left;
else {
epir = epi;
break;
}
}
return epir;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)
{
struct rb_node *rbp;
struct epitem *epi;
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn);
if (epi->ffd.fd == tfd) {
if (toff == 0)
return epi;
else
toff--;
}
cond_resched();
}
return NULL;
}
struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,
unsigned long toff)
{
struct file *file_raw;
struct eventpoll *ep;
struct epitem *epi;
if (!is_file_epoll(file))
return ERR_PTR(-EINVAL);
ep = file->private_data;
mutex_lock(&ep->mtx);
epi = ep_find_tfd(ep, tfd, toff);
if (epi)
file_raw = epi->ffd.file;
else
file_raw = ERR_PTR(-ENOENT);
mutex_unlock(&ep->mtx);
return file_raw;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
/**
* Adds a new entry to the tail of the list in a lockless way, i.e.
* multiple CPUs are allowed to call this function concurrently.
*
* Beware: it is necessary to prevent any other modifications of the
* existing list until all changes are completed, in other words
* concurrent list_add_tail_lockless() calls should be protected
* with a read lock, where write lock acts as a barrier which
* makes sure all list_add_tail_lockless() calls are fully
* completed.
*
* Also, an element can be locklessly added to the list only in one
* direction, i.e. either to the tail or to the head; otherwise
* concurrent access will corrupt the list.
*
* Returns %false if the element has already been added to the list, %true
* otherwise.
*/
static inline bool list_add_tail_lockless(struct list_head *new,
struct list_head *head)
{
struct list_head *prev;
/*
* This is a simple 'new->next = head' operation, but cmpxchg()
* is used in order to detect that the same element has just been
* added to the list from another CPU: the winner observes
* new->next == new.
*/
if (cmpxchg(&new->next, new, head) != new)
return false;
/*
* Initially ->next of a new element must be updated with the head
* (we are inserting to the tail) and only then pointers are atomically
* exchanged. XCHG guarantees memory ordering, thus ->next should be
* updated before pointers are actually swapped and pointers are
* swapped before prev->next is updated.
*/
prev = xchg(&head->prev, new);
/*
* It is safe to modify prev->next and new->prev, because a new element
* is added only to the tail and new->next is updated before XCHG.
*/
prev->next = new;
new->prev = prev;
return true;
}
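/*
* Illustrative userspace analog of the cmpxchg()-based detection above
* (a hypothetical sketch, not part of this file; it assumes GCC/Clang
* __atomic builtins and a node whose ->next is left self-linked, as
* after INIT_LIST_HEAD, until it is queued):
*
* struct node { struct node *next, *prev; };
*
* static bool lockless_append(struct node *new, struct node *head)
* {
* struct node *expected = new;
* struct node *prev;
*
* if (!__atomic_compare_exchange_n(&new->next, &expected, head, false,
* __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
* return false; (another CPU already queued this node)
* prev = __atomic_exchange_n(&head->prev, new, __ATOMIC_SEQ_CST);
* prev->next = new;
* new->prev = prev;
* return true;
* }
*/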
/**
* Chains a new epi entry to the head of the ep->ovflist in a lockless way,
* i.e. multiple CPUs are allowed to call this function concurrently.
*
* Returns %false if epi element has been already chained, %true otherwise.
*/
static inline bool chain_epi_lockless(struct epitem *epi)
{
struct eventpoll *ep = epi->ep;
/* Fast preliminary check */
if (epi->next != EP_UNACTIVE_PTR)
return false;
/* Check that the same epi has not been just chained from another CPU */
if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
return false;
/* Atomically exchange tail */
epi->next = xchg(&ep->ovflist, epi);
return true;
}
/*
* This is the callback that is passed to the wait queue wakeup
* mechanism. It is called by the stored file descriptors when they
* have events to report.
*
* This callback takes a read lock in order not to contend with concurrent
* events from other file descriptors, thus all modifications to ->rdllist
* or ->ovflist are lockless. The read lock is paired with the write lock from
* ep_scan_ready_list(), which stops all list modifications and guarantees
* that the lists' state is seen correctly.
*
* Another thing worth mentioning is that ep_poll_callback() can be called
* concurrently for the same @epi from different CPUs if the poll table was
* initialized with several wait queue entries. Wakeups from different CPUs
* on a single wait queue are serialized by wq.lock, but the case when
* multiple wait queues are used must be detected separately. This is done
* using a cmpxchg() operation.
*/
static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
int pwake = 0;
struct epitem *epi = ep_item_from_wait(wait);
struct eventpoll *ep = epi->ep;
__poll_t pollflags = key_to_poll(key);
unsigned long flags;
int ewake = 0;
read_lock_irqsave(&ep->lock, flags);
ep_set_busy_poll_napi_id(epi);
/*
* If the event mask does not contain any poll(2) event, we consider the
* descriptor to be disabled. This condition is likely the effect of the
* EPOLLONESHOT bit that disables the descriptor when an event is received,
* until the next EPOLL_CTL_MOD is issued.
*/
if (!(epi->event.events & ~EP_PRIVATE_BITS))
goto out_unlock;
/*
* Check the events coming with the callback. At this stage, not
* every device reports the events in the "key" parameter of the
* callback. We need to be able to handle both cases here, hence the
* test for "key" != NULL before the event match test.
*/
if (pollflags && !(pollflags & epi->event.events))
goto out_unlock;
/*
* If we are transferring events to userspace, we can hold no locks
* (because we're accessing user memory, and because of linux f_op->poll()
* semantics). All the events that happen during that period of time are
* chained in ep->ovflist and requeued later on.
*/
if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
if (chain_epi_lockless(epi))
ep_pm_stay_awake_rcu(epi);
} else if (!ep_is_linked(epi)) {
/* In the usual case, add event to ready list. */
if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
ep_pm_stay_awake_rcu(epi);
}
/*
* Wake up (if active) both the eventpoll wait list and the ->poll()
* wait list.
*/
if (waitqueue_active(&ep->wq)) {
if ((epi->event.events & EPOLLEXCLUSIVE) &&
!(pollflags & POLLFREE)) {
switch (pollflags & EPOLLINOUT_BITS) {
case EPOLLIN:
if (epi->event.events & EPOLLIN)
ewake = 1;
break;
case EPOLLOUT:
if (epi->event.events & EPOLLOUT)
ewake = 1;
break;
case 0:
ewake = 1;
break;
}
}
wake_up(&ep->wq);
}
if (waitqueue_active(&ep->poll_wait))
pwake++;
out_unlock:
read_unlock_irqrestore(&ep->lock, flags);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(ep, epi);
if (!(epi->event.events & EPOLLEXCLUSIVE))
ewake = 1;
if (pollflags & POLLFREE) {
/*
* If we race with ep_remove_wait_queue() it can miss
* ->whead = NULL and do another remove_wait_queue() after
* us, so we can't use __remove_wait_queue().
*/
list_del_init(&wait->entry);
/*
* ->whead != NULL protects us from the race with ep_free()
* or ep_remove(), ep_remove_wait_queue() takes whead->lock
* held by the caller. Once we nullify it, nothing protects
* ep/epi or even wait.
*/
smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
}
return ewake;
}
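/*
* Hedged userspace sketch of the EPOLLEXCLUSIVE filtering above (not part
* of this file): several epoll instances attach to one listening socket
* with EPOLLEXCLUSIVE, and a single incoming connection is then expected
* to wake one (or some, but not necessarily all) of the waiters.
* listen_fd is a placeholder:
*
* struct epoll_event ev = { .events = EPOLLIN | EPOLLEXCLUSIVE };
* int efd1 = epoll_create1(0);
* int efd2 = epoll_create1(0);
*
* epoll_ctl(efd1, EPOLL_CTL_ADD, listen_fd, &ev);
* epoll_ctl(efd2, EPOLL_CTL_ADD, listen_fd, &ev);
* (a connection on listen_fd now wakes a subset of the waiters
* instead of all of them)
*/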
/*
* This is the callback that is used to add our wait queue to the
* target file wakeup lists.
*/
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
poll_table *pt)
{
struct epitem *epi = ep_item_from_epqueue(pt);
struct eppoll_entry *pwq;
if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
pwq->whead = whead;
pwq->base = epi;
if (epi->event.events & EPOLLEXCLUSIVE)
add_wait_queue_exclusive(whead, &pwq->wait);
else
add_wait_queue(whead, &pwq->wait);
list_add_tail(&pwq->llink, &epi->pwqlist);
epi->nwait++;
} else {
/* We have to signal that an error occurred */
epi->nwait = -1;
}
}
static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
int kcmp;
struct rb_node **p = &ep->rbr.rb_root.rb_node, *parent = NULL;
struct epitem *epic;
bool leftmost = true;
while (*p) {
parent = *p;
epic = rb_entry(parent, struct epitem, rbn);
kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
if (kcmp > 0) {
p = &parent->rb_right;
leftmost = false;
} else
p = &parent->rb_left;
}
rb_link_node(&epi->rbn, parent, p);
rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);
}
#define PATH_ARR_SIZE 5
/*
* These are the numbers of paths of length 1 to 5 that we allow to emanate
* from a single file of interest. For example, we allow 1000 paths of length
* 1 to emanate from each file of interest. This essentially represents the
* potential wakeup paths, which need to be limited in order to avoid massive
* uncontrolled wakeup storms. The common use case should be a single ep which
* is connected to n file sources. In this case each file source has 1 path
* of length 1. Thus, the numbers below should be more than sufficient. These
* path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
* and delete can't add additional paths. Protected by the epmutex.
*/
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];
static int path_count_inc(int nests)
{
/* Allow an arbitrary number of depth 1 paths */
if (nests == 0)
return 0;
if (++path_count[nests] > path_limits[nests])
return -1;
return 0;
}
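/*
* Worked example of the limits above, as the code currently stands: paths
* of length 1 (nests == 0, a file watched directly) are not counted at
* all, so the 1000 entry in path_limits[] is effectively unused; at most
* 500 paths of length 2, 100 of length 3, 50 of length 4 and 10 of
* length 5 may reach a single file before reverse_path_check() fails the
* EPOLL_CTL_ADD with -1 (surfaced as -EINVAL in ep_insert()).
*/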
static void path_count_init(void)
{
int i;
for (i = 0; i < PATH_ARR_SIZE; i++)
path_count[i] = 0;
}
static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
{
int error = 0;
struct file *file = priv;
struct file *child_file;
struct epitem *epi;
/* CTL_DEL can remove links here, but that can't increase our count */
rcu_read_lock();
list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
child_file = epi->ep->file;
if (is_file_epoll(child_file)) {
if (list_empty(&child_file->f_ep_links)) {
if (path_count_inc(call_nests)) {
error = -1;
break;
}
} else {
error = ep_call_nested(&poll_loop_ncalls,
reverse_path_check_proc,
child_file, child_file,
current);
}
if (error != 0)
break;
} else {
printk(KERN_ERR "reverse_path_check_proc: "
"file is not an ep!\n");
}
}
rcu_read_unlock();
return error;
}
/**
* reverse_path_check - The tfile_check_list is a list of file *, which have
* links that are proposed to be newly added. We need to
* make sure that those added links don't add too many
* paths such that we will spend all our time waking up
* eventpoll objects.
*
* Returns: Returns zero if the proposed links don't create too many paths,
* -1 otherwise.
*/
static int reverse_path_check(void)
{
int error = 0;
struct file *current_file;
/* let's call this for all tfiles */
list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
path_count_init();
error = ep_call_nested(&poll_loop_ncalls,
reverse_path_check_proc, current_file,
current_file, current);
if (error)
break;
}
return error;
}
static int ep_create_wakeup_source(struct epitem *epi)
{
struct name_snapshot n;
struct wakeup_source *ws;
if (!epi->ep->ws) {
epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
if (!epi->ep->ws)
return -ENOMEM;
}
take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
ws = wakeup_source_register(NULL, n.name.name);
release_dentry_name_snapshot(&n);
if (!ws)
return -ENOMEM;
rcu_assign_pointer(epi->ws, ws);
return 0;
}
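/*
* A minimal userspace sketch of arming a wakeup source this way (epfd and
* dev_fd are placeholders; without CAP_BLOCK_SUSPEND the kernel silently
* clears EPOLLWAKEUP in ep_take_care_of_epollwakeup()):
*
* struct epoll_event ev = {
* .events = EPOLLIN | EPOLLWAKEUP,
* .data.fd = dev_fd,
* };
* epoll_ctl(epfd, EPOLL_CTL_ADD, dev_fd, &ev);
* (while the event for dev_fd sits on a ready list, the wakeup
* source created above holds off system auto-suspend)
*/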
/* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */
static noinline void ep_destroy_wakeup_source(struct epitem *epi)
{
struct wakeup_source *ws = ep_wakeup_source(epi);
RCU_INIT_POINTER(epi->ws, NULL);
/*
* wait for ep_pm_stay_awake_rcu to finish, synchronize_rcu is
* used internally by wakeup_source_remove, too (called by
* wakeup_source_unregister), so we cannot use call_rcu
*/
synchronize_rcu();
wakeup_source_unregister(ws);
}
/*
* Must be called with "mtx" held.
*/
static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
struct file *tfile, int fd, int full_check)
{
int error, pwake = 0;
__poll_t revents;
long user_watches;
struct epitem *epi;
struct ep_pqueue epq;
lockdep_assert_irqs_enabled();
user_watches = atomic_long_read(&ep->user->epoll_watches);
if (unlikely(user_watches >= max_user_watches))
return -ENOSPC;
if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
return -ENOMEM;
/* Item initialization follows here ... */
INIT_LIST_HEAD(&epi->rdllink);
INIT_LIST_HEAD(&epi->fllink);
INIT_LIST_HEAD(&epi->pwqlist);
epi->ep = ep;
ep_set_ffd(&epi->ffd, tfile, fd);
epi->event = *event;
epi->nwait = 0;
epi->next = EP_UNACTIVE_PTR;
if (epi->event.events & EPOLLWAKEUP) {
error = ep_create_wakeup_source(epi);
if (error)
goto error_create_wakeup_source;
} else {
RCU_INIT_POINTER(epi->ws, NULL);
}
/* Add the current item to the list of active epoll hooks for this file */
spin_lock(&tfile->f_lock);
list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
spin_unlock(&tfile->f_lock);
/*
* Add the current item to the RB tree. All RB tree operations are
* protected by "mtx", and ep_insert() is called with "mtx" held.
*/
ep_rbtree_insert(ep, epi);
/* now check if we've created too many backpaths */
error = -EINVAL;
if (full_check && reverse_path_check())
goto error_remove_epi;
/* Initialize the poll table using the queue callback */
epq.epi = epi;
init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
/*
* Attach the item to the poll hooks and get current event bits.
* We can safely use the file* here because its usage count has
* been increased by the caller of this function. Note that after
* this operation completes, the poll callback can start hitting
* the new item.
*/
revents = ep_item_poll(epi, &epq.pt, 1);
/*
* We have to check if something went wrong during the poll wait queue
* install process. Namely, an allocation for a wait queue failed due
* to high memory pressure.
*/
error = -ENOMEM;
if (epi->nwait < 0)
goto error_unregister;
/* We have to drop the new item inside our item list to keep track of it */
write_lock_irq(&ep->lock);
/* record NAPI ID of new item if present */
ep_set_busy_poll_napi_id(epi);
/* If the file is already "ready" we drop it inside the ready list */
if (revents && !ep_is_linked(epi)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
/* Notify waiting tasks that events are available */
if (waitqueue_active(&ep->wq))
wake_up(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
write_unlock_irq(&ep->lock);
atomic_long_inc(&ep->user->epoll_watches);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(ep, NULL);
return 0;
error_unregister:
ep_unregister_pollwait(ep, epi);
error_remove_epi:
spin_lock(&tfile->f_lock);
list_del_rcu(&epi->fllink);
spin_unlock(&tfile->f_lock);
rb_erase_cached(&epi->rbn, &ep->rbr);
/*
* We need to do this because an event could have arrived on some
* allocated wait queue. Note that we don't care about the ep->ovflist
* list, since that is used/cleaned only inside a section bound by "mtx".
* And ep_insert() is called with "mtx" held.
*/
write_lock_irq(&ep->lock);
if (ep_is_linked(epi))
list_del_init(&epi->rdllink);
write_unlock_irq(&ep->lock);
wakeup_source_unregister(ep_wakeup_source(epi));
error_create_wakeup_source:
kmem_cache_free(epi_cache, epi);
return error;
}
/*
* Modify the interest event mask by dropping an event if the new mask
* has a match in the current file status. Must be called with "mtx" held.
*/
static int ep_modify(struct eventpoll *ep, struct epitem *epi,
const struct epoll_event *event)
{
int pwake = 0;
poll_table pt;
lockdep_assert_irqs_enabled();
init_poll_funcptr(&pt, NULL);
/*
* Set the new event interest mask before calling f_op->poll();
* otherwise we might miss an event that happens between the
* f_op->poll() call and the new event set registering.
*/
epi->event.events = event->events; /* need barrier below */
epi->event.data = event->data; /* protected by mtx */
if (epi->event.events & EPOLLWAKEUP) {
if (!ep_has_wakeup_source(epi))
ep_create_wakeup_source(epi);
} else if (ep_has_wakeup_source(epi)) {
ep_destroy_wakeup_source(epi);
}
/*
* The following barrier has two effects:
*
* 1) Flush epi changes above to other CPUs. This ensures
* we do not miss events from ep_poll_callback if an
* event occurs immediately after we call f_op->poll().
* We need this because we did not take ep->lock while
* changing epi above (but ep_poll_callback does take
* ep->lock).
*
* 2) We also need to ensure we do not miss _past_ events
* when calling f_op->poll(). This barrier also
* pairs with the barrier in wq_has_sleeper (see
* comments for wq_has_sleeper).
*
* This barrier will now guarantee ep_poll_callback or f_op->poll
* (or both) will notice the readiness of an item.
*/
smp_mb();
/*
* Get current event bits. We can safely use the file* here because
* its usage count has been increased by the caller of this function.
* If the item is "hot" and it is not registered inside the ready
* list, push it inside.
*/
if (ep_item_poll(epi, &pt, 1)) {
write_lock_irq(&ep->lock);
if (!ep_is_linked(epi)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
/* Notify waiting tasks that events are available */
if (waitqueue_active(&ep->wq))
wake_up(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
write_unlock_irq(&ep->lock);
}
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(ep, NULL);
return 0;
}
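/*
* Hedged userspace sketch: after an EPOLLONESHOT event fires, the item is
* effectively disabled (its events are masked down to EP_PRIVATE_BITS in
* ep_send_events_proc()) until user space rearms it with EPOLL_CTL_MOD,
* which lands in ep_modify() above (epfd and fd are placeholders):
*
* struct epoll_event ev = {
* .events = EPOLLIN | EPOLLONESHOT,
* .data.fd = fd,
* };
* (after handling the previous event for fd)
* epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev);
*/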
static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
void *priv)
{
struct ep_send_events_data *esed = priv;
__poll_t revents;
struct epitem *epi, *tmp;
struct epoll_event __user *uevent = esed->events;
struct wakeup_source *ws;
poll_table pt;
init_poll_funcptr(&pt, NULL);
esed->res = 0;
/*
* We can loop without lock because we are passed a task private list.
* Items cannot vanish during the loop because ep_scan_ready_list() is
* holding "mtx" during this call.
*/
lockdep_assert_held(&ep->mtx);
list_for_each_entry_safe(epi, tmp, head, rdllink) {
if (esed->res >= esed->maxevents)
break;
/*
* Activate ep->ws before deactivating epi->ws to prevent
* triggering auto-suspend here (in case we reactivate epi->ws
* below).
*
* This could be rearranged to delay the deactivation of epi->ws
* instead, but then epi->ws would temporarily be out of sync
* with ep_is_linked().
*/
ws = ep_wakeup_source(epi);
if (ws) {
if (ws->active)
__pm_stay_awake(ep->ws);
__pm_relax(ws);
}
list_del_init(&epi->rdllink);
/*
* If the event mask intersects the caller-requested one,
* deliver the event to userspace. Again, ep_scan_ready_list()
* is holding ep->mtx, so no operations coming from userspace
* can change the item.
*/
revents = ep_item_poll(epi, &pt, 1);
if (!revents)
continue;
if (__put_user(revents, &uevent->events) ||
__put_user(epi->event.data, &uevent->data)) {
list_add(&epi->rdllink, head);
ep_pm_stay_awake(epi);
if (!esed->res)
esed->res = -EFAULT;
return 0;
}
esed->res++;
uevent++;
if (epi->event.events & EPOLLONESHOT)
epi->event.events &= EP_PRIVATE_BITS;
else if (!(epi->event.events & EPOLLET)) {
/*
* If this file has been added with Level
* Trigger mode, we need to insert back inside
* the ready list, so that the next call to
* epoll_wait() will check again the events
* availability. At this point, no one can insert
* into ep->rdllist besides us. The epoll_ctl()
* callers are locked out by
* ep_scan_ready_list() holding "mtx" and the
* poll callback will queue them in ep->ovflist.
*/
list_add_tail(&epi->rdllink, &ep->rdllist);
ep_pm_stay_awake(epi);
}
}
return 0;
}
static int ep_send_events(struct eventpoll *ep,
struct epoll_event __user *events, int maxevents)
{
struct ep_send_events_data esed;
esed.maxevents = maxevents;
esed.events = events;
ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
return esed.res;
}
static inline struct timespec64 ep_set_mstimeout(long ms)
{
struct timespec64 now, ts = {
.tv_sec = ms / MSEC_PER_SEC,
.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
};
ktime_get_ts64(&now);
return timespec64_add_safe(now, ts);
}
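/*
* Worked example: ms = 2500 splits into { .tv_sec = 2, .tv_nsec =
* 500000000 }, which is then added to the current CLOCK_MONOTONIC time;
* timespec64_add_safe() saturates rather than overflowing for very large
* timeouts.
*/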
/**
* ep_poll - Retrieves ready events, and delivers them to the caller supplied
* event buffer.
*
* @ep: Pointer to the eventpoll context.
* @events: Pointer to the userspace buffer where the ready events should be
* stored.
* @maxevents: Size (in terms of number of events) of the caller event buffer.
* @timeout: Maximum timeout for the ready events fetch operation, in
* milliseconds. If the @timeout is zero, the function will not block,
* while if the @timeout is less than zero, the function will block
* until at least one event has been retrieved (or an error
* occurred).
*
* Returns: Returns the number of ready events which have been fetched, or an
* error code, in case of error.
*/
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, long timeout)
{
int res = 0, eavail, timed_out = 0;
u64 slack = 0;
wait_queue_entry_t wait;
ktime_t expires, *to = NULL;
lockdep_assert_irqs_enabled();
if (timeout > 0) {
struct timespec64 end_time = ep_set_mstimeout(timeout);
slack = select_estimate_accuracy(&end_time);
to = &expires;
*to = timespec64_to_ktime(end_time);
} else if (timeout == 0) {
/*
* Avoid the unnecessary trip to the wait queue loop, if the
* caller specified a non-blocking operation. We still need the
* lock because we could race with the irq callback and miss an
* epi being added to the ready list, thus incorrectly
* returning 0 back to userspace.
*/
timed_out = 1;
write_lock_irq(&ep->lock);
eavail = ep_events_available(ep);
write_unlock_irq(&ep->lock);
goto send_events;
}
fetch_events:
if (!ep_events_available(ep))
ep_busy_loop(ep, timed_out);
eavail = ep_events_available(ep);
if (eavail)
goto send_events;
/*
* Busy poll timed out. Drop NAPI ID for now, we can add
* it back in when we have moved a socket with a valid NAPI
* ID onto the ready list.
*/
ep_reset_busy_poll_napi_id(ep);
do {
/*
* Internally init_wait() uses autoremove_wake_function(),
* thus the wait entry is removed from the wait queue on each
* wakeup. Why is this important? In case of several waiters,
* each new wakeup will hit the next waiter, giving it the
* chance to harvest new events; otherwise a wakeup can be
* lost. This is also good performance-wise, because on the
* normal wakeup path there is no need to call __remove_wait_queue()
* explicitly, so ep->lock is not taken, which would otherwise
* stall event delivery.
*/
init_wait(&wait);
write_lock_irq(&ep->lock);
/*
* Barrierless variant, waitqueue_active() is called under
* the same lock on wakeup ep_poll_callback() side, so it
* is safe to avoid an explicit barrier.
*/
__set_current_state(TASK_INTERRUPTIBLE);
/*
* Do the final check under the lock. ep_scan_ready_list()
* plays with two lists (->rdllist and ->ovflist) and there
* is always a race when both lists are empty for a short
* period of time although events are pending, so the lock is
* important.
*/
eavail = ep_events_available(ep);
if (!eavail) {
if (signal_pending(current))
res = -EINTR;
else
__add_wait_queue_exclusive(&ep->wq, &wait);
}
write_unlock_irq(&ep->lock);
if (!eavail && !res)
timed_out = !freezable_schedule_hrtimeout_range(to, slack,
HRTIMER_MODE_ABS);
/*
* We were woken up, thus go and try to harvest some events.
* If timed out and still on the wait queue, recheck eavail
* carefully under lock, below.
*/
eavail = 1;
} while (0);
__set_current_state(TASK_RUNNING);
if (!list_empty_careful(&wait.entry)) {
write_lock_irq(&ep->lock);
/*
* If the thread timed out and is not on the wait queue, it
* means that the thread was woken up after its timeout expired
* before it could reacquire the lock. Thus, when wait.entry is
* empty, it needs to harvest events.
*/
if (timed_out)
eavail = list_empty(&wait.entry);
__remove_wait_queue(&ep->wq, &wait);
write_unlock_irq(&ep->lock);
}
send_events:
if (fatal_signal_pending(current)) {
/*
* Always short-circuit for fatal signals to allow
* threads to make a timely exit without the chance of
* finding more events available and fetching
* repeatedly.
*/
res = -EINTR;
}
/*
* Try to transfer events to user space. In case we get 0 events and
* there's still timeout left over, we try again in search of
* more luck.
*/
if (!res && eavail &&
!(res = ep_send_events(ep, events, maxevents)) && !timed_out)
goto fetch_events;
return res;
}
/**
* ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
* API, to verify that adding an epoll file inside another
* epoll structure does not violate the constraints in
* terms of closed loops or too deep chains (which can
* result in excessive stack usage).
*
* @priv: Pointer to the epoll file to be currently checked.
* @cookie: Original cookie for this call. This is the top-of-the-chain epoll
* data structure pointer.
* @call_nests: Current depth of the @ep_call_nested() call stack.
*
* Returns: Returns zero if adding the epoll @file inside current epoll
* structure @ep does not violate the constraints, or -1 otherwise.
*/
static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
{
int error = 0;
struct file *file = priv;
struct eventpoll *ep = file->private_data;
struct eventpoll *ep_tovisit;
struct rb_node *rbp;
struct epitem *epi;
mutex_lock_nested(&ep->mtx, call_nests + 1);
ep->gen = loop_check_gen;
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn);
if (unlikely(is_file_epoll(epi->ffd.file))) {
ep_tovisit = epi->ffd.file->private_data;
if (ep_tovisit->gen == loop_check_gen)
continue;
error = ep_call_nested(&poll_loop_ncalls,
ep_loop_check_proc, epi->ffd.file,
ep_tovisit, current);
if (error != 0)
break;
} else {
/*
* If we've reached a file that is not associated with
* an ep, then we need to check if the newly added
* links are going to add too many wakeup paths. We do
* this by adding it to the tfile_check_list, if it's
* not already there, and calling reverse_path_check()
* during ep_insert().
*/
if (list_empty(&epi->ffd.file->f_tfile_llink)) {
if (get_file_rcu(epi->ffd.file))
list_add(&epi->ffd.file->f_tfile_llink,
&tfile_check_list);
}
}
}
mutex_unlock(&ep->mtx);
return error;
}
/**
* ep_loop_check - Performs a check to verify that adding an epoll file (@file)
* inside another epoll file (represented by @ep) does not create
* closed loops or too deep chains.
*
* @ep: Pointer to the epoll private data structure.
* @file: Pointer to the epoll file to be checked.
*
* Returns: Returns zero if adding the epoll @file inside current epoll
* structure @ep does not violate the constraints, or -1 otherwise.
*/
static int ep_loop_check(struct eventpoll *ep, struct file *file)
{
return ep_call_nested(&poll_loop_ncalls,
ep_loop_check_proc, file, ep, current);
}
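/*
* Hedged userspace sketch of what this check rejects: a two-instance
* cycle, where the second EPOLL_CTL_ADD is expected to fail with ELOOP.
*
* int efd1 = epoll_create1(0);
* int efd2 = epoll_create1(0);
* struct epoll_event ev = { .events = EPOLLIN };
*
* epoll_ctl(efd1, EPOLL_CTL_ADD, efd2, &ev); (ok)
* epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, &ev); (fails with ELOOP)
*/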
static void clear_tfile_check_list(void)
{
struct file *file;
/* first clear the tfile_check_list */
while (!list_empty(&tfile_check_list)) {
file = list_first_entry(&tfile_check_list, struct file,
f_tfile_llink);
list_del_init(&file->f_tfile_llink);
fput(file);
}
INIT_LIST_HEAD(&tfile_check_list);
}
/*
* Open an eventpoll file descriptor.
*/
static int do_epoll_create(int flags)
{
int error, fd;
struct eventpoll *ep = NULL;
struct file *file;
/* Check the EPOLL_* constant for consistency. */
BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
if (flags & ~EPOLL_CLOEXEC)
return -EINVAL;
/*
* Create the internal data structure ("struct eventpoll").
*/
error = ep_alloc(&ep);
if (error < 0)
return error;
/*
* Creates all the items needed to set up an eventpoll file. That is,
* a file structure and a free file descriptor.
*/
fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
if (fd < 0) {
error = fd;
goto out_free_ep;
}
file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
O_RDWR | (flags & O_CLOEXEC));
if (IS_ERR(file)) {
error = PTR_ERR(file);
goto out_free_fd;
}
ep->file = file;
fd_install(fd, file);
return fd;
out_free_fd:
put_unused_fd(fd);
out_free_ep:
ep_free(ep);
return error;
}
SYSCALL_DEFINE1(epoll_create1, int, flags)
{
return do_epoll_create(flags);
}
SYSCALL_DEFINE1(epoll_create, int, size)
{
if (size <= 0)
return -EINVAL;
return do_epoll_create(0);
}
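/*
* Userspace usage sketch: epoll_create1() is the preferred entry point;
* the legacy epoll_create() size argument is only checked for being
* positive and is otherwise ignored, as seen above.
*
* int epfd = epoll_create1(EPOLL_CLOEXEC);
* if (epfd < 0)
* perror("epoll_create1");
*/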
static inline int epoll_mutex_lock(struct mutex *mutex, int depth,
bool nonblock)
{
if (!nonblock) {
mutex_lock_nested(mutex, depth);
return 0;
}
if (mutex_trylock(mutex))
return 0;
return -EAGAIN;
}
int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
bool nonblock)
{
int error;
int full_check = 0;
struct fd f, tf;
struct eventpoll *ep;
struct epitem *epi;
struct eventpoll *tep = NULL;
error = -EBADF;
f = fdget(epfd);
if (!f.file)
goto error_return;
/* Get the "struct file *" for the target file */
tf = fdget(fd);
if (!tf.file)
goto error_fput;
/* The target file descriptor must support poll */
error = -EPERM;
if (!file_can_poll(tf.file))
goto error_tgt_fput;
/* Check if EPOLLWAKEUP is allowed */
if (ep_op_has_event(op))
ep_take_care_of_epollwakeup(epds);
/*
* We have to check that the file structure underneath the file descriptor
* the user passed to us _is_ an eventpoll file. And also we do not permit
* adding an epoll file descriptor inside itself.
*/
error = -EINVAL;
if (f.file == tf.file || !is_file_epoll(f.file))
goto error_tgt_fput;
/*
* epoll adds to the wakeup queue at EPOLL_CTL_ADD time only,
* so EPOLLEXCLUSIVE is not allowed for an EPOLL_CTL_MOD operation.
* Also, we do not currently support nested exclusive wakeups.
*/
if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {
if (op == EPOLL_CTL_MOD)
goto error_tgt_fput;
if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
(epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))
goto error_tgt_fput;
}
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
ep = f.file->private_data;
/*
* When we insert an epoll file descriptor inside another epoll file
* descriptor, there is the chance of creating closed loops, which are
* better handled here than in more critical paths. While we are
* checking for loops we also determine the list of files reachable
* and hang them on the tfile_check_list, so we can check that we
* haven't created too many possible wakeup paths.
*
* We do not need to take the global 'epmutex' on EPOLL_CTL_ADD when
* the epoll file descriptor is attaching directly to a wakeup source,
* unless the epoll file descriptor is nested. The purpose of taking the
* 'epmutex' on add is to prevent complex topologies such as loops and
* deep wakeup paths from forming in parallel through multiple
* EPOLL_CTL_ADD operations.
*/
error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
if (error)
goto error_tgt_fput;
if (op == EPOLL_CTL_ADD) {
if (!list_empty(&f.file->f_ep_links) ||
ep->gen == loop_check_gen ||
is_file_epoll(tf.file)) {
mutex_unlock(&ep->mtx);
error = epoll_mutex_lock(&epmutex, 0, nonblock);
if (error)
goto error_tgt_fput;
loop_check_gen++;
full_check = 1;
if (is_file_epoll(tf.file)) {
error = -ELOOP;
if (ep_loop_check(ep, tf.file) != 0)
goto error_tgt_fput;
} else {
get_file(tf.file);
list_add(&tf.file->f_tfile_llink,
&tfile_check_list);
}
error = epoll_mutex_lock(&ep->mtx, 0, nonblock);
if (error)
goto error_tgt_fput;
if (is_file_epoll(tf.file)) {
tep = tf.file->private_data;
error = epoll_mutex_lock(&tep->mtx, 1, nonblock);
if (error) {
mutex_unlock(&ep->mtx);
goto error_tgt_fput;
}
}
}
}
/*
* Try to look up the file inside our RB tree. Since we grabbed "mtx"
* above, we can be sure to be able to use the item looked up by
* ep_find() till we release the mutex.
*/
epi = ep_find(ep, tf.file, fd);
error = -EINVAL;
switch (op) {
case EPOLL_CTL_ADD:
if (!epi) {
epds->events |= EPOLLERR | EPOLLHUP;
error = ep_insert(ep, epds, tf.file, fd, full_check);
} else
error = -EEXIST;
break;
case EPOLL_CTL_DEL:
if (epi)
error = ep_remove(ep, epi);
else
error = -ENOENT;
break;
case EPOLL_CTL_MOD:
if (epi) {
if (!(epi->event.events & EPOLLEXCLUSIVE)) {
epds->events |= EPOLLERR | EPOLLHUP;
error = ep_modify(ep, epi, epds);
}
} else
error = -ENOENT;
break;
}
if (tep != NULL)
mutex_unlock(&tep->mtx);
mutex_unlock(&ep->mtx);
error_tgt_fput:
if (full_check) {
clear_tfile_check_list();
loop_check_gen++;
mutex_unlock(&epmutex);
}
fdput(tf);
error_fput:
fdput(f);
error_return:
return error;
}
/*
* The following function implements the controller interface for
* the eventpoll file that enables the insertion/removal/change of
* file descriptors inside the interest set.
*/
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
struct epoll_event __user *, event)
{
struct epoll_event epds;
if (ep_op_has_event(op) &&
copy_from_user(&epds, event, sizeof(struct epoll_event)))
return -EFAULT;
return do_epoll_ctl(epfd, op, fd, &epds, false);
}
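/*
* Userspace usage sketch for the syscall above (epfd and sock_fd are
* placeholders):
*
* struct epoll_event ev = {
* .events = EPOLLIN | EPOLLET,
* .data.fd = sock_fd,
* };
* if (epoll_ctl(epfd, EPOLL_CTL_ADD, sock_fd, &ev) < 0)
* perror("epoll_ctl");
*/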
/*
* Implement the event wait interface for the eventpoll file. It is the kernel
* part of the user space epoll_wait(2).
*/
static int do_epoll_wait(int epfd, struct epoll_event __user *events,
int maxevents, int timeout)
{
int error;
struct fd f;
struct eventpoll *ep;
/* The maximum number of events must be greater than zero */
if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
return -EINVAL;
/* Verify that the area passed by the user is writeable */
if (!access_ok(events, maxevents * sizeof(struct epoll_event)))
return -EFAULT;
/* Get the "struct file *" for the eventpoll file */
f = fdget(epfd);
if (!f.file)
return -EBADF;
/*
* We have to check that the file structure underneath the fd
* the user passed to us _is_ an eventpoll file.
*/
error = -EINVAL;
if (!is_file_epoll(f.file))
goto error_fput;
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
ep = f.file->private_data;
/* Time to fish for events ... */
error = ep_poll(ep, events, maxevents, timeout);
error_fput:
fdput(f);
return error;
}
SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
int, maxevents, int, timeout)
{
return do_epoll_wait(epfd, events, maxevents, timeout);
}
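/*
* Userspace usage sketch of the matching wait loop (epfd, MAX_EVENTS and
* handle_fd() are placeholders):
*
* struct epoll_event events[MAX_EVENTS];
* int i, n;
*
* n = epoll_wait(epfd, events, MAX_EVENTS, -1);
* for (i = 0; i < n; i++)
* handle_fd(events[i].data.fd, events[i].events);
*/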
/*
* Implement the event wait interface for the eventpoll file. It is the kernel
* part of the user space epoll_pwait(2).
*/
SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
int, maxevents, int, timeout, const sigset_t __user *, sigmask,
size_t, sigsetsize)
{
int error;
/*
* If the caller wants a certain signal mask to be set during the wait,
* we apply it here.
*/
error = set_user_sigmask(sigmask, sigsetsize);
if (error)
return error;
error = do_epoll_wait(epfd, events, maxevents, timeout);
restore_saved_sigmask_unless(error == -EINTR);
return error;
}
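/*
* Userspace usage sketch: epoll_pwait() atomically installs a signal mask
* for the duration of the wait, closing the race between unblocking a
* signal and entering epoll_wait() (epfd, events and MAX_EVENTS are
* placeholders):
*
* sigset_t mask;
*
* sigemptyset(&mask); (block nothing while waiting)
* int n = epoll_pwait(epfd, events, MAX_EVENTS, -1, &mask);
*/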
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
struct epoll_event __user *, events,
int, maxevents, int, timeout,
const compat_sigset_t __user *, sigmask,
compat_size_t, sigsetsize)
{
long err;
/*
* If the caller wants a certain signal mask to be set during the wait,
* we apply it here.
*/
err = set_compat_user_sigmask(sigmask, sigsetsize);
if (err)
return err;
err = do_epoll_wait(epfd, events, maxevents, timeout);
restore_saved_sigmask_unless(err == -EINTR);
return err;
}
#endif
static int __init eventpoll_init(void)
{
struct sysinfo si;
si_meminfo(&si);
/*
* Allows top 4% of lowmem to be allocated for epoll watches (per user).
*/
max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
EP_ITEM_COST;
BUG_ON(max_user_watches < 0);
/*
* Initialize the structure used to perform epoll file descriptor
* inclusion loops checks.
*/
ep_nested_calls_init(&poll_loop_ncalls);
/*
* We can have many thousands of epitems, so prevent this from
* using an extra cache line on 64-bit (and smaller) CPUs
*/
BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);
/* Allocates slab cache used to allocate "struct epitem" items */
epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);
/* Allocates slab cache used to allocate "struct eppoll_entry" */
pwq_cache = kmem_cache_create("eventpoll_pwq",
sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
return 0;
}
fs_initcall(eventpoll_init);