Merge 5.10.121 into android12-5.10-lts

Changes in 5.10.121
	binfmt_flat: do not stop relocating GOT entries prematurely on riscv
	parisc/stifb: Implement fb_is_primary_device()
	riscv: Initialize thread pointer before calling C functions
	riscv: Fix irq_work when SMP is disabled
	ALSA: hda/realtek: Enable 4-speaker output for Dell XPS 15 9520 laptop
	ALSA: hda/realtek - Fix microphone noise on ASUS TUF B550M-PLUS
	ALSA: usb-audio: Cancel pending work at closing a MIDI substream
	USB: serial: option: add Quectel BG95 modem
	USB: new quirk for Dell Gen 2 devices
	usb: dwc3: gadget: Move null pointer check to proper place
	usb: core: hcd: Add support for deferring roothub registration
	cifs: when extending a file with falloc we should make files not-sparse
	xhci: Allow host runtime PM as default for Intel Alder Lake N xHCI
	Fonts: Make font size unsigned in font_desc
	parisc/stifb: Keep track of hardware path of graphics card
	x86/MCE/AMD: Fix memory leak when threshold_create_bank() fails
	perf/x86/intel: Fix event constraints for ICL
	ptrace/um: Replace PT_DTRACE with TIF_SINGLESTEP
	ptrace/xtensa: Replace PT_SINGLESTEP with TIF_SINGLESTEP
	ptrace: Reimplement PTRACE_KILL by always sending SIGKILL
	btrfs: add "0x" prefix for unsupported optional features
	btrfs: repair super block num_devices automatically
	iommu/vt-d: Add RPLS to quirk list to skip TE disabling
	drm/virtio: fix NULL pointer dereference in virtio_gpu_conn_get_modes
	mwifiex: add mutex lock for call in mwifiex_dfs_chan_sw_work_queue
	b43legacy: Fix assigning negative value to unsigned variable
	b43: Fix assigning negative value to unsigned variable
	ipw2x00: Fix potential NULL dereference in libipw_xmit()
	ipv6: fix locking issues with loops over idev->addr_list
	fbcon: Consistently protect deferred_takeover with console_lock()
	x86/platform/uv: Update TSC sync state for UV5
	ACPICA: Avoid cache flush inside virtual machines
	drm/komeda: return early if drm_universal_plane_init() fails.
	rcu-tasks: Fix race in schedule and flush work
	rcu: Make TASKS_RUDE_RCU select IRQ_WORK
	sfc: ef10: Fix assigning negative value to unsigned variable
	ALSA: jack: Access input_dev under mutex
	spi: spi-rspi: Remove setting {src,dst}_{addr,addr_width} based on DMA direction
	tools/power turbostat: fix ICX DRAM power numbers
	drm/amd/pm: fix double free in si_parse_power_table()
	ath9k: fix QCA9561 PA bias level
	media: venus: hfi: avoid null dereference in deinit
	media: pci: cx23885: Fix the error handling in cx23885_initdev()
	media: cx25821: Fix the warning when removing the module
	md/bitmap: don't set sb values if can't pass sanity check
	mmc: jz4740: Apply DMA engine limits to maximum segment size
	drivers: mmc: sdhci_am654: Add the quirk to set TESTCD bit
	scsi: megaraid: Fix error check return value of register_chrdev()
	scsi: ufs: Use pm_runtime_resume_and_get() instead of pm_runtime_get_sync()
	scsi: lpfc: Fix resource leak in lpfc_sli4_send_seq_to_ulp()
	ath11k: disable spectral scan during spectral deinit
	ASoC: Intel: bytcr_rt5640: Add quirk for the HP Pro Tablet 408
	drm/plane: Move range check for format_count earlier
	drm/amd/pm: fix the compile warning
	ath10k: skip ath10k_halt during suspend for driver state RESTARTING
	arm64: compat: Do not treat syscall number as ESR_ELx for a bad syscall
	drm: msm: fix error check return value of irq_of_parse_and_map()
	ipv6: Don't send rs packets to the interface of ARPHRD_TUNNEL
	net/mlx5: fs, delete the FTE when there are no rules attached to it
	ASoC: dapm: Don't fold register value changes into notifications
	mlxsw: spectrum_dcb: Do not warn about priority changes
	mlxsw: Treat LLDP packets as control
	drm/amdgpu/ucode: Remove firmware load type check in amdgpu_ucode_free_bo
	HID: bigben: fix slab-out-of-bounds Write in bigben_probe
	ASoC: tscs454: Add endianness flag in snd_soc_component_driver
	net: remove two BUG() from skb_checksum_help()
	s390/preempt: disable __preempt_count_add() optimization for PROFILE_ALL_BRANCHES
	perf/amd/ibs: Cascade pmu init functions' return value
	spi: stm32-qspi: Fix wait_cmd timeout in APM mode
	dma-debug: change allocation mode from GFP_NOWAIT to GFP_ATOMIC
	ACPI: PM: Block ASUS B1400CEAE from suspend to idle by default
	ipmi:ssif: Check for NULL msg when handling events and messages
	ipmi: Fix pr_fmt to avoid compilation issues
	rtlwifi: Use pr_warn instead of WARN_ONCE
	media: rga: fix possible memory leak in rga_probe
	media: coda: limit frame interval enumeration to supported encoder frame sizes
	media: imon: reorganize serialization
	media: cec-adap.c: fix is_configuring state
	openrisc: start CPU timer early in boot
	nvme-pci: fix a NULL pointer dereference in nvme_alloc_admin_tags
	ASoC: rt5645: Fix erroneous cleanup order
	nbd: Fix hung on disconnect request if socket is closed before
	net: phy: micrel: Allow probing without .driver_data
	media: exynos4-is: Fix compile warning
	ASoC: max98357a: remove dependency on GPIOLIB
	ASoC: rt1015p: remove dependency on GPIOLIB
	can: mcp251xfd: silence clang's -Wunaligned-access warning
	x86/microcode: Add explicit CPU vendor dependency
	m68k: atari: Make Atari ROM port I/O write macros return void
	rxrpc: Return an error to sendmsg if call failed
	rxrpc, afs: Fix selection of abort codes
	eth: tg3: silence the GCC 12 array-bounds warning
	selftests/bpf: fix btf_dump/btf_dump due to recent clang change
	gfs2: use i_lock spin_lock for inode qadata
	IB/rdmavt: add missing locks in rvt_ruc_loopback
	ARM: dts: ox820: align interrupt controller node name with dtschema
	ARM: dts: s5pv210: align DMA channels with dtschema
	arm64: dts: qcom: msm8994: Fix BLSP[12]_DMA channels count
	PM / devfreq: rk3399_dmc: Disable edev on remove()
	crypto: ccree - use fine grained DMA mapping dir
	soc: ti: ti_sci_pm_domains: Check for null return of devm_kcalloc
	fs: jfs: fix possible NULL pointer dereference in dbFree()
	ARM: OMAP1: clock: Fix UART rate reporting algorithm
	powerpc/fadump: Fix fadump to work with a different endian capture kernel
	fat: add ratelimit to fat*_ent_bread()
	pinctrl: renesas: rzn1: Fix possible null-ptr-deref in sh_pfc_map_resources()
	ARM: versatile: Add missing of_node_put in dcscb_init
	ARM: dts: exynos: add atmel,24c128 fallback to Samsung EEPROM
	ARM: hisi: Add missing of_node_put after of_find_compatible_node
	PCI: Avoid pci_dev_lock() AB/BA deadlock with sriov_numvfs_store()
	tracing: incorrect isolate_mode_t cast in mm_vmscan_lru_isolate
	powerpc/powernv/vas: Assign real address to rx_fifo in vas_rx_win_attr
	powerpc/xics: fix refcount leak in icp_opal_init()
	powerpc/powernv: fix missing of_node_put in uv_init()
	macintosh/via-pmu: Fix build failure when CONFIG_INPUT is disabled
	powerpc/iommu: Add missing of_node_put in iommu_init_early_dart
	RDMA/hfi1: Prevent panic when SDMA is disabled
	drm: fix EDID struct for old ARM OABI format
	dt-bindings: display: sitronix, st7735r: Fix backlight in example
	ath11k: acquire ab->base_lock in unassign when finding the peer by addr
	ath9k: fix ar9003_get_eepmisc
	drm/edid: fix invalid EDID extension block filtering
	drm/bridge: adv7511: clean up CEC adapter when probe fails
	spi: qcom-qspi: Add minItems to interconnect-names
	ASoC: mediatek: Fix error handling in mt8173_max98090_dev_probe
	ASoC: mediatek: Fix missing of_node_put in mt2701_wm8960_machine_probe
	x86/delay: Fix the wrong asm constraint in delay_loop()
	drm/ingenic: Reset pixclock rate when parent clock rate changes
	drm/mediatek: Fix mtk_cec_mask()
	drm/vc4: hvs: Reset muxes at probe time
	drm/vc4: txp: Don't set TXP_VSTART_AT_EOF
	drm/vc4: txp: Force alpha to be 0xff if it's disabled
	libbpf: Don't error out on CO-RE relos for overridden weak subprogs
	bpf: Fix excessive memory allocation in stack_map_alloc()
	nl80211: show SSID for P2P_GO interfaces
	drm/komeda: Fix an undefined behavior bug in komeda_plane_add()
	drm: mali-dp: potential dereference of null pointer
	spi: spi-ti-qspi: Fix return value handling of wait_for_completion_timeout
	scftorture: Fix distribution of short handler delays
	net: dsa: mt7530: 1G can also support 1000BASE-X link mode
	NFC: NULL out the dev->rfkill to prevent UAF
	efi: Add missing prototype for efi_capsule_setup_info
	target: remove an incorrect unmap zeroes data deduction
	drbd: fix duplicate array initializer
	EDAC/dmc520: Don't print an error for each unconfigured interrupt line
	mtd: rawnand: denali: Use managed device resources
	HID: hid-led: fix maximum brightness for Dream Cheeky
	HID: elan: Fix potential double free in elan_input_configured
	drm/bridge: Fix error handling in analogix_dp_probe
	sched/fair: Fix cfs_rq_clock_pelt() for throttled cfs_rq
	spi: img-spfi: Fix pm_runtime_get_sync() error checking
	cpufreq: Fix possible race in cpufreq online error path
	ath9k_htc: fix potential out of bounds access with invalid rxstatus->rs_keyix
	media: hantro: Empty encoder capture buffers by default
	drm/panel: simple: Add missing bus flags for Innolux G070Y2-L01
	ALSA: pcm: Check for null pointer of pointer substream before dereferencing it
	inotify: show inotify mask flags in proc fdinfo
	fsnotify: fix wrong lockdep annotations
	of: overlay: do not break notify on NOTIFY_{OK|STOP}
	drm/msm/dpu: adjust display_v_end for eDP and DP
	scsi: ufs: qcom: Fix ufs_qcom_resume()
	scsi: ufs: core: Exclude UECxx from SFR dump list
	selftests/resctrl: Fix null pointer dereference on open failed
	libbpf: Fix logic for finding matching program for CO-RE relocation
	mtd: spi-nor: core: Check written SR value in spi_nor_write_16bit_sr_and_check()
	x86/pm: Fix false positive kmemleak report in msr_build_context()
	mtd: rawnand: cadence: fix possible null-ptr-deref in cadence_nand_dt_probe()
	x86/speculation: Add missing prototype for unpriv_ebpf_notify()
	ASoC: rk3328: fix disabling mclk on pclk probe failure
	perf tools: Add missing headers needed by util/data.h
	drm/msm/disp/dpu1: set vbif hw config to NULL to avoid use after memory free during pm runtime resume
	drm/msm/dp: stop event kernel thread when DP unbind
	drm/msm/dp: fix error check return value of irq_of_parse_and_map()
	drm/msm/dsi: fix error checks and return values for DSI xmit functions
	drm/msm/hdmi: check return value after calling platform_get_resource_byname()
	drm/msm/hdmi: fix error check return value of irq_of_parse_and_map()
	drm/msm: add missing include to msm_drv.c
	drm/panel: panel-simple: Fix proper bpc for AM-1280800N3TZQW-T00H
	drm/rockchip: vop: fix possible null-ptr-deref in vop_bind()
	perf tools: Use Python devtools for version autodetection rather than runtime
	virtio_blk: fix the discard_granularity and discard_alignment queue limits
	x86: Fix return value of __setup handlers
	irqchip/exiu: Fix acknowledgment of edge triggered interrupts
	irqchip/aspeed-i2c-ic: Fix irq_of_parse_and_map() return value
	irqchip/aspeed-scu-ic: Fix irq_of_parse_and_map() return value
	x86/mm: Cleanup the control_va_addr_alignment() __setup handler
	arm64: fix types in copy_highpage()
	regulator: core: Fix enable_count imbalance with EXCLUSIVE_GET
	drm/msm/dp: fix event thread stuck in wait_event after kthread_stop()
	drm/msm/mdp5: Return error code in mdp5_pipe_release when deadlock is detected
	drm/msm/mdp5: Return error code in mdp5_mixer_release when deadlock is detected
	drm/msm: return an error pointer in msm_gem_prime_get_sg_table()
	media: uvcvideo: Fix missing check to determine if element is found in list
	iomap: iomap_write_failed fix
	spi: spi-fsl-qspi: check return value after calling platform_get_resource_byname()
	Revert "cpufreq: Fix possible race in cpufreq online error path"
	regulator: qcom_smd: Fix up PM8950 regulator configuration
	perf/amd/ibs: Use interrupt regs ip for stack unwinding
	ath11k: Don't check arvif->is_started before sending management frames
	ASoC: fsl: Fix refcount leak in imx_sgtl5000_probe
	ASoC: mxs-saif: Fix refcount leak in mxs_saif_probe
	regulator: pfuze100: Fix refcount leak in pfuze_parse_regulators_dt
	ASoC: samsung: Use dev_err_probe() helper
	ASoC: samsung: Fix refcount leak in aries_audio_probe
	kselftest/cgroup: fix test_stress.sh to use OUTPUT dir
	scripts/faddr2line: Fix overlapping text section failures
	media: aspeed: Fix an error handling path in aspeed_video_probe()
	media: exynos4-is: Fix PM disable depth imbalance in fimc_is_probe
	media: st-delta: Fix PM disable depth imbalance in delta_probe
	media: exynos4-is: Change clk_disable to clk_disable_unprepare
	media: pvrusb2: fix array-index-out-of-bounds in pvr2_i2c_core_init
	media: vsp1: Fix offset calculation for plane cropping
	Bluetooth: fix dangling sco_conn and use-after-free in sco_sock_timeout
	Bluetooth: Interleave with allowlist scan
	Bluetooth: L2CAP: Rudimentary typo fixes
	Bluetooth: LL privacy allow RPA
	Bluetooth: use inclusive language in HCI role comments
	Bluetooth: use inclusive language when filtering devices
	Bluetooth: use hdev lock for accept_list and reject_list in conn req
	nvme: set dma alignment to dword
	m68k: math-emu: Fix dependencies of math emulation support
	lsm,selinux: pass flowi_common instead of flowi to the LSM hooks
	sctp: read sk->sk_bound_dev_if once in sctp_rcv()
	net: hinic: add missing destroy_workqueue in hinic_pf_to_mgmt_init
	ASoC: ti: j721e-evm: Fix refcount leak in j721e_soc_probe_*
	media: ov7670: remove ov7670_power_off from ov7670_remove
	media: staging: media: rkvdec: Make use of the helper function devm_platform_ioremap_resource()
	media: rkvdec: h264: Fix dpb_valid implementation
	media: rkvdec: h264: Fix bit depth wrap in pps packet
	ext4: reject the 'commit' option on ext2 filesystems
	drm/msm/a6xx: Fix refcount leak in a6xx_gpu_init
	drm: msm: fix possible memory leak in mdp5_crtc_cursor_set()
	x86/sev: Annotate stack change in the #VC handler
	drm/msm/dpu: handle pm_runtime_get_sync() errors in bind path
	drm/i915: Fix CFI violation with show_dynamic_id()
	thermal/drivers/bcm2711: Don't clamp temperature at zero
	thermal/drivers/broadcom: Fix potential NULL dereference in sr_thermal_probe
	thermal/drivers/core: Use a char pointer for the cooling device name
	thermal/core: Fix memory leak in __thermal_cooling_device_register()
	thermal/drivers/imx_sc_thermal: Fix refcount leak in imx_sc_thermal_probe
	ASoC: wm2000: fix missing clk_disable_unprepare() on error in wm2000_anc_transition()
	NFC: hci: fix sleep in atomic context bugs in nfc_hci_hcp_message_tx
	ASoC: max98090: Move check for invalid values before casting in max98090_put_enab_tlv()
	net: stmmac: selftests: Use kcalloc() instead of kzalloc()
	net: stmmac: fix out-of-bounds access in a selftest
	hv_netvsc: Fix potential dereference of NULL pointer
	rxrpc: Fix listen() setting the bar too high for the prealloc rings
	rxrpc: Don't try to resend the request if we're receiving the reply
	rxrpc: Fix overlapping ACK accounting
	rxrpc: Don't let ack.previousPacket regress
	rxrpc: Fix decision on when to generate an IDLE ACK
	net: huawei: hinic: Use devm_kcalloc() instead of devm_kzalloc()
	hinic: Avoid some over memory allocation
	net/smc: postpone sk_refcnt increment in connect()
	arm64: dts: rockchip: Move drive-impedance-ohm to emmc phy on rk3399
	memory: samsung: exynos5422-dmc: Avoid some over memory allocation
	ARM: dts: suniv: F1C100: fix watchdog compatible
	soc: qcom: smp2p: Fix missing of_node_put() in smp2p_parse_ipc
	soc: qcom: smsm: Fix missing of_node_put() in smsm_parse_ipc
	PCI: cadence: Fix find_first_zero_bit() limit
	PCI: rockchip: Fix find_first_zero_bit() limit
	PCI: dwc: Fix setting error return on MSI DMA mapping failure
	ARM: dts: ci4x10: Adapt to changes in imx6qdl.dtsi regarding fec clocks
	soc: qcom: llcc: Add MODULE_DEVICE_TABLE()
	KVM: nVMX: Leave most VM-Exit info fields unmodified on failed VM-Entry
	KVM: nVMX: Clear IDT vectoring on nested VM-Exit for double/triple fault
	platform/chrome: cros_ec: fix error handling in cros_ec_register()
	ARM: dts: imx6dl-colibri: Fix I2C pinmuxing
	platform/chrome: Re-introduce cros_ec_cmd_xfer and use it for ioctls
	can: xilinx_can: mark bit timing constants as const
	ARM: dts: stm32: Fix PHY post-reset delay on Avenger96
	ARM: dts: bcm2835-rpi-zero-w: Fix GPIO line name for Wifi/BT
	ARM: dts: bcm2837-rpi-cm3-io3: Fix GPIO line names for SMPS I2C
	ARM: dts: bcm2837-rpi-3-b-plus: Fix GPIO line name of power LED
	ARM: dts: bcm2835-rpi-b: Fix GPIO line names
	misc: ocxl: fix possible double free in ocxl_file_register_afu
	crypto: marvell/cesa - ECB does not IV
	gpiolib: of: Introduce hook for missing gpio-ranges
	pinctrl: bcm2835: implement hook for missing gpio-ranges
	arm: mediatek: select arch timer for mt7629
	powerpc/fadump: fix PT_LOAD segment for boot memory area
	mfd: ipaq-micro: Fix error check return value of platform_get_irq()
	scsi: fcoe: Fix Wstringop-overflow warnings in fcoe_wwn_from_mac()
	firmware: arm_scmi: Fix list protocols enumeration in the base protocol
	nvdimm: Fix firmware activation deadlock scenarios
	nvdimm: Allow overwrite in the presence of disabled dimms
	pinctrl: mvebu: Fix irq_of_parse_and_map() return value
	drivers/base/node.c: fix compaction sysfs file leak
	dax: fix cache flush on PMD-mapped pages
	drivers/base/memory: fix an unlikely reference counting issue in __add_memory_block()
	powerpc/8xx: export 'cpm_setbrg' for modules
	pinctrl: renesas: core: Fix possible null-ptr-deref in sh_pfc_map_resources()
	powerpc/idle: Fix return value of __setup() handler
	powerpc/4xx/cpm: Fix return value of __setup() handler
	ASoC: atmel-pdmic: Remove endianness flag on pdmic component
	ASoC: atmel-classd: Remove endianness flag on class d component
	proc: fix dentry/inode overinstantiating under /proc/${pid}/net
	ipc/mqueue: use get_tree_nodev() in mqueue_get_tree()
	PCI: imx6: Fix PERST# start-up sequence
	tty: fix deadlock caused by calling printk() under tty_port->lock
	crypto: sun8i-ss - rework handling of IV
	crypto: sun8i-ss - handle zero sized sg
	crypto: cryptd - Protect per-CPU resource by disabling BH.
	Input: sparcspkr - fix refcount leak in bbc_beep_probe
	PCI/AER: Clear MULTI_ERR_COR/UNCOR_RCV bits
	hwrng: omap3-rom - fix using wrong clk_disable() in omap_rom_rng_runtime_resume()
	powerpc/64: Only WARN if __pa()/__va() called with bad addresses
	powerpc/perf: Fix the threshold compare group constraint for power9
	macintosh: via-pmu and via-cuda need RTC_LIB
	powerpc/fsl_rio: Fix refcount leak in fsl_rio_setup
	mfd: davinci_voicecodec: Fix possible null-ptr-deref davinci_vc_probe()
	mailbox: forward the hrtimer if not queued and under a lock
	RDMA/hfi1: Prevent use of lock before it is initialized
	Input: stmfts - do not leave device disabled in stmfts_input_open
	OPP: call of_node_put() on error path in _bandwidth_supported()
	f2fs: fix dereference of stale list iterator after loop body
	iommu/mediatek: Add list_del in mtk_iommu_remove
	i2c: at91: use dma safe buffers
	cpufreq: mediatek: add missing platform_driver_unregister() on error in mtk_cpufreq_driver_init
	cpufreq: mediatek: Use module_init and add module_exit
	cpufreq: mediatek: Unregister platform device on exit
	MIPS: Loongson: Use hwmon_device_register_with_groups() to register hwmon
	i2c: at91: Initialize dma_buf in at91_twi_xfer()
	dmaengine: idxd: Fix the error handling path in idxd_cdev_register()
	NFS: Do not report EINTR/ERESTARTSYS as mapping errors
	NFS: fsync() should report filesystem errors over EINTR/ERESTARTSYS
	NFS: Do not report flush errors in nfs_write_end()
	NFS: Don't report errors from nfs_pageio_complete() more than once
	NFSv4/pNFS: Do not fail I/O when we fail to allocate the pNFS layout
	video: fbdev: clcdfb: Fix refcount leak in clcdfb_of_vram_setup
	dmaengine: stm32-mdma: remove GISR1 register
	dmaengine: stm32-mdma: rework interrupt handler
	dmaengine: stm32-mdma: fix chan initialization in stm32_mdma_irq_handler()
	iommu/amd: Increase timeout waiting for GA log enablement
	i2c: npcm: Fix timeout calculation
	i2c: npcm: Correct register access width
	i2c: npcm: Handle spurious interrupts
	i2c: rcar: fix PM ref counts in probe error paths
	perf c2c: Use stdio interface if slang is not supported
	perf jevents: Fix event syntax error caused by ExtSel
	f2fs: fix to avoid f2fs_bug_on() in dec_valid_node_count()
	f2fs: fix to do sanity check on block address in f2fs_do_zero_range()
	f2fs: fix to clear dirty inode in f2fs_evict_inode()
	f2fs: fix deadloop in foreground GC
	f2fs: don't need inode lock for system hidden quota
	f2fs: fix to do sanity check on total_data_blocks
	f2fs: fix fallocate to use file_modified to update permissions consistently
	f2fs: fix to do sanity check for inline inode
	wifi: mac80211: fix use-after-free in chanctx code
	iwlwifi: mvm: fix assert 1F04 upon reconfig
	fs-writeback: writeback_sb_inodes: Recalculate 'wrote' according to skipped pages
	efi: Do not import certificates from UEFI Secure Boot for T2 Macs
	bfq: Split shared queues on move between cgroups
	bfq: Update cgroup information before merging bio
	bfq: Track whether bfq_group is still online
	ext4: fix use-after-free in ext4_rename_dir_prepare
	ext4: fix warning in ext4_handle_inode_extension
	ext4: fix bug_on in ext4_writepages
	ext4: filter out EXT4_FC_REPLAY from on-disk superblock field s_state
	ext4: fix bug_on in __es_tree_search
	ext4: verify dir block before splitting it
	ext4: avoid cycles in directory h-tree
	ACPI: property: Release subnode properties with data nodes
	tracing: Fix potential double free in create_var_ref()
	PCI/PM: Fix bridge_d3_blacklist[] Elo i2 overwrite of Gigabyte X299
	PCI: qcom: Fix runtime PM imbalance on probe errors
	PCI: qcom: Fix unbalanced PHY init on probe errors
	mm, compaction: fast_find_migrateblock() should return pfn in the target zone
	s390/perf: obtain sie_block from the right address
	dlm: fix plock invalid read
	dlm: fix missing lkb refcount handling
	ocfs2: dlmfs: fix error handling of user_dlm_destroy_lock
	scsi: dc395x: Fix a missing check on list iterator
	scsi: ufs: qcom: Add a readl() to make sure ref_clk gets enabled
	drm/amdgpu/cs: make commands with 0 chunks illegal behaviour.
	drm/etnaviv: check for reaped mapping in etnaviv_iommu_unmap_gem
	drm/nouveau/clk: Fix an incorrect NULL check on list iterator
	drm/nouveau/kms/nv50-: atom: fix an incorrect NULL check on list iterator
	drm/bridge: analogix_dp: Grab runtime PM reference for DP-AUX
	drm/i915/dsi: fix VBT send packet port selection for ICL+
	md: fix an incorrect NULL check in does_sb_need_changing
	md: fix an incorrect NULL check in md_reload_sb
	mtd: cfi_cmdset_0002: Move and rename chip_check/chip_ready/chip_good_for_write
	mtd: cfi_cmdset_0002: Use chip_ready() for write on S29GL064N
	media: coda: Fix reported H264 profile
	media: coda: Add more H264 levels for CODA960
	ima: remove the IMA_TEMPLATE Kconfig option
	Kconfig: Add option for asm goto w/ tied outputs to workaround clang-13 bug
	RDMA/hfi1: Fix potential integer multiplication overflow errors
	csky: patch_text: Fixup last cpu should be master
	irqchip/armada-370-xp: Do not touch Performance Counter Overflow on A375, A38x, A39x
	irqchip: irq-xtensa-mx: fix initial IRQ affinity
	cfg80211: declare MODULE_FIRMWARE for regulatory.db
	mac80211: upgrade passive scan to active scan on DFS channels after beacon rx
	um: chan_user: Fix winch_tramp() return value
	um: Fix out-of-bounds read in LDT setup
	kexec_file: drop weak attribute from arch_kexec_apply_relocations[_add]
	ftrace: Clean up hash direct_functions on register failures
	iommu/msm: Fix an incorrect NULL check on list iterator
	nodemask.h: fix compilation error with GCC12
	hugetlb: fix huge_pmd_unshare address update
	xtensa/simdisk: fix proc_read_simdisk()
	rtl818x: Prevent using not initialized queues
	ASoC: rt5514: Fix event generation for "DSP Voice Wake Up" control
	carl9170: tx: fix an incorrect use of list iterator
	stm: ltdc: fix two incorrect NULL checks on list iterator
	bcache: improve multithreaded bch_btree_check()
	bcache: improve multithreaded bch_sectors_dirty_init()
	bcache: remove incremental dirty sector counting for bch_sectors_dirty_init()
	bcache: avoid journal no-space deadlock by reserving 1 journal bucket
	serial: pch: don't overwrite xmit->buf[0] by x_char
	tilcdc: tilcdc_external: fix an incorrect NULL check on list iterator
	gma500: fix an incorrect NULL check on list iterator
	arm64: dts: qcom: ipq8074: fix the sleep clock frequency
	phy: qcom-qmp: fix struct clk leak on probe errors
	ARM: dts: s5pv210: Remove spi-cs-high on panel in Aries
	ARM: pxa: maybe fix gpio lookup tables
	SMB3: EBADF/EIO errors in rename/open caused by race condition in smb2_compound_op
	docs/conf.py: Cope with removal of language=None in Sphinx 5.0.0
	dt-bindings: gpio: altera: correct interrupt-cells
	vdpasim: allow to enable a vq repeatedly
	blk-iolatency: Fix inflight count imbalances and IO hangs on offline
	coresight: core: Fix coresight device probe failure issue
	phy: qcom-qmp: fix reset-controller leak on probe errors
	net: ipa: fix page free in ipa_endpoint_trans_release()
	net: ipa: fix page free in ipa_endpoint_replenish_one()
	xfs: set inode size after creating symlink
	xfs: sync lazy sb accounting on quiesce of read-only mounts
	xfs: fix chown leaking delalloc quota blocks when fssetxattr fails
	xfs: fix incorrect root dquot corruption error when switching group/project quota types
	xfs: restore shutdown check in mapped write fault path
	xfs: force log and push AIL to clear pinned inodes when aborting mount
	xfs: consider shutdown in bmapbt cursor delete assert
	xfs: assert in xfs_btree_del_cursor should take into account error
	kselftest/cgroup: Make test_stress.sh work if run interactively
	thermal/core: fix a UAF bug in __thermal_cooling_device_register()
	thermal/core: Fix memory leak in the error path
	bfq: Avoid merging queues with different parents
	bfq: Drop pointless unlock-lock pair
	bfq: Remove pointless bfq_init_rq() calls
	bfq: Get rid of __bio_blkcg() usage
	bfq: Make sure bfqg for which we are queueing requests is online
	block: fix bio_clone_blkg_association() to associate with proper blkcg_gq
	Revert "random: use static branch for crng_ready()"
	RDMA/rxe: Generate a completion for unsupported/invalid opcode
	MIPS: IP27: Remove incorrect `cpu_has_fpu' override
	MIPS: IP30: Remove incorrect `cpu_has_fpu' override
	ext4: only allow test_dummy_encryption when supported
	md: bcache: check the return value of kzalloc() in detached_dev_do_request()
	Linux 5.10.121

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I52dd11dc43acfa0ebddd2b6e277c823b96b07327
510 changed files with 3880 additions and 2297 deletions


@@ -176,7 +176,7 @@ finally:
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:


@@ -72,6 +72,7 @@ examples:
dc-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>;
reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>;
rotation = <270>;
backlight = <&backlight>;
};
};


@@ -9,8 +9,9 @@ Required properties:
- The second cell is reserved and is currently unused.
- gpio-controller : Marks the device node as a GPIO controller.
- interrupt-controller: Mark the device node as an interrupt controller
- #interrupt-cells : Should be 1. The interrupt type is fixed in the hardware.
- #interrupt-cells : Should be 2. The interrupt type is fixed in the hardware.
- The first cell is the GPIO offset number within the GPIO controller.
- The second cell is the interrupt trigger type and level flags.
- interrupts: Specify the interrupt.
- altr,interrupt-type: Specifies the interrupt trigger type the GPIO
hardware is synthesized. This field is required if the Altera GPIO controller
@@ -38,6 +39,6 @@ gpio_altr: gpio@ff200000 {
altr,interrupt-type = <IRQ_TYPE_EDGE_RISING>;
#gpio-cells = <2>;
gpio-controller;
#interrupt-cells = <1>;
#interrupt-cells = <2>;
interrupt-controller;
};


@@ -45,6 +45,7 @@ properties:
maxItems: 2
interconnect-names:
minItems: 1
items:
- const: qspi-config
- const: qspi-memory


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 120
SUBLEVEL = 121
EXTRAVERSION =
NAME = Dare mighty things


@@ -53,18 +53,17 @@
"GPIO18",
"NC", /* GPIO19 */
"NC", /* GPIO20 */
"GPIO21",
"CAM_GPIO0",
"GPIO22",
"GPIO23",
"GPIO24",
"GPIO25",
"NC", /* GPIO26 */
"CAM_GPIO0",
/* Binary number representing build/revision */
"CONFIG0",
"CONFIG1",
"CONFIG2",
"CONFIG3",
"GPIO27",
"GPIO28",
"GPIO29",
"GPIO30",
"GPIO31",
"NC", /* GPIO32 */
"NC", /* GPIO33 */
"NC", /* GPIO34 */


@@ -74,16 +74,18 @@
"GPIO27",
"SDA0",
"SCL0",
"NC", /* GPIO30 */
"NC", /* GPIO31 */
"NC", /* GPIO32 */
"NC", /* GPIO33 */
"NC", /* GPIO34 */
"NC", /* GPIO35 */
"NC", /* GPIO36 */
"NC", /* GPIO37 */
"NC", /* GPIO38 */
"NC", /* GPIO39 */
/* Used by BT module */
"CTS0",
"RTS0",
"TXD0",
"RXD0",
/* Used by Wifi */
"SD1_CLK",
"SD1_CMD",
"SD1_DATA0",
"SD1_DATA1",
"SD1_DATA2",
"SD1_DATA3",
"CAM_GPIO1", /* GPIO40 */
"WL_ON", /* GPIO41 */
"NC", /* GPIO42 */


@@ -45,7 +45,7 @@
#gpio-cells = <2>;
gpio-line-names = "BT_ON",
"WL_ON",
"STATUS_LED_R",
"PWR_LED_R",
"LAN_RUN",
"",
"CAM_GPIO0",


@@ -63,8 +63,8 @@
"GPIO43",
"GPIO44",
"GPIO45",
"GPIO46",
"GPIO47",
"SMPS_SCL",
"SMPS_SDA",
/* Used by eMMC */
"SD_CLK_R",
"SD_CMD_R",


@@ -129,7 +129,7 @@
samsung,i2c-max-bus-freq = <20000>;
eeprom@50 {
compatible = "samsung,s524ad0xd1";
compatible = "samsung,s524ad0xd1", "atmel,24c128";
reg = <0x50>;
};
@@ -289,7 +289,7 @@
samsung,i2c-max-bus-freq = <20000>;
eeprom@51 {
compatible = "samsung,s524ad0xd1";
compatible = "samsung,s524ad0xd1", "atmel,24c128";
reg = <0x51>;
};


@@ -297,7 +297,11 @@
phy-mode = "rmii";
phy-reset-gpios = <&gpio1 18 GPIO_ACTIVE_LOW>;
phy-handle = <&phy>;
clocks = <&clks IMX6QDL_CLK_ENET>, <&clks IMX6QDL_CLK_ENET>, <&rmii_clk>;
clocks = <&clks IMX6QDL_CLK_ENET>,
<&clks IMX6QDL_CLK_ENET>,
<&rmii_clk>,
<&clks IMX6QDL_CLK_ENET_REF>;
clock-names = "ipg", "ahb", "ptp", "enet_out";
status = "okay";
mdio {


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
* Copyright 2014-2020 Toradex
* Copyright 2014-2022 Toradex
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright 2011 Linaro Ltd.
*/
@@ -132,7 +132,7 @@
clock-frequency = <100000>;
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
pinctrl-0 = <&pinctrl_i2c2_gpio>;
pinctrl-1 = <&pinctrl_i2c2_gpio>;
scl-gpios = <&gpio2 30 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
sda-gpios = <&gpio3 16 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
@@ -488,7 +488,7 @@
>;
};
pinctrl_i2c2_gpio: i2c2grp {
pinctrl_i2c2_gpio: i2c2gpiogrp {
fsl,pins = <
MX6QDL_PAD_EIM_EB2__GPIO2_IO30 0x4001b8b1
MX6QDL_PAD_EIM_D16__GPIO3_IO16 0x4001b8b1


@@ -287,7 +287,7 @@
clocks = <&armclk>;
};
gic: gic@1000 {
gic: interrupt-controller@1000 {
compatible = "arm,arm11mp-gic";
interrupt-controller;
#interrupt-cells = <3>;


@@ -564,7 +564,6 @@
reset-gpios = <&mp05 5 GPIO_ACTIVE_LOW>;
vdd3-supply = <&ldo7_reg>;
vci-supply = <&ldo17_reg>;
spi-cs-high;
spi-max-frequency = <1200000>;
pinctrl-names = "default";
@@ -637,7 +636,7 @@
};
&i2s0 {
dmas = <&pdma0 9>, <&pdma0 10>, <&pdma0 11>;
dmas = <&pdma0 10>, <&pdma0 9>, <&pdma0 11>;
status = "okay";
};


@@ -240,8 +240,8 @@
reg = <0xeee30000 0x1000>;
interrupt-parent = <&vic2>;
interrupts = <16>;
dma-names = "rx", "tx", "tx-sec";
dmas = <&pdma1 9>, <&pdma1 10>, <&pdma1 11>;
dma-names = "tx", "rx", "tx-sec";
dmas = <&pdma1 10>, <&pdma1 9>, <&pdma1 11>;
clock-names = "iis",
"i2s_opclk0",
"i2s_opclk1";
@@ -260,8 +260,8 @@
reg = <0xe2100000 0x1000>;
interrupt-parent = <&vic2>;
interrupts = <17>;
dma-names = "rx", "tx";
dmas = <&pdma1 12>, <&pdma1 13>;
dma-names = "tx", "rx";
dmas = <&pdma1 13>, <&pdma1 12>;
clock-names = "iis", "i2s_opclk0";
clocks = <&clocks CLK_I2S1>, <&clocks SCLK_AUDIO1>;
pinctrl-names = "default";
@@ -275,8 +275,8 @@
reg = <0xe2a00000 0x1000>;
interrupt-parent = <&vic2>;
interrupts = <18>;
dma-names = "rx", "tx";
dmas = <&pdma1 14>, <&pdma1 15>;
dma-names = "tx", "rx";
dmas = <&pdma1 15>, <&pdma1 14>;
clock-names = "iis", "i2s_opclk0";
clocks = <&clocks CLK_I2S2>, <&clocks SCLK_AUDIO2>;
pinctrl-names = "default";


@@ -141,6 +141,7 @@
compatible = "snps,dwmac-mdio";
reset-gpios = <&gpioz 2 GPIO_ACTIVE_LOW>;
reset-delay-us = <1000>;
reset-post-delay-us = <1000>;
phy0: ethernet-phy@7 {
reg = <7>;


@@ -104,8 +104,10 @@
wdt: watchdog@1c20ca0 {
compatible = "allwinner,suniv-f1c100s-wdt",
"allwinner,sun4i-a10-wdt";
"allwinner,sun6i-a31-wdt";
reg = <0x01c20ca0 0x20>;
interrupts = <16>;
clocks = <&osc32k>;
};
uart0: serial@1c25000 {


@@ -67,14 +67,17 @@ static void __init hi3xxx_smp_prepare_cpus(unsigned int max_cpus)
}
ctrl_base = of_iomap(np, 0);
if (!ctrl_base) {
of_node_put(np);
pr_err("failed to map address\n");
return;
}
if (of_property_read_u32(np, "smp-offset", &offset) < 0) {
of_node_put(np);
pr_err("failed to find smp-offset property\n");
return;
}
ctrl_base += offset;
of_node_put(np);
}
}
@@ -160,6 +163,7 @@ static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle)
if (WARN_ON(!node))
return -1;
ctrl_base = of_iomap(node, 0);
of_node_put(node);
/* set the secondary core boot from DDR */
remap_reg_value = readl_relaxed(ctrl_base + REG_SC_CTRL);


@@ -30,6 +30,7 @@ config MACH_MT7623
config MACH_MT7629
bool "MediaTek MT7629 SoCs support"
default ARCH_MEDIATEK
select HAVE_ARM_ARCH_TIMER
config MACH_MT8127
bool "MediaTek MT8127 SoCs support"


@@ -41,7 +41,7 @@ static DEFINE_SPINLOCK(clockfw_lock);
unsigned long omap1_uart_recalc(struct clk *clk)
{
unsigned int val = __raw_readl(clk->enable_reg);
return val & clk->enable_bit ? 48000000 : 12000000;
return val & 1 << clk->enable_bit ? 48000000 : 12000000;
}
unsigned long omap1_sossi_recalc(struct clk *clk)


@@ -354,13 +354,13 @@ static struct platform_device cm_x300_spi_gpio = {
static struct gpiod_lookup_table cm_x300_spi_gpiod_table = {
.dev_id = "spi_gpio",
.table = {
GPIO_LOOKUP("gpio-pxa", GPIO_LCD_SCL,
GPIO_LOOKUP("pca9555.1", GPIO_LCD_SCL - GPIO_LCD_BASE,
"sck", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DIN,
GPIO_LOOKUP("pca9555.1", GPIO_LCD_DIN - GPIO_LCD_BASE,
"mosi", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-pxa", GPIO_LCD_DOUT,
GPIO_LOOKUP("pca9555.1", GPIO_LCD_DOUT - GPIO_LCD_BASE,
"miso", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-pxa", GPIO_LCD_CS,
GPIO_LOOKUP("pca9555.1", GPIO_LCD_CS - GPIO_LCD_BASE,
"cs", GPIO_ACTIVE_HIGH),
{ },
},


@@ -681,7 +681,7 @@ static struct platform_device bq24022 = {
static struct gpiod_lookup_table bq24022_gpiod_table = {
.dev_id = "gpio-regulator",
.table = {
GPIO_LOOKUP("gpio-pxa", EGPIO_MAGICIAN_BQ24022_ISET2,
GPIO_LOOKUP("htc-egpio-0", EGPIO_MAGICIAN_BQ24022_ISET2 - MAGICIAN_EGPIO_BASE,
NULL, GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-pxa", GPIO30_MAGICIAN_BQ24022_nCHARGE_EN,
"enable", GPIO_ACTIVE_LOW),


@@ -296,9 +296,9 @@ static struct gpiod_lookup_table tosa_mci_gpio_table = {
.table = {
GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_nSD_DETECT,
"cd", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_SD_WP,
GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_SD_WP - TOSA_SCOOP_GPIO_BASE,
"wp", GPIO_ACTIVE_LOW),
GPIO_LOOKUP("gpio-pxa", TOSA_GPIO_PWR_ON,
GPIO_LOOKUP("sharp-scoop.0", TOSA_GPIO_PWR_ON - TOSA_SCOOP_GPIO_BASE,
"power", GPIO_ACTIVE_HIGH),
{ },
},


@@ -144,6 +144,7 @@ static int __init dcscb_init(void)
if (!node)
return -ENODEV;
dcscb_base = of_iomap(node, 0);
of_node_put(node);
if (!dcscb_base)
return -EADDRNOTAVAIL;
cfg = readl_relaxed(dcscb_base + DCS_CFG_R);


@@ -244,6 +244,7 @@ config ARCH_STRATIX10
config ARCH_SYNQUACER
bool "Socionext SynQuacer SoC Family"
select IRQ_FASTEOI_HIERARCHY_HANDLERS
config ARCH_TEGRA
bool "NVIDIA Tegra SoC Family"


@@ -13,7 +13,7 @@
clocks {
sleep_clk: sleep_clk {
compatible = "fixed-clock";
clock-frequency = <32000>;
clock-frequency = <32768>;
#clock-cells = <0>;
};


@@ -316,7 +316,7 @@
#dma-cells = <1>;
qcom,ee = <0>;
qcom,controlled-remotely;
num-channels = <18>;
num-channels = <24>;
qcom,num-ees = <4>;
};
@@ -412,7 +412,7 @@
#dma-cells = <1>;
qcom,ee = <0>;
qcom,controlled-remotely;
num-channels = <18>;
num-channels = <24>;
qcom,num-ees = <4>;
};


@@ -1471,6 +1471,7 @@
reg = <0xf780 0x24>;
clocks = <&sdhci>;
clock-names = "emmcclk";
drive-impedance-ohm = <50>;
#phy-cells = <0>;
status = "disabled";
};
@@ -1481,7 +1482,6 @@
clock-names = "refclk";
#phy-cells = <1>;
resets = <&cru SRST_PCIEPHY>;
drive-impedance-ohm = <50>;
reset-names = "phy";
status = "disabled";
};


@@ -114,6 +114,6 @@ long compat_arm_syscall(struct pt_regs *regs, int scno)
addr = instruction_pointer(regs) - (compat_thumb_mode(regs) ? 2 : 4);
arm64_notify_die("Oops - bad compat syscall(2)", regs,
SIGILL, ILL_ILLTRP, addr, scno);
SIGILL, ILL_ILLTRP, addr, 0);
return 0;
}


@@ -16,8 +16,8 @@
void copy_highpage(struct page *to, struct page *from)
{
struct page *kto = page_address(to);
struct page *kfrom = page_address(from);
void *kto = page_address(to);
void *kfrom = page_address(from);
copy_page(kto, kfrom);


@@ -28,7 +28,7 @@ static int __kprobes patch_text_cb(void *priv)
struct csky_insn_patch *param = priv;
unsigned int addr = (unsigned int)param->addr;
if (atomic_inc_return(&param->cpu_count) == 1) {
if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
*(u16 *) addr = cpu_to_le16(param->opcode);
dcache_wb_range(addr, addr + 2);
atomic_inc(&param->cpu_count);


@@ -312,7 +312,7 @@ comment "Processor Specific Options"
config M68KFPU_EMU
bool "Math emulation support"
depends on MMU
depends on M68KCLASSIC && FPU
help
At some point in the future, this will cause floating-point math
instructions to be emulated by the kernel on machines that lack a


@@ -80,14 +80,14 @@
({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; })
#define rom_out_8(addr, b) \
({u8 __maybe_unused __w, __v = (b); u32 _addr = ((u32) (addr)); \
(void)({u8 __maybe_unused __w, __v = (b); u32 _addr = ((u32) (addr)); \
__w = ((*(__force volatile u8 *) ((_addr | 0x10000) + (__v<<1)))); })
#define rom_out_be16(addr, w) \
({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \
(void)({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \
__w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v & 0xFF)<<1)))); \
__w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v >> 8)<<1)))); })
#define rom_out_le16(addr, w) \
({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \
(void)({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \
__w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v >> 8)<<1)))); \
__w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v & 0xFF)<<1)))); })


@@ -26,7 +26,6 @@
#define cpu_has_3k_cache 0
#define cpu_has_4k_cache 1
#define cpu_has_tx39_cache 0
#define cpu_has_fpu 1
#define cpu_has_nofpuex 0
#define cpu_has_32fpr 1
#define cpu_has_counter 1


@@ -29,7 +29,6 @@
#define cpu_has_3k_cache 0
#define cpu_has_4k_cache 1
#define cpu_has_tx39_cache 0
#define cpu_has_fpu 1
#define cpu_has_nofpuex 0
#define cpu_has_32fpr 1
#define cpu_has_counter 1


@@ -23,6 +23,7 @@ static inline cycles_t get_cycles(void)
{
return mfspr(SPR_TTCR);
}
#define get_cycles get_cycles
/* This isn't really used any more */
#define CLOCK_TICK_RATE 1000


@@ -521,6 +521,15 @@ _start:
l.ori r3,r0,0x1
l.mtspr r0,r3,SPR_SR
/*
* Start the TTCR as early as possible, so that the RNG can make use of
* measurements of boot time from the earliest opportunity. Especially
* important is that the TTCR does not return zero by the time we reach
* rand_initialize().
*/
l.movhi r3,hi(SPR_TTMR_CR)
l.mtspr r0,r3,SPR_TTMR
CLEAR_GPR(r1)
CLEAR_GPR(r2)
CLEAR_GPR(r3)


@@ -12,9 +12,13 @@ static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
}
#if defined(CONFIG_STI_CONSOLE) || defined(CONFIG_FB_STI)
int fb_is_primary_device(struct fb_info *info);
#else
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif
#endif /* _ASM_FB_H_ */


@@ -216,6 +216,9 @@ static inline bool pfn_valid(unsigned long pfn)
#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
#define VIRTUAL_WARN_ON(x) WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x))
/*
* gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
* with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
@@ -223,13 +226,13 @@ static inline bool pfn_valid(unsigned long pfn)
*/
#define __va(x) \
({ \
VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET); \
VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET); \
(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET); \
})
#define __pa(x) \
({ \
VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET); \
VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET); \
(unsigned long)(x) & 0x0fffffffffffffffUL; \
})


@@ -52,7 +52,7 @@ enum vas_cop_type {
* Receive window attributes specified by the (in-kernel) owner of window.
*/
struct vas_rx_win_attr {
void *rx_fifo;
u64 rx_fifo;
int rx_fifo_size;
int wcreds_max;


@@ -861,7 +861,6 @@ static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info)
sizeof(struct fadump_memory_range));
return 0;
}
static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
u64 base, u64 end)
{
@@ -880,7 +879,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
size = mem_ranges[mrange_info->mem_range_cnt - 1].size;
if ((start + size) == base)
/*
* Boot memory area needs separate PT_LOAD segment(s) as it
* is moved to a different location at the time of crash.
* So, fold only if the region is not boot memory area.
*/
if ((start + size) == base && start >= fw_dump.boot_mem_top)
is_adjacent = true;
}
if (!is_adjacent) {


@@ -37,7 +37,7 @@ static int __init powersave_off(char *arg)
{
ppc_md.power_save = NULL;
cpuidle_disable = IDLE_POWERSAVE_OFF;
return 0;
return 1;
}
__setup("powersave=off", powersave_off);


@@ -363,7 +363,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
mask |= CNST_THRESH_MASK;
value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
}
} else if (event_is_threshold(event))
return -1;
} else {
/*
* Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,


@@ -327,6 +327,6 @@ late_initcall(cpm_init);
static int __init cpm_powersave_off(char *arg)
{
cpm.powersave_off = 1;
return 0;
return 1;
}
__setup("powersave=off", cpm_powersave_off);


@@ -280,6 +280,7 @@ cpm_setbrg(uint brg, uint rate)
out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
CPM_BRG_EN | CPM_BRG_DIV16);
}
EXPORT_SYMBOL(cpm_setbrg);
struct cpm_ioport16 {
__be16 dir, par, odr_sor, dat, intr;


@@ -60,7 +60,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
addr = be64_to_cpu(addr);
pr_debug("Kernel metadata addr: %llx\n", addr);
opal_fdm_active = (void *)addr;
if (opal_fdm_active->registered_regions == 0)
if (be16_to_cpu(opal_fdm_active->registered_regions) == 0)
return;
ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_BOOT_MEM, &addr);
@@ -95,17 +95,17 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf);
static void opal_fadump_update_config(struct fw_dump *fadump_conf,
const struct opal_fadump_mem_struct *fdm)
{
pr_debug("Boot memory regions count: %d\n", fdm->region_cnt);
pr_debug("Boot memory regions count: %d\n", be16_to_cpu(fdm->region_cnt));
/*
* The destination address of the first boot memory region is the
* destination address of boot memory regions.
*/
fadump_conf->boot_mem_dest_addr = fdm->rgn[0].dest;
fadump_conf->boot_mem_dest_addr = be64_to_cpu(fdm->rgn[0].dest);
pr_debug("Destination address of boot memory regions: %#016llx\n",
fadump_conf->boot_mem_dest_addr);
fadump_conf->fadumphdr_addr = fdm->fadumphdr_addr;
fadump_conf->fadumphdr_addr = be64_to_cpu(fdm->fadumphdr_addr);
}
/*
@@ -126,9 +126,9 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
fadump_conf->boot_memory_size = 0;
pr_debug("Boot memory regions:\n");
for (i = 0; i < fdm->region_cnt; i++) {
base = fdm->rgn[i].src;
size = fdm->rgn[i].size;
for (i = 0; i < be16_to_cpu(fdm->region_cnt); i++) {
base = be64_to_cpu(fdm->rgn[i].src);
size = be64_to_cpu(fdm->rgn[i].size);
pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size);
fadump_conf->boot_mem_addr[i] = base;
@@ -143,7 +143,7 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
* Start address of reserve dump area (permanent reservation) for
* re-registering FADump after dump capture.
*/
fadump_conf->reserve_dump_area_start = fdm->rgn[0].dest;
fadump_conf->reserve_dump_area_start = be64_to_cpu(fdm->rgn[0].dest);
/*
* Rarely, but it can so happen that system crashes before all
@@ -155,13 +155,14 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
* Hope the memory that could not be preserved only has pages
* that are usually filtered out while saving the vmcore.
*/
if (fdm->region_cnt > fdm->registered_regions) {
if (be16_to_cpu(fdm->region_cnt) > be16_to_cpu(fdm->registered_regions)) {
pr_warn("Not all memory regions were saved!!!\n");
pr_warn(" Unsaved memory regions:\n");
i = fdm->registered_regions;
while (i < fdm->region_cnt) {
i = be16_to_cpu(fdm->registered_regions);
while (i < be16_to_cpu(fdm->region_cnt)) {
pr_warn("\t[%03d] base: 0x%llx, size: 0x%llx\n",
i, fdm->rgn[i].src, fdm->rgn[i].size);
i, be64_to_cpu(fdm->rgn[i].src),
be64_to_cpu(fdm->rgn[i].size));
i++;
}
@@ -170,7 +171,7 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
}
fadump_conf->boot_mem_top = (fadump_conf->boot_memory_size + hole_size);
fadump_conf->boot_mem_regs_cnt = fdm->region_cnt;
fadump_conf->boot_mem_regs_cnt = be16_to_cpu(fdm->region_cnt);
opal_fadump_update_config(fadump_conf, fdm);
}
@@ -178,34 +179,37 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
static void opal_fadump_init_metadata(struct opal_fadump_mem_struct *fdm)
{
fdm->version = OPAL_FADUMP_VERSION;
fdm->region_cnt = 0;
fdm->registered_regions = 0;
fdm->fadumphdr_addr = 0;
fdm->region_cnt = cpu_to_be16(0);
fdm->registered_regions = cpu_to_be16(0);
fdm->fadumphdr_addr = cpu_to_be64(0);
}
static u64 opal_fadump_init_mem_struct(struct fw_dump *fadump_conf)
{
u64 addr = fadump_conf->reserve_dump_area_start;
u16 reg_cnt;
int i;
opal_fdm = __va(fadump_conf->kernel_metadata);
opal_fadump_init_metadata(opal_fdm);
/* Boot memory regions */
reg_cnt = be16_to_cpu(opal_fdm->region_cnt);
for (i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) {
opal_fdm->rgn[i].src = fadump_conf->boot_mem_addr[i];
opal_fdm->rgn[i].dest = addr;
opal_fdm->rgn[i].size = fadump_conf->boot_mem_sz[i];
opal_fdm->rgn[i].src = cpu_to_be64(fadump_conf->boot_mem_addr[i]);
opal_fdm->rgn[i].dest = cpu_to_be64(addr);
opal_fdm->rgn[i].size = cpu_to_be64(fadump_conf->boot_mem_sz[i]);
opal_fdm->region_cnt++;
reg_cnt++;
addr += fadump_conf->boot_mem_sz[i];
}
opal_fdm->region_cnt = cpu_to_be16(reg_cnt);
/*
* Kernel metadata is passed to f/w and retrieved in capture kerenl.
* So, use it to save fadump header address instead of calculating it.
*/
opal_fdm->fadumphdr_addr = (opal_fdm->rgn[0].dest +
opal_fdm->fadumphdr_addr = cpu_to_be64(be64_to_cpu(opal_fdm->rgn[0].dest) +
fadump_conf->boot_memory_size);
opal_fadump_update_config(fadump_conf, opal_fdm);
@@ -269,18 +273,21 @@ static u64 opal_fadump_get_bootmem_min(void)
static int opal_fadump_register(struct fw_dump *fadump_conf)
{
s64 rc = OPAL_PARAMETER;
u16 registered_regs;
int i, err = -EIO;
for (i = 0; i < opal_fdm->region_cnt; i++) {
registered_regs = be16_to_cpu(opal_fdm->registered_regions);
for (i = 0; i < be16_to_cpu(opal_fdm->region_cnt); i++) {
rc = opal_mpipl_update(OPAL_MPIPL_ADD_RANGE,
opal_fdm->rgn[i].src,
opal_fdm->rgn[i].dest,
opal_fdm->rgn[i].size);
be64_to_cpu(opal_fdm->rgn[i].src),
be64_to_cpu(opal_fdm->rgn[i].dest),
be64_to_cpu(opal_fdm->rgn[i].size));
if (rc != OPAL_SUCCESS)
break;
opal_fdm->registered_regions++;
registered_regs++;
}
opal_fdm->registered_regions = cpu_to_be16(registered_regs);
switch (rc) {
case OPAL_SUCCESS:
@@ -291,7 +298,8 @@ static int opal_fadump_register(struct fw_dump *fadump_conf)
case OPAL_RESOURCE:
/* If MAX regions limit in f/w is hit, warn and proceed. */
pr_warn("%d regions could not be registered for MPIPL as MAX limit is reached!\n",
(opal_fdm->region_cnt - opal_fdm->registered_regions));
(be16_to_cpu(opal_fdm->region_cnt) -
be16_to_cpu(opal_fdm->registered_regions)));
fadump_conf->dump_registered = 1;
err = 0;
break;
@@ -312,7 +320,7 @@ static int opal_fadump_register(struct fw_dump *fadump_conf)
* If some regions were registered before OPAL_MPIPL_ADD_RANGE
* OPAL call failed, unregister all regions.
*/
if ((err < 0) && (opal_fdm->registered_regions > 0))
if ((err < 0) && (be16_to_cpu(opal_fdm->registered_regions) > 0))
opal_fadump_unregister(fadump_conf);
return err;
@@ -328,7 +336,7 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf)
return -EIO;
}
opal_fdm->registered_regions = 0;
opal_fdm->registered_regions = cpu_to_be16(0);
fadump_conf->dump_registered = 0;
return 0;
}
@@ -563,19 +571,20 @@ static void opal_fadump_region_show(struct fw_dump *fadump_conf,
else
fdm_ptr = opal_fdm;
for (i = 0; i < fdm_ptr->region_cnt; i++) {
for (i = 0; i < be16_to_cpu(fdm_ptr->region_cnt); i++) {
/*
* Only regions that are registered for MPIPL
* would have dump data.
*/
if ((fadump_conf->dump_active) &&
(i < fdm_ptr->registered_regions))
dumped_bytes = fdm_ptr->rgn[i].size;
(i < be16_to_cpu(fdm_ptr->registered_regions)))
dumped_bytes = be64_to_cpu(fdm_ptr->rgn[i].size);
seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ",
fdm_ptr->rgn[i].src, fdm_ptr->rgn[i].dest);
be64_to_cpu(fdm_ptr->rgn[i].src),
be64_to_cpu(fdm_ptr->rgn[i].dest));
seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n",
fdm_ptr->rgn[i].size, dumped_bytes);
be64_to_cpu(fdm_ptr->rgn[i].size), dumped_bytes);
}
/* Dump is active. Show reserved area start address. */
@@ -624,6 +633,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
{
const __be32 *prop;
unsigned long dn;
__be64 be_addr;
u64 addr = 0;
int i, len;
s64 ret;
@@ -680,13 +690,13 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
if (!prop)
return;
ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr);
if ((ret != OPAL_SUCCESS) || !addr) {
ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &be_addr);
if ((ret != OPAL_SUCCESS) || !be_addr) {
pr_err("Failed to get Kernel metadata (%lld)\n", ret);
return;
}
addr = be64_to_cpu(addr);
addr = be64_to_cpu(be_addr);
pr_debug("Kernel metadata addr: %llx\n", addr);
opal_fdm_active = __va(addr);
@@ -697,14 +707,14 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
}
/* Kernel regions not registered with f/w for MPIPL */
if (opal_fdm_active->registered_regions == 0) {
if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) {
opal_fdm_active = NULL;
return;
}
ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &addr);
if (addr) {
addr = be64_to_cpu(addr);
ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &be_addr);
if (be_addr) {
addr = be64_to_cpu(be_addr);
pr_debug("CPU metadata addr: %llx\n", addr);
opal_cpu_metadata = __va(addr);
}


@@ -31,14 +31,14 @@
* OPAL FADump kernel metadata
*
* The address of this structure will be registered with f/w for retrieving
* and processing during crash dump.
* in the capture kernel to process the crash dump.
*/
struct opal_fadump_mem_struct {
u8 version;
u8 reserved[3];
u16 region_cnt; /* number of regions */
u16 registered_regions; /* Regions registered for MPIPL */
u64 fadumphdr_addr;
__be16 region_cnt; /* number of regions */
__be16 registered_regions; /* Regions registered for MPIPL */
__be64 fadumphdr_addr;
struct opal_mpipl_region rgn[FADUMP_MAX_MEM_REGS];
} __packed;
@@ -135,7 +135,7 @@ static inline void opal_fadump_read_regs(char *bufp, unsigned int regs_cnt,
for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) {
reg_entry = (struct hdat_fadump_reg_entry *)bufp;
val = (cpu_endian ? be64_to_cpu(reg_entry->reg_val) :
reg_entry->reg_val);
(u64)(reg_entry->reg_val));
opal_fadump_set_regval_regnum(regs,
be32_to_cpu(reg_entry->reg_type),
be32_to_cpu(reg_entry->reg_num),


@@ -55,6 +55,7 @@ static int __init uv_init(void)
return -ENODEV;
uv_memcons = memcons_init(node, "memcons");
of_node_put(node);
if (!uv_memcons)
return -ENOENT;


@@ -352,7 +352,7 @@ int vas_setup_fault_window(struct vas_instance *vinst)
vas_init_rx_win_attr(&attr, VAS_COP_TYPE_FAULT);
attr.rx_fifo_size = vinst->fault_fifo_size;
attr.rx_fifo = vinst->fault_fifo;
attr.rx_fifo = __pa(vinst->fault_fifo);
/*
* Max creds is based on number of CRBs can fit in the FIFO.


@@ -403,7 +403,7 @@ static void init_winctx_regs(struct vas_window *window,
*
* See also: Design note in function header.
*/
val = __pa(winctx->rx_fifo);
val = winctx->rx_fifo;
val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0);
write_hvwc_reg(window, VREG(LFIFO_BAR), val);
@@ -737,7 +737,7 @@ static void init_winctx_for_rxwin(struct vas_window *rxwin,
*/
winctx->fifo_disable = true;
winctx->intr_disable = true;
winctx->rx_fifo = NULL;
winctx->rx_fifo = 0;
}
winctx->lnotify_lpid = rxattr->lnotify_lpid;


@@ -383,7 +383,7 @@ struct vas_window {
* is a container for the register fields in the window context.
*/
struct vas_winctx {
void *rx_fifo;
u64 rx_fifo;
int rx_fifo_size;
int wcreds_max;
int rsvd_txbuf_count;


@@ -403,9 +403,10 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
}
/* Initialize the DART HW */
if (dart_init(dn) != 0)
if (dart_init(dn) != 0) {
of_node_put(dn);
return;
}
/*
* U4 supports a DART bypass, we use it for 64-bit capable devices to
* improve performance. However, that only works for devices connected
@@ -418,6 +419,7 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
/* Setup pci_dma ops */
set_pci_dma_ops(&dma_iommu_ops);
of_node_put(dn);
}
#ifdef CONFIG_PM


@@ -505,8 +505,10 @@ int fsl_rio_setup(struct platform_device *dev)
if (rc) {
dev_err(&dev->dev, "Can't get %pOF property 'reg'\n",
rmu_node);
of_node_put(rmu_node);
goto err_rmu;
}
of_node_put(rmu_node);
rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs));
if (!rmu_regs_win) {
dev_err(&dev->dev, "Unable to map rmu register window\n");


@@ -195,6 +195,7 @@ int icp_opal_init(void)
printk("XICS: Using OPAL ICP fallbacks\n");
of_node_put(np);
return 0;
}


@@ -4,7 +4,7 @@
static inline bool arch_irq_work_has_interrupt(void)
{
return true;
return IS_ENABLED(CONFIG_SMP);
}
extern void arch_irq_work_raise(void);
#endif /* _ASM_RISCV_IRQ_WORK_H */


@@ -261,6 +261,7 @@ clear_bss_done:
REG_S a0, (a2)
/* Initialize page tables and relocate to virtual addresses */
la tp, init_task
la sp, init_thread_union + THREAD_SIZE
mv a0, s1
call setup_vm


@@ -9,6 +9,8 @@
#ifndef _S390_KEXEC_H
#define _S390_KEXEC_H
#include <linux/module.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/setup.h>
@@ -83,4 +85,12 @@ struct kimage_arch {
extern const struct kexec_file_ops s390_kexec_image_ops;
extern const struct kexec_file_ops s390_kexec_elf_ops;
#ifdef CONFIG_KEXEC_FILE
struct purgatory_info;
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
#endif
#endif /*_S390_KEXEC_H */


@@ -46,9 +46,16 @@ static inline bool test_preempt_need_resched(void)
static inline void __preempt_count_add(int val)
{
if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
/*
* With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
* enabled, gcc 12 fails to handle __builtin_constant_p().
*/
if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
__atomic_add_const(val, &S390_lowcore.preempt_count);
else
return;
}
}
__atomic_add(val, &S390_lowcore.preempt_count);
}


@@ -51,7 +51,7 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
if (!stack)
return NULL;
return (struct kvm_s390_sie_block *) stack->empty1[0];
return (struct kvm_s390_sie_block *)stack->empty1[1];
}
static bool is_in_guest(struct pt_regs *regs)


@@ -220,7 +220,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
unsigned long *stack_out)
{
struct winch_data data;
int fds[2], n, err;
int fds[2], n, err, pid;
char c;
err = os_pipe(fds, 1, 1);
@@ -238,8 +238,9 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
* problem with /dev/net/tun, which if held open by this
* thread, prevents the TUN/TAP device from being reused.
*/
err = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
if (err < 0) {
pid = run_helper_thread(winch_thread, &data, CLONE_FILES, stack_out);
if (pid < 0) {
err = pid;
printk(UM_KERN_ERR "fork of winch_thread failed - errno = %d\n",
-err);
goto out_close;
@@ -263,7 +264,7 @@ static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
goto out_close;
}
return err;
return pid;
out_close:
close(fds[1]);


@@ -63,6 +63,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_RESTORE_SIGMASK 7
#define TIF_NOTIFY_RESUME 8
#define TIF_SECCOMP 9 /* secure computing */
#define TIF_SINGLESTEP 10 /* single stepping userspace */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
@@ -70,5 +71,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#endif


@@ -44,7 +44,7 @@ void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
{
PT_REGS_IP(regs) = eip;
PT_REGS_SP(regs) = esp;
current->ptrace &= ~PT_DTRACE;
clear_thread_flag(TIF_SINGLESTEP);
#ifdef SUBARCH_EXECVE1
SUBARCH_EXECVE1(regs->regs);
#endif


@@ -341,7 +341,7 @@ int singlestepping(void * t)
{
struct task_struct *task = t ? t : current;
if (!(task->ptrace & PT_DTRACE))
if (!test_thread_flag(TIF_SINGLESTEP))
return 0;
if (task->thread.singlestep_syscall)


@@ -12,7 +12,7 @@
void user_enable_single_step(struct task_struct *child)
{
child->ptrace |= PT_DTRACE;
set_tsk_thread_flag(child, TIF_SINGLESTEP);
child->thread.singlestep_syscall = 0;
#ifdef SUBARCH_SET_SINGLESTEPPING
@@ -22,7 +22,7 @@ void user_enable_single_step(struct task_struct *child)
void user_disable_single_step(struct task_struct *child)
{
child->ptrace &= ~PT_DTRACE;
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
child->thread.singlestep_syscall = 0;
#ifdef SUBARCH_SET_SINGLESTEPPING
@@ -121,7 +121,7 @@ static void send_sigtrap(struct uml_pt_regs *regs, int error_code)
}
/*
* XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and
* XXX Check TIF_SINGLESTEP for singlestepping check and
* PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check
*/
int syscall_trace_enter(struct pt_regs *regs)
@@ -145,7 +145,7 @@ void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(regs);
/* Fake a debug trap */
if (ptraced & PT_DTRACE)
if (test_thread_flag(TIF_SINGLESTEP))
send_sigtrap(&regs->regs, 0);
if (!test_thread_flag(TIF_SYSCALL_TRACE))


@@ -53,7 +53,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
unsigned long sp;
int err;
if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
if (test_thread_flag(TIF_SINGLESTEP) && (current->ptrace & PT_PTRACED))
singlestep = 1;
/* Did we come from a system call? */
@@ -128,7 +128,7 @@ void do_signal(struct pt_regs *regs)
* on the host. The tracing thread will check this flag and
* PTRACE_SYSCALL if necessary.
*/
if (current->ptrace & PT_DTRACE)
if (test_thread_flag(TIF_SINGLESTEP))
current->thread.singlestep_syscall =
is_syscall(PT_REGS_IP(&current->thread.regs));


@@ -1328,7 +1328,7 @@ config MICROCODE
config MICROCODE_INTEL
bool "Intel microcode loading support"
depends on MICROCODE
depends on CPU_SUP_INTEL && MICROCODE
default MICROCODE
help
This options enables microcode patch loading support for Intel
@@ -1340,7 +1340,7 @@ config MICROCODE_INTEL
config MICROCODE_AMD
bool "AMD microcode loading support"
depends on MICROCODE
depends on CPU_SUP_AMD && MICROCODE
help
If you select this option, microcode patch loading support for AMD
processors will be enabled.


@@ -500,6 +500,7 @@ SYM_CODE_START(\asmsym)
call vc_switch_off_ist
movq %rax, %rsp /* Switch to new stack */
ENCODE_FRAME_POINTER
UNWIND_HINT_REGS
/* Update pt_regs */


@@ -440,7 +440,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
static __init int vdso_setup(char *s)
{
vdso64_enabled = simple_strtoul(s, NULL, 0);
return 0;
return 1;
}
__setup("vdso=", vdso_setup);


@@ -312,6 +312,16 @@ static int perf_ibs_init(struct perf_event *event)
hwc->config_base = perf_ibs->msr;
hwc->config = config;
/*
* rip recorded by IbsOpRip will not be consistent with rsp and rbp
* recorded as part of interrupt regs. Thus we need to use rip from
* interrupt regs while unwinding call stack. Setting _EARLY flag
* makes sure we unwind call-stack before perf sample rip is set to
* IbsOpRip.
*/
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
return 0;
}
@@ -692,6 +702,14 @@ fail:
data.raw = &raw;
}
/*
* rip recorded by IbsOpRip will not be consistent with rsp and rbp
* recorded as part of interrupt regs. Thus we need to use rip from
* interrupt regs while unwinding call stack.
*/
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
data.callchain = perf_callchain(event, iregs);
throttle = perf_event_overflow(event, &data, &regs);
out:
if (throttle) {
@@ -764,9 +782,10 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
return ret;
}
static __init void perf_event_ibs_init(void)
static __init int perf_event_ibs_init(void)
{
struct attribute **attr = ibs_op_format_attrs;
int ret;
/*
* Some chips fail to reset the fetch count when it is written; instead
@@ -778,7 +797,9 @@ static __init void perf_event_ibs_init(void)
if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;
perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
ret = perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
if (ret)
return ret;
if (ibs_caps & IBS_CAPS_OPCNT) {
perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
@@ -791,15 +812,35 @@ static __init void perf_event_ibs_init(void)
perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
}
perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
ret = perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
if (ret)
goto err_op;
ret = register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
if (ret)
goto err_nmi;
register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
return 0;
err_nmi:
perf_pmu_unregister(&perf_ibs_op.pmu);
free_percpu(perf_ibs_op.pcpu);
perf_ibs_op.pcpu = NULL;
err_op:
perf_pmu_unregister(&perf_ibs_fetch.pmu);
free_percpu(perf_ibs_fetch.pcpu);
perf_ibs_fetch.pcpu = NULL;
return ret;
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
static __init void perf_event_ibs_init(void) { }
static __init int perf_event_ibs_init(void)
{
return 0;
}
#endif
@@ -1069,9 +1110,7 @@ static __init int amd_ibs_init(void)
x86_pmu_amd_ibs_starting_cpu,
x86_pmu_amd_ibs_dying_cpu);
perf_event_ibs_init();
return 0;
return perf_event_ibs_init();
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */


@@ -254,7 +254,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
INTEL_EVENT_CONSTRAINT(0x32, 0xf), /* SW_PREFETCH_ACCESS.* */
INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x56, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */


@@ -13,7 +13,19 @@
/* Asm macros */
#define ACPI_FLUSH_CPU_CACHE() wbinvd()
/*
* ACPI_FLUSH_CPU_CACHE() flushes caches on entering sleep states.
* It is required to prevent data loss.
*
* While running inside virtual machine, the kernel can bypass cache flushing.
* Changing sleep state in a virtual machine doesn't affect the host system
* sleep state and cannot lead to data loss.
*/
#define ACPI_FLUSH_CPU_CACHE() \
do { \
if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) \
wbinvd(); \
} while (0)
int __acpi_acquire_global_lock(unsigned int *lock);
int __acpi_release_global_lock(unsigned int *lock);


@@ -191,6 +191,14 @@ extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
#ifdef CONFIG_KEXEC_FILE
struct purgatory_info;
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
#endif
#endif
typedef void crash_vmclear_fn(void);


@@ -21,7 +21,6 @@ struct saved_context {
#endif
unsigned long cr0, cr2, cr3, cr4;
u64 misc_enable;
bool misc_enable_saved;
struct saved_msrs saved_msrs;
struct desc_ptr gdt_desc;
struct desc_ptr idt;
@@ -30,6 +29,7 @@ struct saved_context {
unsigned long tr;
unsigned long safety;
unsigned long return_address;
bool misc_enable_saved;
} __attribute__((packed));
/* routines for saving/restoring kernel state */


@@ -14,9 +14,13 @@
* Image of the saved processor state, used by the low level ACPI suspend to
* RAM code and by the low level hibernation code.
*
* If you modify it, fix arch/x86/kernel/acpi/wakeup_64.S and make sure that
* __save/__restore_processor_state(), defined in arch/x86/kernel/suspend_64.c,
* still work as required.
* If you modify it, check how it is used in arch/x86/kernel/acpi/wakeup_64.S
* and make sure that __save/__restore_processor_state(), defined in
* arch/x86/power/cpu.c, still work as required.
*
* Because the structure is packed, make sure to avoid unaligned members. For
* optimisation purposes but also because tools like kmemleak only search for
* pointers that are aligned.
*/
struct saved_context {
struct pt_regs regs;
@@ -36,7 +40,6 @@ struct saved_context {
unsigned long cr0, cr2, cr3, cr4;
u64 misc_enable;
bool misc_enable_saved;
struct saved_msrs saved_msrs;
unsigned long efer;
u16 gdt_pad; /* Unused */
@@ -48,6 +51,7 @@ struct saved_context {
unsigned long tr;
unsigned long safety;
unsigned long return_address;
bool misc_enable_saved;
} __attribute__((packed));
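The move of misc_enable_saved follows from the comment added above: in a packed struct, a 1-byte member placed in the middle shifts every later field by one byte, so pointer-sized members such as return_address can end up unaligned and are then skipped by kmemleak's aligned pointer scan. A hypothetical illustration of the two layouts, not part of the patch:

/* Illustration only. */
struct ctx_bad {
	unsigned long cr3;	/* offset 0 */
	bool saved;		/* offset 8 */
	unsigned long ret;	/* offset 9: unaligned in a packed struct */
} __attribute__((packed));

struct ctx_good {
	unsigned long cr3;	/* offset 0 */
	unsigned long ret;	/* offset 8: naturally aligned */
	bool saved;		/* offset 16 */
} __attribute__((packed));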
#define loaddebug(thread,register) \


@@ -168,7 +168,7 @@ static __init int setup_apicpmtimer(char *s)
{
apic_calibrate_pmtmr = 1;
notsc_setup(NULL);
return 0;
return 1;
}
__setup("apicpmtimer", setup_apicpmtimer);
#endif


@@ -199,7 +199,13 @@ static void __init uv_tsc_check_sync(void)
int mmr_shift;
char *state;
/* Different returns from different UV BIOS versions */
/* UV5 guarantees synced TSCs; do not zero TSC_ADJUST */
if (!is_uv(UV2|UV3|UV4)) {
mark_tsc_async_resets("UV5+");
return;
}
/* UV2,3,4, UV BIOS TSC sync state available */
mmr = uv_early_read_mmr(UVH_TSC_SYNC_MMR);
mmr_shift =
is_uv2_hub() ? UVH_TSC_SYNC_SHIFT_UV2K : UVH_TSC_SYNC_SHIFT;


@@ -88,7 +88,7 @@ static bool ring3mwait_disabled __read_mostly;
static int __init ring3mwait_disable(char *__unused)
{
ring3mwait_disabled = true;
return 0;
return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);


@@ -1457,10 +1457,23 @@ out_free:
kfree(bank);
}
static void __threshold_remove_device(struct threshold_bank **bp)
{
unsigned int bank, numbanks = this_cpu_read(mce_num_banks);
for (bank = 0; bank < numbanks; bank++) {
if (!bp[bank])
continue;
threshold_remove_bank(bp[bank]);
bp[bank] = NULL;
}
kfree(bp);
}
int mce_threshold_remove_device(unsigned int cpu)
{
struct threshold_bank **bp = this_cpu_read(threshold_banks);
unsigned int bank, numbanks = this_cpu_read(mce_num_banks);
if (!bp)
return 0;
@@ -1471,13 +1484,7 @@ int mce_threshold_remove_device(unsigned int cpu)
*/
this_cpu_write(threshold_banks, NULL);
for (bank = 0; bank < numbanks; bank++) {
if (bp[bank]) {
threshold_remove_bank(bp[bank]);
bp[bank] = NULL;
}
}
kfree(bp);
__threshold_remove_device(bp);
return 0;
}
@@ -1514,15 +1521,14 @@ int mce_threshold_create_device(unsigned int cpu)
if (!(this_cpu_read(bank_map) & (1 << bank)))
continue;
err = threshold_create_bank(bp, cpu, bank);
if (err)
goto out_err;
if (err) {
__threshold_remove_device(bp);
return err;
}
}
this_cpu_write(threshold_banks, bp);
if (thresholding_irq_en)
mce_threshold_vector = amd_threshold_interrupt;
return 0;
out_err:
mce_threshold_remove_device(cpu);
return err;
}


@@ -175,8 +175,7 @@ void set_task_blockstep(struct task_struct *task, bool on)
*
* NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if
* task is current or it can't be running, otherwise we can race
* with __switch_to_xtra(). We rely on ptrace_freeze_traced() but
* PTRACE_KILL is not safe.
* with __switch_to_xtra(). We rely on ptrace_freeze_traced().
*/
local_irq_disable();
debugctl = get_debugctlmsr();


@@ -68,9 +68,6 @@ static int __init control_va_addr_alignment(char *str)
if (*str == 0)
return 1;
if (*str == '=')
str++;
if (!strcmp(str, "32"))
va_align.flags = ALIGN_VA_32;
else if (!strcmp(str, "64"))
@@ -80,11 +77,11 @@ static int __init control_va_addr_alignment(char *str)
else if (!strcmp(str, "on"))
va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
else
return 0;
pr_warn("invalid option value: 'align_va_addr=%s'\n", str);
return 1;
}
__setup("align_va_addr", control_va_addr_alignment);
__setup("align_va_addr=", control_va_addr_alignment);
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,


@@ -3668,12 +3668,34 @@ vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
}
static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
struct vmcs12 *vmcs12,
u32 vm_exit_reason, u32 exit_intr_info)
{
u32 idt_vectoring;
unsigned int nr;
if (vcpu->arch.exception.injected) {
/*
* Per the SDM, VM-Exits due to double and triple faults are never
* considered to occur during event delivery, even if the double/triple
* fault is the result of an escalating vectoring issue.
*
* Note, the SDM qualifies the double fault behavior with "The original
* event results in a double-fault exception". It's unclear why the
* qualification exists since exits due to double fault can occur only
* while vectoring a different exception (injected events are never
* subject to interception), i.e. there's _always_ an original event.
*
* The SDM also uses NMI as a confusing example for the "original event
* causes the VM exit directly" clause. NMI isn't special in any way,
* the same rule applies to all events that cause an exit directly.
* NMI is an odd choice for the example because NMIs can only occur on
* instruction boundaries, i.e. they _can't_ occur during vectoring.
*/
if ((u16)vm_exit_reason == EXIT_REASON_TRIPLE_FAULT ||
((u16)vm_exit_reason == EXIT_REASON_EXCEPTION_NMI &&
is_double_fault(exit_intr_info))) {
vmcs12->idt_vectoring_info_field = 0;
} else if (vcpu->arch.exception.injected) {
nr = vcpu->arch.exception.nr;
idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
@@ -3706,6 +3728,8 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
idt_vectoring |= INTR_TYPE_EXT_INTR;
vmcs12->idt_vectoring_info_field = idt_vectoring;
} else {
vmcs12->idt_vectoring_info_field = 0;
}
}
@@ -4143,12 +4167,12 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
/* update exit information fields: */
vmcs12->vm_exit_reason = vm_exit_reason;
vmcs12->exit_qualification = exit_qualification;
vmcs12->vm_exit_intr_info = exit_intr_info;
vmcs12->idt_vectoring_info_field = 0;
vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
/*
* On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched
* and only EXIT_REASON and EXIT_QUALIFICATION are updated, all other
* exit info fields are unmodified.
*/
if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
vmcs12->launch_state = 1;
@@ -4160,7 +4184,12 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
* Transfer the event that L0 or L1 may wanted to inject into
* L2 to IDT_VECTORING_INFO_FIELD.
*/
vmcs12_save_pending_event(vcpu, vmcs12);
vmcs12_save_pending_event(vcpu, vmcs12,
vm_exit_reason, exit_intr_info);
vmcs12->vm_exit_intr_info = exit_intr_info;
vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
/*
* According to spec, there's no need to store the guest's


@@ -102,6 +102,11 @@ static inline bool is_breakpoint(u32 intr_info)
return is_exception_n(intr_info, BP_VECTOR);
}
static inline bool is_double_fault(u32 intr_info)
{
return is_exception_n(intr_info, DF_VECTOR);
}
static inline bool is_page_fault(u32 intr_info)
{
return is_exception_n(intr_info, PF_VECTOR);


@@ -54,8 +54,8 @@ static void delay_loop(u64 __loops)
" jnz 2b \n"
"3: dec %0 \n"
: /* we don't need output */
:"a" (loops)
: "+a" (loops)
:
);
}
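The constraint change above is needed because the asm body decrements %0: with a plain input constraint ("a"), the compiler is allowed to assume the register still holds the original value after the statement, which can miscompile once the function is inlined. Declaring the operand as a read-write output ("+a") tells the compiler the value is modified. A small sketch of the same pattern, with a hypothetical helper name:

/* Hypothetical helper: the asm modifies "n", so it is declared as a
 * read-write operand ("+a") rather than an input ("a"). */
static __always_inline void spin_down(unsigned long n)
{
	asm volatile("1:	dec %0\n"
		     "	jnz 1b"
		     : "+a" (n));
}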


@@ -101,7 +101,7 @@ int pat_debug_enable;
static int __init pat_debug_setup(char *str)
{
pat_debug_enable = 1;
return 0;
return 1;
}
__setup("debugpat", pat_debug_setup);


@@ -23,9 +23,11 @@ static long write_ldt_entry(struct mm_id *mm_idp, int func,
{
long res;
void *stub_addr;
BUILD_BUG_ON(sizeof(*desc) % sizeof(long));
res = syscall_stub_data(mm_idp, (unsigned long *)desc,
(sizeof(*desc) + sizeof(long) - 1) &
~(sizeof(long) - 1),
sizeof(*desc) / sizeof(long),
addr, &stub_addr);
if (!res) {
unsigned long args[] = { func,


@@ -226,12 +226,12 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
void user_enable_single_step(struct task_struct *child)
{
child->ptrace |= PT_SINGLESTEP;
set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *child)
{
child->ptrace &= ~PT_SINGLESTEP;
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*


@@ -465,7 +465,7 @@ static void do_signal(struct pt_regs *regs)
/* Set up the stack frame */
ret = setup_frame(&ksig, sigmask_to_save(), regs);
signal_setup_done(ret, &ksig, 0);
if (current->ptrace & PT_SINGLESTEP)
if (test_thread_flag(TIF_SINGLESTEP))
task_pt_regs(current)->icountlevel = 1;
return;
@@ -491,7 +491,7 @@ static void do_signal(struct pt_regs *regs)
/* If there's no signal to deliver, we just restore the saved mask. */
restore_saved_sigmask();
if (current->ptrace & PT_SINGLESTEP)
if (test_thread_flag(TIF_SINGLESTEP))
task_pt_regs(current)->icountlevel = 1;
return;
}


@@ -213,12 +213,18 @@ static ssize_t proc_read_simdisk(struct file *file, char __user *buf,
struct simdisk *dev = PDE_DATA(file_inode(file));
const char *s = dev->filename;
if (s) {
ssize_t n = simple_read_from_buffer(buf, size, ppos,
s, strlen(s));
if (n < 0)
return n;
buf += n;
size -= n;
ssize_t len = strlen(s);
char *temp = kmalloc(len + 2, GFP_KERNEL);
if (!temp)
return -ENOMEM;
len = scnprintf(temp, len + 2, "%s\n", s);
len = simple_read_from_buffer(buf, size, ppos,
temp, len);
kfree(temp);
return len;
}
return simple_read_from_buffer(buf, size, ppos, "\n", 1);
}


@@ -553,6 +553,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
*/
bfqg->bfqd = bfqd;
bfqg->active_entities = 0;
bfqg->online = true;
bfqg->rq_pos_tree = RB_ROOT;
}
@@ -581,28 +582,11 @@ static void bfq_group_set_parent(struct bfq_group *bfqg,
entity->sched_data = &parent->sched_data;
}
static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
struct blkcg *blkcg)
static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
{
struct blkcg_gq *blkg;
blkg = blkg_lookup(blkcg, bfqd->queue);
if (likely(blkg))
return blkg_to_bfqg(blkg);
return NULL;
}
struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
struct blkcg *blkcg)
{
struct bfq_group *bfqg, *parent;
struct bfq_group *parent;
struct bfq_entity *entity;
bfqg = bfq_lookup_bfqg(bfqd, blkcg);
if (unlikely(!bfqg))
return NULL;
/*
* Update chain of bfq_groups as we might be handling a leaf group
* which, along with some of its relatives, has not been hooked yet
@@ -619,9 +603,25 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
bfq_group_set_parent(curr_bfqg, parent);
}
}
}
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
struct blkcg_gq *blkg = bio->bi_blkg;
struct bfq_group *bfqg;
while (blkg) {
bfqg = blkg_to_bfqg(blkg);
if (bfqg->online) {
bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
return bfqg;
}
blkg = blkg->parent;
}
bio_associate_blkg_from_css(bio,
&bfqg_to_blkg(bfqd->root_group)->blkcg->css);
return bfqd->root_group;
}
/**
* bfq_bfqq_move - migrate @bfqq to @bfqg.
@@ -696,25 +696,15 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
* Move bic to blkcg, assuming that bfqd->lock is held; which makes
* sure that the reference to cgroup is valid across the call (see
* comments in bfq_bic_update_cgroup on this issue)
*
* NOTE: an alternative approach might have been to store the current
* cgroup in bfqq and getting a reference to it, reducing the lookup
* time here, at the price of slightly more complex code.
*/
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
struct bfq_io_cq *bic,
struct blkcg *blkcg)
struct bfq_group *bfqg)
{
struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
struct bfq_group *bfqg;
struct bfq_entity *entity;
bfqg = bfq_find_set_group(bfqd, blkcg);
if (unlikely(!bfqg))
bfqg = bfqd->root_group;
if (async_bfqq) {
entity = &async_bfqq->entity;
@@ -725,9 +715,39 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
}
if (sync_bfqq) {
entity = &sync_bfqq->entity;
if (entity->sched_data != &bfqg->sched_data)
if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
/* We are the only user of this bfqq, just move it */
if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
} else {
struct bfq_queue *bfqq;
/*
* The queue was merged to a different queue. Check
* that the merge chain still belongs to the same
* cgroup.
*/
for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
if (bfqq->entity.sched_data !=
&bfqg->sched_data)
break;
if (bfqq) {
/*
* Some queue changed cgroup so the merge is
* not valid anymore. We cannot easily just
* cancel the merge (by clearing new_bfqq) as
* there may be other processes using this
* queue and holding refs to all queues below
* sync_bfqq->new_bfqq. Similarly if the merge
* already happened, we need to detach from
* bfqq now so that we cannot merge bio to a
* request from the old cgroup.
*/
bfq_put_cooperator(sync_bfqq);
bfq_release_process_ref(bfqd, sync_bfqq);
bic_set_bfqq(bic, NULL, 1);
}
}
}
return bfqg;
@@ -736,20 +756,24 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
struct bfq_data *bfqd = bic_to_bfqd(bic);
struct bfq_group *bfqg = NULL;
struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
uint64_t serial_nr;
rcu_read_lock();
serial_nr = __bio_blkcg(bio)->css.serial_nr;
serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;
/*
* Check whether blkcg has changed. The condition may trigger
* spuriously on a newly created cic but there's no harm.
*/
if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
goto out;
return;
bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
/*
* New cgroup for this process. Make sure it is linked to bfq internal
* cgroup hierarchy.
*/
bfq_link_bfqg(bfqd, bfqg);
__bfq_bic_change_cgroup(bfqd, bic, bfqg);
/*
* Update blkg_path for bfq_log_* functions. We cache this
* path, and update it here, for the following
@@ -802,8 +826,6 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
*/
blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
bic->blkcg_serial_nr = serial_nr;
out:
rcu_read_unlock();
}
/**
@@ -931,6 +953,7 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
put_async_queues:
bfq_put_async_queues(bfqd, bfqg);
bfqg->online = false;
spin_unlock_irqrestore(&bfqd->lock, flags);
/*
@@ -1420,7 +1443,7 @@ void bfq_end_wr_async(struct bfq_data *bfqd)
bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}
struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
return bfqd->root_group;
}


@@ -2227,10 +2227,17 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
spin_lock_irq(&bfqd->lock);
if (bic)
if (bic) {
/*
* Make sure cgroup info is uptodate for current process before
* considering the merge.
*/
bfq_bic_update_cgroup(bic, bio);
bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
else
} else {
bfqd->bio_bfqq = NULL;
}
bfqd->bio_bic = bic;
ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
@@ -2260,8 +2267,6 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
return ELEVATOR_NO_MERGE;
}
static struct bfq_queue *bfq_init_rq(struct request *rq);
static void bfq_request_merged(struct request_queue *q, struct request *req,
enum elv_merge type)
{
@@ -2270,7 +2275,7 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
blk_rq_pos(req) <
blk_rq_pos(container_of(rb_prev(&req->rb_node),
struct request, rb_node))) {
struct bfq_queue *bfqq = bfq_init_rq(req);
struct bfq_queue *bfqq = RQ_BFQQ(req);
struct bfq_data *bfqd;
struct request *prev, *next_rq;
@@ -2322,8 +2327,8 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
static void bfq_requests_merged(struct request_queue *q, struct request *rq,
struct request *next)
{
struct bfq_queue *bfqq = bfq_init_rq(rq),
*next_bfqq = bfq_init_rq(next);
struct bfq_queue *bfqq = RQ_BFQQ(rq),
*next_bfqq = RQ_BFQQ(next);
if (!bfqq)
return;
@@ -2502,6 +2507,14 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
if (process_refs == 0 || new_process_refs == 0)
return NULL;
/*
* Make sure merged queues belong to the same parent. Parents could
* have changed since the time we decided the two queues are suitable
* for merging.
*/
if (new_bfqq->entity.parent != bfqq->entity.parent)
return NULL;
bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
new_bfqq->pid);
@@ -4914,7 +4927,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
bfqg_and_blkg_put(bfqg);
}
static void bfq_put_cooperator(struct bfq_queue *bfqq)
void bfq_put_cooperator(struct bfq_queue *bfqq)
{
struct bfq_queue *__bfqq, *next;
@@ -5146,14 +5159,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
struct bfq_queue *bfqq;
struct bfq_group *bfqg;
rcu_read_lock();
bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
if (!bfqg) {
bfqq = &bfqd->oom_bfqq;
goto out;
}
bfqg = bfq_bio_bfqg(bfqd, bio);
if (!is_sync) {
async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
ioprio);
@@ -5197,7 +5203,6 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
out:
bfqq->ref++; /* get a process reference to this queue */
bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref);
rcu_read_unlock();
return bfqq;
}
@@ -5500,6 +5505,8 @@ static inline void bfq_update_insert_stats(struct request_queue *q,
unsigned int cmd_flags) {}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
static struct bfq_queue *bfq_init_rq(struct request *rq);
static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool at_head)
{
@@ -5514,17 +5521,14 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
bfqg_stats_update_legacy_io(q, rq);
#endif
spin_lock_irq(&bfqd->lock);
bfqq = bfq_init_rq(rq);
if (blk_mq_sched_try_insert_merge(q, rq)) {
spin_unlock_irq(&bfqd->lock);
return;
}
spin_unlock_irq(&bfqd->lock);
blk_mq_sched_request_inserted(rq);
spin_lock_irq(&bfqd->lock);
bfqq = bfq_init_rq(rq);
if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
if (at_head)
list_add(&rq->queuelist, &bfqd->dispatch);


@@ -901,6 +901,8 @@ struct bfq_group {
/* reference counter (see comments in bfq_bic_update_cgroup) */
int ref;
/* Is bfq_group still online? */
bool online;
struct bfq_entity entity;
struct bfq_sched_data sched_data;
@@ -954,6 +956,7 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bool compensate, enum bfqq_expiration reason);
void bfq_put_queue(struct bfq_queue *bfqq);
void bfq_put_cooperator(struct bfq_queue *bfqq);
void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_schedule_dispatch(struct bfq_data *bfqd);
@@ -981,8 +984,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg);
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio);
void bfq_end_wr_async(struct bfq_data *bfqd);
struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
struct blkcg *blkcg);
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio);
struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);


@@ -1898,12 +1898,8 @@ EXPORT_SYMBOL_GPL(bio_associate_blkg);
*/
void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
if (src->bi_blkg) {
if (dst->bi_blkg)
blkg_put(dst->bi_blkg);
blkg_get(src->bi_blkg);
dst->bi_blkg = src->bi_blkg;
}
if (src->bi_blkg)
bio_associate_blkg_from_css(dst, &bio_blkcg(src)->css);
}
EXPORT_SYMBOL_GPL(bio_clone_blkg_association);


@@ -86,7 +86,17 @@ struct iolatency_grp;
struct blk_iolatency {
struct rq_qos rqos;
struct timer_list timer;
atomic_t enabled;
/*
* ->enabled is the master enable switch gating the throttling logic and
* inflight tracking. The number of cgroups which have iolat enabled is
* tracked in ->enable_cnt, and ->enable is flipped on/off accordingly
* from ->enable_work with the request_queue frozen. For details, See
* blkiolatency_enable_work_fn().
*/
bool enabled;
atomic_t enable_cnt;
struct work_struct enable_work;
};
static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
@@ -94,11 +104,6 @@ static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
return container_of(rqos, struct blk_iolatency, rqos);
}
static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
return atomic_read(&blkiolat->enabled) > 0;
}
struct child_latency_info {
spinlock_t lock;
@@ -463,7 +468,7 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
struct blkcg_gq *blkg = bio->bi_blkg;
bool issue_as_root = bio_issue_as_root_blkg(bio);
if (!blk_iolatency_enabled(blkiolat))
if (!blkiolat->enabled)
return;
while (blkg && blkg->parent) {
@@ -593,7 +598,6 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
u64 window_start;
u64 now;
bool issue_as_root = bio_issue_as_root_blkg(bio);
bool enabled = false;
int inflight = 0;
blkg = bio->bi_blkg;
@@ -604,8 +608,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
if (!iolat)
return;
enabled = blk_iolatency_enabled(iolat->blkiolat);
if (!enabled)
if (!iolat->blkiolat->enabled)
return;
now = ktime_to_ns(ktime_get());
@@ -644,6 +647,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
del_timer_sync(&blkiolat->timer);
flush_work(&blkiolat->enable_work);
blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
kfree(blkiolat);
}
@@ -715,6 +719,44 @@ next:
rcu_read_unlock();
}
/**
* blkiolatency_enable_work_fn - Enable or disable iolatency on the device
* @work: enable_work of the blk_iolatency of interest
*
* iolatency needs to keep track of the number of in-flight IOs per cgroup. This
* is relatively expensive as it involves walking up the hierarchy twice for
* every IO. Thus, if iolatency is not enabled in any cgroup for the device, we
* want to disable the in-flight tracking.
*
* We have to make sure that the counting is balanced - we don't want to leak
* the in-flight counts by disabling accounting in the completion path while IOs
* are in flight. This is achieved by ensuring that no IO is in flight by
* freezing the queue while flipping ->enabled. As this requires a sleepable
* context, ->enabled flipping is punted to this work function.
*/
static void blkiolatency_enable_work_fn(struct work_struct *work)
{
struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency,
enable_work);
bool enabled;
/*
* There can only be one instance of this function running for @blkiolat
* and it's guaranteed to be executed at least once after the latest
* ->enabled_cnt modification. Acting on the latest ->enable_cnt is
* sufficient.
*
* Also, we know @blkiolat is safe to access as ->enable_work is flushed
* in blkcg_iolatency_exit().
*/
enabled = atomic_read(&blkiolat->enable_cnt);
if (enabled != blkiolat->enabled) {
blk_mq_freeze_queue(blkiolat->rqos.q);
blkiolat->enabled = enabled;
blk_mq_unfreeze_queue(blkiolat->rqos.q);
}
}
int blk_iolatency_init(struct request_queue *q)
{
struct blk_iolatency *blkiolat;
@@ -740,17 +782,15 @@ int blk_iolatency_init(struct request_queue *q)
}
timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn);
return 0;
}
/*
* return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
* return 0.
*/
static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
struct iolatency_grp *iolat = blkg_to_lat(blkg);
struct blk_iolatency *blkiolat = iolat->blkiolat;
u64 oldval = iolat->min_lat_nsec;
iolat->min_lat_nsec = val;
@@ -758,13 +798,15 @@ static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
BLKIOLATENCY_MAX_WIN_SIZE);
if (!oldval && val)
return 1;
if (!oldval && val) {
if (atomic_inc_return(&blkiolat->enable_cnt) == 1)
schedule_work(&blkiolat->enable_work);
}
if (oldval && !val) {
blkcg_clear_delay(blkg);
return -1;
if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
schedule_work(&blkiolat->enable_work);
}
return 0;
}
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
@@ -796,7 +838,6 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
u64 lat_val = 0;
u64 oldval;
int ret;
int enable = 0;
ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
if (ret)
@@ -831,41 +872,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
blkg = ctx.blkg;
oldval = iolat->min_lat_nsec;
enable = iolatency_set_min_lat_nsec(blkg, lat_val);
if (enable) {
if (!blk_get_queue(blkg->q)) {
ret = -ENODEV;
goto out;
}
blkg_get(blkg);
}
if (oldval != iolat->min_lat_nsec) {
iolatency_set_min_lat_nsec(blkg, lat_val);
if (oldval != iolat->min_lat_nsec)
iolatency_clear_scaling(blkg);
}
ret = 0;
out:
blkg_conf_finish(&ctx);
if (ret == 0 && enable) {
struct iolatency_grp *tmp = blkg_to_lat(blkg);
struct blk_iolatency *blkiolat = tmp->blkiolat;
blk_mq_freeze_queue(blkg->q);
if (enable == 1)
atomic_inc(&blkiolat->enabled);
else if (enable == -1)
atomic_dec(&blkiolat->enabled);
else
WARN_ON_ONCE(1);
blk_mq_unfreeze_queue(blkg->q);
blkg_put(blkg);
blk_put_queue(blkg->q);
}
return ret ?: nbytes;
}
@@ -1006,14 +1018,8 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
struct blkcg_gq *blkg = lat_to_blkg(iolat);
struct blk_iolatency *blkiolat = iolat->blkiolat;
int ret;
ret = iolatency_set_min_lat_nsec(blkg, 0);
if (ret == 1)
atomic_inc(&blkiolat->enabled);
if (ret == -1)
atomic_dec(&blkiolat->enabled);
iolatency_set_min_lat_nsec(blkg, 0);
iolatency_clear_scaling(blkg);
}


@@ -39,6 +39,10 @@ struct cryptd_cpu_queue {
};
struct cryptd_queue {
/*
* Protected by disabling BH to allow enqueueing from softinterrupt and
* dequeuing from kworker (cryptd_queue_worker()).
*/
struct cryptd_cpu_queue __percpu *cpu_queue;
};
@@ -125,28 +129,28 @@ static void cryptd_fini_queue(struct cryptd_queue *queue)
static int cryptd_enqueue_request(struct cryptd_queue *queue,
struct crypto_async_request *request)
{
int cpu, err;
int err;
struct cryptd_cpu_queue *cpu_queue;
refcount_t *refcnt;
cpu = get_cpu();
local_bh_disable();
cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);
refcnt = crypto_tfm_ctx(request->tfm);
if (err == -ENOSPC)
goto out_put_cpu;
goto out;
queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
if (!refcount_read(refcnt))
goto out_put_cpu;
goto out;
refcount_inc(refcnt);
out_put_cpu:
put_cpu();
out:
local_bh_enable();
return err;
}
@@ -162,15 +166,10 @@ static void cryptd_queue_worker(struct work_struct *work)
cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
/*
* Only handle one request at a time to avoid hogging crypto workqueue.
* preempt_disable/enable is used to prevent being preempted by
* cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
* cryptd_enqueue_request() being accessed from software interrupts.
*/
local_bh_disable();
preempt_disable();
backlog = crypto_get_backlog(&cpu_queue->queue);
req = crypto_dequeue_request(&cpu_queue->queue);
preempt_enable();
local_bh_enable();
if (!req)


@@ -433,6 +433,16 @@ void acpi_init_properties(struct acpi_device *adev)
acpi_extract_apple_properties(adev);
}
static void acpi_free_device_properties(struct list_head *list)
{
struct acpi_device_properties *props, *tmp;
list_for_each_entry_safe(props, tmp, list, list) {
list_del(&props->list);
kfree(props);
}
}
static void acpi_destroy_nondev_subnodes(struct list_head *list)
{
struct acpi_data_node *dn, *next;
@@ -445,22 +455,18 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list)
wait_for_completion(&dn->kobj_done);
list_del(&dn->sibling);
ACPI_FREE((void *)dn->data.pointer);
acpi_free_device_properties(&dn->data.properties);
kfree(dn);
}
}
void acpi_free_properties(struct acpi_device *adev)
{
struct acpi_device_properties *props, *tmp;
acpi_destroy_nondev_subnodes(&adev->data.subnodes);
ACPI_FREE((void *)adev->data.pointer);
adev->data.of_compatible = NULL;
adev->data.pointer = NULL;
list_for_each_entry_safe(props, tmp, &adev->data.properties, list) {
list_del(&props->list);
kfree(props);
}
acpi_free_device_properties(&adev->data.properties);
}
/**


@@ -377,6 +377,18 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
},
},
/*
* ASUS B1400CEAE hangs on resume from suspend (see
* https://bugzilla.kernel.org/show_bug.cgi?id=215742).
*/
{
.callback = init_default_s3,
.ident = "ASUS B1400CEAE",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
},
},
{},
};


@@ -560,10 +560,9 @@ int register_memory(struct memory_block *memory)
}
ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
GFP_KERNEL));
if (ret) {
put_device(&memory->dev);
if (ret)
device_unregister(&memory->dev);
}
return ret;
}


@@ -655,6 +655,7 @@ static int register_node(struct node *node, int num)
*/
void unregister_node(struct node *node)
{
compaction_unregister_node(node);
hugetlb_unregister_node(node); /* no-op, if memoryless node */
node_remove_accesses(node);
node_remove_caches(node);


@@ -3631,9 +3631,8 @@ const char *cmdname(enum drbd_packet cmd)
* when we want to support more than
* one PRO_VERSION */
static const char *cmdnames[] = {
[P_DATA] = "Data",
[P_WSAME] = "WriteSame",
[P_TRIM] = "Trim",
[P_DATA_REPLY] = "DataReply",
[P_RS_DATA_REPLY] = "RSDataReply",
[P_BARRIER] = "Barrier",
@@ -3644,7 +3643,6 @@ const char *cmdname(enum drbd_packet cmd)
[P_DATA_REQUEST] = "DataRequest",
[P_RS_DATA_REQUEST] = "RSDataRequest",
[P_SYNC_PARAM] = "SyncParam",
[P_SYNC_PARAM89] = "SyncParam89",
[P_PROTOCOL] = "ReportProtocol",
[P_UUIDS] = "ReportUUIDs",
[P_SIZES] = "ReportSizes",
@@ -3652,6 +3650,7 @@ const char *cmdname(enum drbd_packet cmd)
[P_SYNC_UUID] = "ReportSyncUUID",
[P_AUTH_CHALLENGE] = "AuthChallenge",
[P_AUTH_RESPONSE] = "AuthResponse",
[P_STATE_CHG_REQ] = "StateChgRequest",
[P_PING] = "Ping",
[P_PING_ACK] = "PingAck",
[P_RECV_ACK] = "RecvAck",
@@ -3662,24 +3661,26 @@ const char *cmdname(enum drbd_packet cmd)
[P_NEG_DREPLY] = "NegDReply",
[P_NEG_RS_DREPLY] = "NegRSDReply",
[P_BARRIER_ACK] = "BarrierAck",
[P_STATE_CHG_REQ] = "StateChgRequest",
[P_STATE_CHG_REPLY] = "StateChgReply",
[P_OV_REQUEST] = "OVRequest",
[P_OV_REPLY] = "OVReply",
[P_OV_RESULT] = "OVResult",
[P_CSUM_RS_REQUEST] = "CsumRSRequest",
[P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
[P_SYNC_PARAM89] = "SyncParam89",
[P_COMPRESSED_BITMAP] = "CBitmap",
[P_DELAY_PROBE] = "DelayProbe",
[P_OUT_OF_SYNC] = "OutOfSync",
[P_RETRY_WRITE] = "RetryWrite",
[P_RS_CANCEL] = "RSCancel",
[P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
[P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
[P_RETRY_WRITE] = "retry_write",
[P_PROTOCOL_UPDATE] = "protocol_update",
[P_TRIM] = "Trim",
[P_RS_THIN_REQ] = "rs_thin_req",
[P_RS_DEALLOCATED] = "rs_deallocated",
[P_WSAME] = "WriteSame",
[P_ZEROES] = "Zeroes",
/* enum drbd_packet, but not commands - obsoleted flags:
* P_MAY_IGNORE


@@ -880,11 +880,15 @@ static int wait_for_reconnect(struct nbd_device *nbd)
struct nbd_config *config = nbd->config;
if (!config->dead_conn_timeout)
return 0;
if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
return 0;
return wait_event_timeout(config->conn_wait,
if (!wait_event_timeout(config->conn_wait,
test_bit(NBD_RT_DISCONNECTED,
&config->runtime_flags) ||
atomic_read(&config->live_connections) > 0,
config->dead_conn_timeout) > 0;
config->dead_conn_timeout))
return 0;
return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
}
static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
@@ -2029,6 +2033,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
mutex_lock(&nbd->config_lock);
nbd_disconnect(nbd);
sock_shutdown(nbd);
wake_up(&nbd->config->conn_wait);
/*
* Make sure recv thread has finished, so it does not drop the last
* config ref and try to destroy the workqueue from inside the work

Some files were not shown because too many files have changed in this diff Show More