Merge 5.10.24 into android12-5.10-lts

Changes in 5.10.24
	uapi: nfnetlink_cthelper.h: fix userspace compilation error
	powerpc/perf: Fix handling of privilege level checks in perf interrupt context
	powerpc/pseries: Don't enforce MSI affinity with kdump
	ethernet: alx: fix order of calls on resume
	crypto: mips/poly1305 - enable for all MIPS processors
	ath9k: fix transmitting to stations in dynamic SMPS mode
	net: Fix gro aggregation for udp encaps with zero csum
	net: check if protocol extracted by virtio_net_hdr_set_proto is correct
	net: avoid infinite loop in mpls_gso_segment when mpls_hlen == 0
	net: l2tp: reduce log level of messages in receive path, add counter instead
	can: skb: can_skb_set_owner(): fix ref counting if socket was closed before setting skb ownership
	can: flexcan: assert FRZ bit in flexcan_chip_freeze()
	can: flexcan: enable RX FIFO after FRZ/HALT valid
	can: flexcan: invoke flexcan_chip_freeze() to enter freeze mode
	can: tcan4x5x: tcan4x5x_init(): fix initialization - clear MRAM before entering Normal Mode
	tcp: Fix sign comparison bug in getsockopt(TCP_ZEROCOPY_RECEIVE)
	tcp: add sanity tests to TCP_QUEUE_SEQ
	netfilter: nf_nat: undo erroneous tcp edemux lookup
	netfilter: x_tables: gpf inside xt_find_revision()
	net: always use icmp{,v6}_ndo_send from ndo_start_xmit
	net: phy: fix save wrong speed and duplex problem if autoneg is on
	selftests/bpf: Use the last page in test_snprintf_btf on s390
	selftests/bpf: No need to drop the packet when there is no geneve opt
	selftests/bpf: Mask bpf_csum_diff() return value to 16 bits in test_verifier
	samples, bpf: Add missing munmap in xdpsock
	libbpf: Clear map_info before each bpf_obj_get_info_by_fd
	ibmvnic: Fix possibly uninitialized old_num_tx_queues variable warning.
	ibmvnic: always store valid MAC address
	mt76: dma: do not report truncated frames to mac80211
	powerpc/603: Fix protection of user pages mapped with PROT_NONE
	mount: fix mounting of detached mounts onto targets that reside on shared mounts
	cifs: return proper error code in statfs(2)
	Revert "mm, slub: consider rest of partial list if acquire_slab() fails"
	docs: networking: drop special stable handling
	net: dsa: tag_rtl4_a: fix egress tags
	sh_eth: fix TRSCER mask for SH771x
	net: enetc: don't overwrite the RSS indirection table when initializing
	net: enetc: take the MDIO lock only once per NAPI poll cycle
	net: enetc: fix incorrect TPID when receiving 802.1ad tagged packets
	net: enetc: don't disable VLAN filtering in IFF_PROMISC mode
	net: enetc: force the RGMII speed and duplex instead of operating in inband mode
	net: enetc: remove bogus write to SIRXIDR from enetc_setup_rxbdr
	net: enetc: keep RX ring consumer index in sync with hardware
	net: ethernet: mtk-star-emac: fix wrong unmap in RX handling
	net/mlx4_en: update moderation when config reset
	net: stmmac: fix incorrect DMA channel intr enable setting of EQoS v4.10
	nexthop: Do not flush blackhole nexthops when loopback goes down
	net: sched: avoid duplicates in classes dump
	net: mscc: ocelot: properly reject destination IP keys in VCAP IS1
	net: dsa: sja1105: fix SGMII PCS being forced to SPEED_UNKNOWN instead of SPEED_10
	net: usb: qmi_wwan: allow qmimux add/del with master up
	netdevsim: init u64 stats for 32bit hardware
	cipso,calipso: resolve a number of problems with the DOI refcounts
	net: stmmac: Fix VLAN filter delete timeout issue in Intel mGBE SGMII
	stmmac: intel: Fixes clock registration error seen for multiple interfaces
	net: lapbether: Remove netif_start_queue / netif_stop_queue
	net: davicom: Fix regulator not turned off on failed probe
	net: davicom: Fix regulator not turned off on driver removal
	net: enetc: allow hardware timestamping on TX queues with tc-etf enabled
	net: qrtr: fix error return code of qrtr_sendmsg()
	s390/qeth: fix memory leak after failed TX Buffer allocation
	r8169: fix r8168fp_adjust_ocp_cmd function
	ixgbe: fail to create xfrm offload of IPsec tunnel mode SA
	tools/resolve_btfids: Fix build error with older host toolchains
	perf build: Fix ccache usage in $(CC) when generating arch errno table
	net: stmmac: stop each tx channel independently
	net: stmmac: fix watchdog timeout during suspend/resume stress test
	net: stmmac: fix wrongly set buffer2 valid when sph unsupport
	ethtool: fix the check logic of at least one channel for RX/TX
	net: phy: make mdio_bus_phy_suspend/resume as __maybe_unused
	selftests: forwarding: Fix race condition in mirror installation
	mlxsw: spectrum_ethtool: Add an external speed to PTYS register
	perf traceevent: Ensure read cmdlines are null terminated.
	perf report: Fix -F for branch & mem modes
	net: hns3: fix query vlan mask value error for flow director
	net: hns3: fix bug when calculating the TCAM table info
	s390/cio: return -EFAULT if copy_to_user() fails again
	bnxt_en: reliably allocate IRQ table on reset to avoid crash
	gpiolib: acpi: Add ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER quirk
	gpiolib: acpi: Allow to find GpioInt() resource by name and index
	gpio: pca953x: Set IRQ type when handle Intel Galileo Gen 2
	gpio: fix gpio-device list corruption
	drm/compat: Clear bounce structures
	drm/amd/display: Add a backlight module option
	drm/amdgpu/display: use GFP_ATOMIC in dcn21_validate_bandwidth_fp()
	drm/amd/display: Fix nested FPU context in dcn21_validate_bandwidth()
	drm/amd/pm: bug fix for pcie dpm
	drm/amdgpu/display: simplify backlight setting
	drm/amdgpu/display: don't assert in set backlight function
	drm/amdgpu/display: handle aux backlight in backlight_get_brightness
	drm/shmem-helper: Check for purged buffers in fault handler
	drm/shmem-helper: Don't remove the offset in vm_area_struct pgoff
	drm: Use USB controller's DMA mask when importing dmabufs
	drm: meson_drv add shutdown function
	drm/shmem-helpers: vunmap: Don't put pages for dma-buf
	drm/i915: Wedge the GPU if command parser setup fails
	s390/cio: return -EFAULT if copy_to_user() fails
	s390/crypto: return -EFAULT if copy_to_user() fails
	qxl: Fix uninitialised struct field head.surface_id
	sh_eth: fix TRSCER mask for R7S9210
	media: usbtv: Fix deadlock on suspend
	media: rkisp1: params: fix wrong bits settings
	media: v4l: vsp1: Fix uif null pointer access
	media: v4l: vsp1: Fix bru null pointer access
	media: rc: compile rc-cec.c into rc-core
	cifs: fix credit accounting for extra channel
	net: hns3: fix error mask definition of flow director
	s390/qeth: don't replace a fully completed async TX buffer
	s390/qeth: remove QETH_QDIO_BUF_HANDLED_DELAYED state
	s390/qeth: improve completion of pending TX buffers
	s390/qeth: fix notification for pending buffers during teardown
	net: dsa: implement a central TX reallocation procedure
	net: dsa: tag_ksz: don't allocate additional memory for padding/tagging
	net: dsa: trailer: don't allocate additional memory for padding/tagging
	net: dsa: tag_qca: let DSA core deal with TX reallocation
	net: dsa: tag_ocelot: let DSA core deal with TX reallocation
	net: dsa: tag_mtk: let DSA core deal with TX reallocation
	net: dsa: tag_lan9303: let DSA core deal with TX reallocation
	net: dsa: tag_edsa: let DSA core deal with TX reallocation
	net: dsa: tag_brcm: let DSA core deal with TX reallocation
	net: dsa: tag_dsa: let DSA core deal with TX reallocation
	net: dsa: tag_gswip: let DSA core deal with TX reallocation
	net: dsa: tag_ar9331: let DSA core deal with TX reallocation
	net: dsa: tag_mtk: fix 802.1ad VLAN egress
	enetc: Fix unused var build warning for CONFIG_OF
	net: enetc: initialize RFS/RSS memories for unused ports too
	ath11k: peer delete synchronization with firmware
	ath11k: start vdev if a bss peer is already created
	ath11k: fix AP mode for QCA6390
	i2c: rcar: faster irq code to minimize HW race condition
	i2c: rcar: optimize cacheline to minimize HW race condition
	scsi: ufs: WB is only available on LUN #0 to #7
	udf: fix silent AED tagLocation corruption
	iommu/vt-d: Clear PRQ overflow only when PRQ is empty
	mmc: mxs-mmc: Fix a resource leak in an error handling path in 'mxs_mmc_probe()'
	mmc: mediatek: fix race condition between msdc_request_timeout and irq
	mmc: sdhci-iproc: Add ACPI bindings for the RPi
	Platform: OLPC: Fix probe error handling
	powerpc/pci: Add ppc_md.discover_phbs()
	spi: stm32: make spurious and overrun interrupts visible
	powerpc: improve handling of unrecoverable system reset
	powerpc/perf: Record counter overflow always if SAMPLE_IP is unset
	HID: logitech-dj: add support for the new lightspeed connection iteration
	powerpc/64: Fix stack trace not displaying final frame
	iommu/amd: Fix performance counter initialization
	clk: qcom: gdsc: Implement NO_RET_PERIPH flag
	sparc32: Limit memblock allocation to low memory
	sparc64: Use arch_validate_flags() to validate ADI flag
	Input: applespi - don't wait for responses to commands indefinitely.
	PCI: xgene-msi: Fix race in installing chained irq handler
	PCI: mediatek: Add missing of_node_put() to fix reference leak
	drivers/base: build kunit tests without structleak plugin
	PCI/LINK: Remove bandwidth notification
	ext4: don't try to processed freed blocks until mballoc is initialized
	kbuild: clamp SUBLEVEL to 255
	PCI: Fix pci_register_io_range() memory leak
	i40e: Fix memory leak in i40e_probe
	kasan: fix memory corruption in kasan_bitops_tags test
	s390/smp: __smp_rescan_cpus() - move cpumask away from stack
	drivers/base/memory: don't store phys_device in memory blocks
	sysctl.c: fix underflow value setting risk in vm_table
	scsi: libiscsi: Fix iscsi_prep_scsi_cmd_pdu() error handling
	scsi: target: core: Add cmd length set before cmd complete
	scsi: target: core: Prevent underflow for service actions
	clk: qcom: gpucc-msm8998: Add resets, cxc, fix flags on gpu_gx_gdsc
	mmc: sdhci: Update firmware interface API
	ARM: 9029/1: Make iwmmxt.S support Clang's integrated assembler
	ARM: assembler: introduce adr_l, ldr_l and str_l macros
	ARM: efistub: replace adrl pseudo-op with adr_l macro invocation
	ALSA: usb: Add Plantronics C320-M USB ctrl msg delay quirk
	ALSA: hda/hdmi: Cancel pending works before suspend
	ALSA: hda/conexant: Add quirk for mute LED control on HP ZBook G5
	ALSA: hda/ca0132: Add Sound BlasterX AE-5 Plus support
	ALSA: hda: Drop the BATCH workaround for AMD controllers
	ALSA: hda: Flush pending unsolicited events before suspend
	ALSA: hda: Avoid spurious unsol event handling during S3/S4
	ALSA: usb-audio: Fix "cannot get freq eq" errors on Dell AE515 sound bar
	ALSA: usb-audio: Apply the control quirk to Plantronics headsets
	ALSA: usb-audio: Disable USB autosuspend properly in setup_disable_autosuspend()
	ALSA: usb-audio: fix NULL ptr dereference in usb_audio_probe
	ALSA: usb-audio: fix use after free in usb_audio_disconnect
	Revert 95ebabde382c ("capabilities: Don't allow writing ambiguous v3 file capabilities")
	block: Discard page cache of zone reset target range
	block: Try to handle busy underlying device on discard
	arm64: kasan: fix page_alloc tagging with DEBUG_VIRTUAL
	arm64: mte: Map hotplugged memory as Normal Tagged
	arm64: perf: Fix 64-bit event counter read truncation
	s390/dasd: fix hanging DASD driver unbind
	s390/dasd: fix hanging IO request during DASD driver unbind
	software node: Fix node registration
	xen/events: reset affinity of 2-level event when tearing it down
	mmc: mmci: Add MMC_CAP_NEED_RSP_BUSY for the stm32 variants
	mmc: core: Fix partition switch time for eMMC
	mmc: cqhci: Fix random crash when remove mmc module/card
	cifs: do not send close in compound create+close requests
	Goodix Fingerprint device is not a modem
	USB: gadget: udc: s3c2410_udc: fix return value check in s3c2410_udc_probe()
	USB: gadget: u_ether: Fix a configfs return code
	usb: gadget: f_uac2: always increase endpoint max_packet_size by one audio slot
	usb: gadget: f_uac1: stop playback on function disable
	usb: dwc3: qcom: Add missing DWC3 OF node refcount decrement
	usb: dwc3: qcom: add URS Host support for sdm845 ACPI boot
	usb: dwc3: qcom: add ACPI device id for sc8180x
	usb: dwc3: qcom: Honor wakeup enabled/disabled state
	USB: usblp: fix a hang in poll() if disconnected
	usb: renesas_usbhs: Clear PIPECFG for re-enabling pipe with other EPNUM
	usb: xhci: do not perform Soft Retry for some xHCI hosts
	xhci: Improve detection of device initiated wake signal.
	usb: xhci: Fix ASMedia ASM1042A and ASM3242 DMA addressing
	xhci: Fix repeated xhci wake after suspend due to uncleared internal wake state
	USB: serial: io_edgeport: fix memory leak in edge_startup
	USB: serial: ch341: add new Product ID
	USB: serial: cp210x: add ID for Acuity Brands nLight Air Adapter
	USB: serial: cp210x: add some more GE USB IDs
	usbip: fix stub_dev to check for stream socket
	usbip: fix vhci_hcd to check for stream socket
	usbip: fix vudc to check for stream socket
	usbip: fix stub_dev usbip_sockfd_store() races leading to gpf
	usbip: fix vhci_hcd attach_store() races leading to gpf
	usbip: fix vudc usbip_sockfd_store races leading to gpf
	Revert "serial: max310x: rework RX interrupt handling"
	misc/pvpanic: Export module FDT device table
	misc: fastrpc: restrict user apps from sending kernel RPC messages
	staging: rtl8192u: fix ->ssid overflow in r8192_wx_set_scan()
	staging: rtl8188eu: prevent ->ssid overflow in rtw_wx_set_scan()
	staging: rtl8712: unterminated string leads to read overflow
	staging: rtl8188eu: fix potential memory corruption in rtw_check_beacon_data()
	staging: ks7010: prevent buffer overflow in ks_wlan_set_scan()
	staging: rtl8712: Fix possible buffer overflow in r8712_sitesurvey_cmd
	staging: rtl8192e: Fix possible buffer overflow in _rtl92e_wx_set_scan
	staging: comedi: addi_apci_1032: Fix endian problem for COS sample
	staging: comedi: addi_apci_1500: Fix endian problem for command sample
	staging: comedi: adv_pci1710: Fix endian problem for AI command data
	staging: comedi: das6402: Fix endian problem for AI command data
	staging: comedi: das800: Fix endian problem for AI command data
	staging: comedi: dmm32at: Fix endian problem for AI command data
	staging: comedi: me4000: Fix endian problem for AI command data
	staging: comedi: pcl711: Fix endian problem for AI command data
	staging: comedi: pcl818: Fix endian problem for AI command data
	sh_eth: fix TRSCER mask for R7S72100
	cpufreq: qcom-hw: fix dereferencing freed memory 'data'
	cpufreq: qcom-hw: Fix return value check in qcom_cpufreq_hw_cpu_init()
	arm64/mm: Fix pfn_valid() for ZONE_DEVICE based memory
	SUNRPC: Set memalloc_nofs_save() for sync tasks
	NFS: Don't revalidate the directory permissions on a lookup failure
	NFS: Don't gratuitously clear the inode cache when lookup failed
	NFSv4.2: fix return value of _nfs4_get_security_label()
	block: rsxx: fix error return code of rsxx_pci_probe()
	nvme-fc: fix racing controller reset and create association
	configfs: fix a use-after-free in __configfs_open_file
	arm64: mm: use a 48-bit ID map when possible on 52-bit VA builds
	perf/core: Flush PMU internal buffers for per-CPU events
	perf/x86/intel: Set PERF_ATTACH_SCHED_CB for large PEBS and LBR
	hrtimer: Update softirq_expires_next correctly after __hrtimer_get_next_event()
	powerpc/64s/exception: Clean up a missed SRR specifier
	seqlock,lockdep: Fix seqcount_latch_init()
	stop_machine: mark helpers __always_inline
	include/linux/sched/mm.h: use rcu_dereference in in_vfork()
	zram: fix return value on writeback_store
	linux/compiler-clang.h: define HAVE_BUILTIN_BSWAP*
	sched/membarrier: fix missing local execution of ipi_sync_rq_state()
	efi: stub: omit SetVirtualAddressMap() if marked unsupported in RT_PROP table
	powerpc/64s: Fix instruction encoding for lis in ppc_function_entry()
	powerpc: Fix inverted SET_FULL_REGS bitop
	powerpc: Fix missing declaration of [en/dis]able_kernel_vsx()
	binfmt_misc: fix possible deadlock in bm_register_write
	x86/unwind/orc: Disable KASAN checking in the ORC unwinder, part 2
	x86/sev-es: Introduce ip_within_syscall_gap() helper
	x86/sev-es: Check regs->sp is trusted before adjusting #VC IST stack
	x86/entry: Move nmi entry/exit into common code
	x86/sev-es: Correctly track IRQ states in runtime #VC handler
	x86/sev-es: Use __copy_from_user_inatomic()
	x86/entry: Fix entry/exit mismatch on failed fast 32-bit syscalls
	KVM: x86: Ensure deadline timer has truly expired before posting its IRQ
	KVM: kvmclock: Fix vCPUs > 64 can't be online/hotpluged
	KVM: arm64: Fix range alignment when walking page tables
	KVM: arm64: Avoid corrupting vCPU context register in guest exit
	KVM: arm64: nvhe: Save the SPE context early
	KVM: arm64: Reject VM creation when the default IPA size is unsupported
	KVM: arm64: Fix exclusive limit for IPA size
	mm/userfaultfd: fix memory corruption due to writeprotect
	mm/madvise: replace ptrace attach requirement for process_madvise
	KVM: arm64: Ensure I-cache isolation between vcpus of a same VM
	mm/page_alloc.c: refactor initialization of struct page for holes in memory layout
	xen/events: don't unmask an event channel when an eoi is pending
	xen/events: avoid handling the same event on two cpus at the same time
	KVM: arm64: Fix nVHE hyp panic host context restore
	RDMA/umem: Use ib_dma_max_seg_size instead of dma_get_max_seg_size
	Linux 5.10.24

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ie53a3c1963066a18d41357b6be41cff00690bd40
Author: Greg Kroah-Hartman
Date:   2021-03-19 09:42:56 +01:00

310 changed files with 2821 additions and 1667 deletions


@@ -26,8 +26,9 @@ Date: September 2008
 Contact: Badari Pulavarty <pbadari@us.ibm.com>
 Description:
 		The file /sys/devices/system/memory/memoryX/phys_device
-		is read-only and is designed to show the name of physical
-		memory device.  Implementation is currently incomplete.
+		is read-only;  it is a legacy interface only ever used on s390x
+		to expose the covered storage increment.
+Users:		Legacy s390-tools lsmem/chmem

 What:		/sys/devices/system/memory/memoryX/phys_index
 Date:		September 2008


@@ -160,8 +160,8 @@ Under each memory block, you can see 5 files:
 		    "online_movable", "online", "offline" command
 		    which will be performed on all sections in the block.
-``phys_device``	    read-only: designed to show the name of physical memory
-		    device.  This is not well implemented now.
+``phys_device``	    read-only: legacy interface only ever used on s390x to
+		    expose the covered storage increment.
 ``removable``	    read-only: contains an integer value indicating
 		    whether the memory block is removable or not
 		    removable.  A value of 1 indicates that the memory


@@ -560,6 +560,27 @@ Some of these date from the very introduction of KMS in 2008 ...

 Level: Intermediate

+Remove automatic page mapping from dma-buf importing
+----------------------------------------------------
+
+When importing dma-bufs, the dma-buf and PRIME frameworks automatically map
+imported pages into the importer's DMA area. drm_gem_prime_fd_to_handle() and
+drm_gem_prime_handle_to_fd() require that importers call dma_buf_attach()
+even if they never do actual device DMA, but only CPU access through
+dma_buf_vmap(). This is a problem for USB devices, which do not support DMA
+operations.
+
+To fix the issue, automatic page mappings should be removed from the
+buffer-sharing code. Fixing this is a bit more involved, since the import/export
+cache is also tied to &drm_gem_object.import_attach. Meanwhile we paper over
+this problem for USB devices by fishing out the USB host controller device, as
+long as that supports DMA. Otherwise importing can still needlessly fail.
+
+Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter
+
+Level: Advanced
+
 Better Testing
 ==============
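The stop-gap described in this TODO entry amounts to attaching the dma-buf to a
device that actually has a DMA mask. A minimal sketch of the idea, not the
literal patch in this merge; the helper name and the use of bus->sysdev (the
host-controller device) are illustrative assumptions:

	#include <drm/drm_prime.h>
	#include <linux/usb.h>

	/* Sketch only: import through the USB host controller, which is
	 * DMA-capable, instead of the usb_interface device, which is not. */
	static struct drm_gem_object *
	my_usb_gem_prime_import(struct drm_device *dev, struct usb_interface *intf,
				struct dma_buf *dma_buf)
	{
		struct device *dma_dev = interface_to_usbdev(intf)->bus->sysdev;

		return drm_gem_prime_import_dev(dev, dma_buf, dma_dev);
	}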


@@ -144,77 +144,13 @@ Please send incremental versions on top of what has been merged in order to fix
 the patches the way they would look like if your latest patch series was to be
 merged.

-Q: How can I tell what patches are queued up for backporting to the various stable releases?
---------------------------------------------------------------------------------------------
-A: Normally Greg Kroah-Hartman collects stable commits himself, but for
-networking, Dave collects up patches he deems critical for the
-networking subsystem, and then hands them off to Greg.
-
-There is a patchworks queue that you can see here:
-
-  https://patchwork.kernel.org/bundle/netdev/stable/?state=*
-
-It contains the patches which Dave has selected, but not yet handed off
-to Greg.  If Greg already has the patch, then it will be here:
-
-  https://git.kernel.org/pub/scm/linux/kernel/git/stable/stable-queue.git
-
-A quick way to find whether the patch is in this stable-queue is to
-simply clone the repo, and then git grep the mainline commit ID, e.g.
-
-::
-
-  stable-queue$ git grep -l 284041ef21fdf2e
-  releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
-  releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
-  releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
-  stable/stable-queue$
-
-Q: I see a network patch and I think it should be backported to stable.
------------------------------------------------------------------------
-Q: Should I request it via stable@vger.kernel.org like the references in
-the kernel's Documentation/process/stable-kernel-rules.rst file say?
-A: No, not for networking.  Check the stable queues as per above first
-to see if it is already queued.  If not, then send a mail to netdev,
-listing the upstream commit ID and why you think it should be a stable
-candidate.
-
-Before you jump to go do the above, do note that the normal stable rules
-in :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
-still apply.  So you need to explicitly indicate why it is a critical
-fix and exactly what users are impacted.  In addition, you need to
-convince yourself that you *really* think it has been overlooked,
-vs. having been considered and rejected.
-
-Generally speaking, the longer it has had a chance to "soak" in
-mainline, the better the odds that it is an OK candidate for stable.  So
-scrambling to request a commit be added the day after it appears should
-be avoided.
-
-Q: I have created a network patch and I think it should be backported to stable.
---------------------------------------------------------------------------------
-Q: Should I add a Cc: stable@vger.kernel.org like the references in the
-kernel's Documentation/ directory say?
-A: No.  See above answer.  In short, if you think it really belongs in
-stable, then ensure you write a decent commit log that describes who
-gets impacted by the bug fix and how it manifests itself, and when the
-bug was introduced.  If you do that properly, then the commit will get
-handled appropriately and most likely get put in the patchworks stable
-queue if it really warrants it.
-
-If you think there is some valid information relating to it being in
-stable that does *not* belong in the commit log, then use the three dash
-marker line as described in
-:ref:`Documentation/process/submitting-patches.rst <the_canonical_patch_format>`
-to temporarily embed that information into the patch that you send.
-
-Q: Are all networking bug fixes backported to all stable releases?
-------------------------------------------------------------------
-A: Due to capacity, Dave could only take care of the backports for the
-last two stable releases.  For earlier stable releases, each stable
-branch maintainer is supposed to take care of them.  If you find any
-patch is missing from an earlier stable branch, please notify
-stable@vger.kernel.org with either a commit ID or a formal patch
-backported, and CC Dave and other relevant networking developers.
-
+Q: Are there special rules regarding stable submissions on netdev?
+-------------------------------------------------------------------
+While it used to be the case that netdev submissions were not supposed
+to carry explicit ``CC: stable@vger.kernel.org`` tags that is no longer
+the case today. Please follow the standard stable rules in
+:ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`,
+and make sure you include appropriate Fixes tags!
+
 Q: Is the comment style convention different for the networking content?
 ------------------------------------------------------------------------


@@ -35,12 +35,6 @@ Rules on what kind of patches are accepted, and which ones are not, into the
 Procedure for submitting patches to the -stable tree
 ----------------------------------------------------

- - If the patch covers files in net/ or drivers/net please follow netdev stable
-   submission guidelines as described in
-   :ref:`Documentation/networking/netdev-FAQ.rst <netdev-FAQ>`
-   after first checking the stable networking queue at
-   https://patchwork.kernel.org/bundle/netdev/stable/?state=*
-   to ensure the requested patch is not already queued up.
 - Security patches should not be handled (solely) by the -stable review
   process but should follow the procedures in
   :ref:`Documentation/admin-guide/security-bugs.rst <securitybugs>`.


@@ -250,11 +250,6 @@ should also read
 :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`
 in addition to this file.

-Note, however, that some subsystem maintainers want to come to their own
-conclusions on which patches should go to the stable trees. The networking
-maintainer, in particular, would rather not see individual developers
-adding lines like the above to their patches.
-
 If changes affect userland-kernel interfaces, please send the MAN-PAGES
 maintainer (as listed in the MAINTAINERS file) a man-pages patch, or at
 least a notification of the change, so that some information makes its way


@@ -182,6 +182,9 @@ is dependent on the CPU capability and the kernel configuration. The limit can
 be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION
 ioctl() at run-time.

+Creation of the VM will fail if the requested IPA size (whether it is
+implicit or explicit) is unsupported on the host.
+
 Please note that configuring the IPA size does not affect the capability
 exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects
 size of the address translated by the stage2 level (guest physical to
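For reference, the userspace side of this contract uses the documented ioctls;
a hedged sketch (error handling omitted, not taken from this merge):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int create_vm_with_max_ipa(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		int ipa = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);

		/* 0 means the cap is absent; only the implicit default works. */
		if (ipa <= 0)
			return ioctl(kvm, KVM_CREATE_VM, 0);

		/* After this fix, asking for more than `ipa` bits fails, and the
		 * implicit default fails on hosts that cannot support it. */
		return ioctl(kvm, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(ipa));
	}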


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 23
+SUBLEVEL = 24
 EXTRAVERSION =
 NAME = Dare mighty things

@@ -1332,9 +1332,15 @@ define filechk_utsrelease.h
 endef

 define filechk_version.h
-	echo \#define LINUX_VERSION_CODE $(shell                         \
-	expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
-	echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))'
+	if [ $(SUBLEVEL) -gt 255 ]; then                                 \
+		echo \#define LINUX_VERSION_CODE $(shell                 \
+		expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 255); \
+	else                                                             \
+		echo \#define LINUX_VERSION_CODE $(shell                 \
+		expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \
+	fi;                                                              \
+	echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + \
+	((c) > 255 ? 255 : (c)))'
 endef

 $(version_h): FORCE
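Concretely, LINUX_VERSION_CODE packs one byte per component, so for this
release it is 5*65536 + 10*256 + 24 = 330264 (0x050a18). A worked example of
what the clamp prevents, using the macro from the hunk above:

	/* New macro, as generated by the Makefile change above. */
	#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))

	/* Old macro: KERNEL_VERSION(5, 10, 300) = 0x050b2c, which compares as
	 * if it were 5.11.44 because SUBLEVEL overflowed into PATCHLEVEL.
	 * New macro: the same input saturates to 0x050aff, i.e. 5.10.255. */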


@@ -1440,8 +1440,7 @@ ENTRY(efi_enter_kernel)
 		mov	r4, r0			@ preserve image base
 		mov	r8, r1			@ preserve DT pointer

- ARM(		adrl	r0, call_cache_fn	)
- THUMB(		adr	r0, call_cache_fn	)
+		adr_l	r0, call_cache_fn
 		adr	r1, 0f			@ clean the region of code we
 		bl	cache_clean_flush	@ may run with the MMU off


@@ -494,4 +494,88 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 #define _ASM_NOKPROBE(entry)
 #endif

+	.macro		__adldst_l, op, reg, sym, tmp, c
+	.if		__LINUX_ARM_ARCH__ < 7
+	ldr\c		\tmp, .La\@
+	.subsection	1
+	.align		2
+.La\@:	.long		\sym - .Lpc\@
+	.previous
+	.else
+	.ifnb		\c
+ THUMB(	ittt		\c			)
+	.endif
+	movw\c		\tmp, #:lower16:\sym - .Lpc\@
+	movt\c		\tmp, #:upper16:\sym - .Lpc\@
+	.endif
+#ifndef CONFIG_THUMB2_KERNEL
+	.set		.Lpc\@, . + 8		// PC bias
+	.ifc		\op, add
+	add\c		\reg, \tmp, pc
+	.else
+	\op\c		\reg, [pc, \tmp]
+	.endif
+#else
+.Lb\@:	add\c		\tmp, \tmp, pc
+	/*
+	 * In Thumb-2 builds, the PC bias depends on whether we are currently
+	 * emitting into a .arm or a .thumb section. The size of the add opcode
+	 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
+	 * emitting in ARM mode, so let's use this to account for the bias.
+	 */
+	.set		.Lpc\@, . + (. - .Lb\@)
+
+	.ifnc		\op, add
+	\op\c		\reg, [\tmp]
+	.endif
+#endif
+	.endm
+
+	/*
+	 * mov_l - move a constant value or [relocated] address into a register
+	 */
+	.macro		mov_l, dst:req, imm:req
+	.if		__LINUX_ARM_ARCH__ < 7
+	ldr		\dst, =\imm
+	.else
+	movw		\dst, #:lower16:\imm
+	movt		\dst, #:upper16:\imm
+	.endif
+	.endm
+
+	/*
+	 * adr_l - adr pseudo-op with unlimited range
+	 *
+	 * @dst: destination register
+	 * @sym: name of the symbol
+	 * @cond: conditional opcode suffix
+	 */
+	.macro		adr_l, dst:req, sym:req, cond
+	__adldst_l	add, \dst, \sym, \dst, \cond
+	.endm
+
+	/*
+	 * ldr_l - ldr <literal> pseudo-op with unlimited range
+	 *
+	 * @dst: destination register
+	 * @sym: name of the symbol
+	 * @cond: conditional opcode suffix
+	 */
+	.macro		ldr_l, dst:req, sym:req, cond
+	__adldst_l	ldr, \dst, \sym, \dst, \cond
+	.endm
+
+	/*
+	 * str_l - str <literal> pseudo-op with unlimited range
+	 *
+	 * @src: source register
+	 * @sym: name of the symbol
+	 * @tmp: mandatory scratch register
+	 * @cond: conditional opcode suffix
+	 */
+	.macro		str_l, src:req, sym:req, tmp:req, cond
+	__adldst_l	str, \src, \sym, \tmp, \cond
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */


@@ -16,6 +16,7 @@
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
+#include "iwmmxt.h"

 #if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
 #define PJ4(code...)		code
@@ -113,33 +114,33 @@ concan_save:

 concan_dump:

-	wstrw	wCSSF, [r1, #MMX_WCSSF]
-	wstrw	wCASF, [r1, #MMX_WCASF]
-	wstrw	wCGR0, [r1, #MMX_WCGR0]
-	wstrw	wCGR1, [r1, #MMX_WCGR1]
-	wstrw	wCGR2, [r1, #MMX_WCGR2]
-	wstrw	wCGR3, [r1, #MMX_WCGR3]
+	wstrw	wCSSF, r1, MMX_WCSSF
+	wstrw	wCASF, r1, MMX_WCASF
+	wstrw	wCGR0, r1, MMX_WCGR0
+	wstrw	wCGR1, r1, MMX_WCGR1
+	wstrw	wCGR2, r1, MMX_WCGR2
+	wstrw	wCGR3, r1, MMX_WCGR3

 1:	@ MUP? wRn
 	tst	r2, #0x2
 	beq	2f

-	wstrd	wR0,  [r1, #MMX_WR0]
-	wstrd	wR1,  [r1, #MMX_WR1]
-	wstrd	wR2,  [r1, #MMX_WR2]
-	wstrd	wR3,  [r1, #MMX_WR3]
-	wstrd	wR4,  [r1, #MMX_WR4]
-	wstrd	wR5,  [r1, #MMX_WR5]
-	wstrd	wR6,  [r1, #MMX_WR6]
-	wstrd	wR7,  [r1, #MMX_WR7]
-	wstrd	wR8,  [r1, #MMX_WR8]
-	wstrd	wR9,  [r1, #MMX_WR9]
-	wstrd	wR10, [r1, #MMX_WR10]
-	wstrd	wR11, [r1, #MMX_WR11]
-	wstrd	wR12, [r1, #MMX_WR12]
-	wstrd	wR13, [r1, #MMX_WR13]
-	wstrd	wR14, [r1, #MMX_WR14]
-	wstrd	wR15, [r1, #MMX_WR15]
+	wstrd	wR0,  r1, MMX_WR0
+	wstrd	wR1,  r1, MMX_WR1
+	wstrd	wR2,  r1, MMX_WR2
+	wstrd	wR3,  r1, MMX_WR3
+	wstrd	wR4,  r1, MMX_WR4
+	wstrd	wR5,  r1, MMX_WR5
+	wstrd	wR6,  r1, MMX_WR6
+	wstrd	wR7,  r1, MMX_WR7
+	wstrd	wR8,  r1, MMX_WR8
+	wstrd	wR9,  r1, MMX_WR9
+	wstrd	wR10, r1, MMX_WR10
+	wstrd	wR11, r1, MMX_WR11
+	wstrd	wR12, r1, MMX_WR12
+	wstrd	wR13, r1, MMX_WR13
+	wstrd	wR14, r1, MMX_WR14
+	wstrd	wR15, r1, MMX_WR15

 2:	teq	r0, #0				@ anything to load?
 	reteq	lr				@ if not, return

@@ -147,30 +148,30 @@ concan_dump:
 concan_load:

 	@ Load wRn
-	wldrd	wR0,  [r0, #MMX_WR0]
-	wldrd	wR1,  [r0, #MMX_WR1]
-	wldrd	wR2,  [r0, #MMX_WR2]
-	wldrd	wR3,  [r0, #MMX_WR3]
-	wldrd	wR4,  [r0, #MMX_WR4]
-	wldrd	wR5,  [r0, #MMX_WR5]
-	wldrd	wR6,  [r0, #MMX_WR6]
-	wldrd	wR7,  [r0, #MMX_WR7]
-	wldrd	wR8,  [r0, #MMX_WR8]
-	wldrd	wR9,  [r0, #MMX_WR9]
-	wldrd	wR10, [r0, #MMX_WR10]
-	wldrd	wR11, [r0, #MMX_WR11]
-	wldrd	wR12, [r0, #MMX_WR12]
-	wldrd	wR13, [r0, #MMX_WR13]
-	wldrd	wR14, [r0, #MMX_WR14]
-	wldrd	wR15, [r0, #MMX_WR15]
+	wldrd	wR0,  r0, MMX_WR0
+	wldrd	wR1,  r0, MMX_WR1
+	wldrd	wR2,  r0, MMX_WR2
+	wldrd	wR3,  r0, MMX_WR3
+	wldrd	wR4,  r0, MMX_WR4
+	wldrd	wR5,  r0, MMX_WR5
+	wldrd	wR6,  r0, MMX_WR6
+	wldrd	wR7,  r0, MMX_WR7
+	wldrd	wR8,  r0, MMX_WR8
+	wldrd	wR9,  r0, MMX_WR9
+	wldrd	wR10, r0, MMX_WR10
+	wldrd	wR11, r0, MMX_WR11
+	wldrd	wR12, r0, MMX_WR12
+	wldrd	wR13, r0, MMX_WR13
+	wldrd	wR14, r0, MMX_WR14
+	wldrd	wR15, r0, MMX_WR15

 	@ Load wCx
-	wldrw	wCSSF, [r0, #MMX_WCSSF]
-	wldrw	wCASF, [r0, #MMX_WCASF]
-	wldrw	wCGR0, [r0, #MMX_WCGR0]
-	wldrw	wCGR1, [r0, #MMX_WCGR1]
-	wldrw	wCGR2, [r0, #MMX_WCGR2]
-	wldrw	wCGR3, [r0, #MMX_WCGR3]
+	wldrw	wCSSF, r0, MMX_WCSSF
+	wldrw	wCASF, r0, MMX_WCASF
+	wldrw	wCGR0, r0, MMX_WCGR0
+	wldrw	wCGR1, r0, MMX_WCGR1
+	wldrw	wCGR2, r0, MMX_WCGR2
+	wldrw	wCGR3, r0, MMX_WCGR3

 	@ clear CUP/MUP (only if r1 != 0)
 	teq	r1, #0

arch/arm/kernel/iwmmxt.h (new file, 47 lines)

@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __IWMMXT_H__
+#define __IWMMXT_H__
+
+.irp b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
+.set .LwR\b, \b
+.set .Lr\b, \b
+.endr
+
+.set .LwCSSF, 0x2
+.set .LwCASF, 0x3
+.set .LwCGR0, 0x8
+.set .LwCGR1, 0x9
+.set .LwCGR2, 0xa
+.set .LwCGR3, 0xb
+
+.macro wldrd, reg:req, base:req, offset:req
+.inst 0xedd00100 | (.L\reg << 12) | (.L\base << 16) | (\offset >> 2)
+.endm
+
+.macro wldrw, reg:req, base:req, offset:req
+.inst 0xfd900100 | (.L\reg << 12) | (.L\base << 16) | (\offset >> 2)
+.endm
+
+.macro wstrd, reg:req, base:req, offset:req
+.inst 0xedc00100 | (.L\reg << 12) | (.L\base << 16) | (\offset >> 2)
+.endm
+
+.macro wstrw, reg:req, base:req, offset:req
+.inst 0xfd800100 | (.L\reg << 12) | (.L\base << 16) | (\offset >> 2)
+.endm
+
+#ifdef __clang__
+
+#define wCon c1
+
+.macro tmrc, dest:req, control:req
+mrc p1, 0, \dest, \control, c0, 0
+.endm
+
+.macro tmcr, control:req, src:req
+mcr p1, 0, \src, \control, c0, 0
+.endm
+#endif
+
+#endif
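These macros hand-encode the coprocessor loads/stores via .inst, which is what
lets Clang's integrated assembler, which lacks the iWMMXt mnemonics, build
iwmmxt.S. The same arithmetic in C, for illustration only (the 0x50 offset is a
stand-in; real offsets come from asm-offsets):

	#include <stdint.h>

	/* Mirrors the wldrd macro above: 8-bit word offset, so byte
	 * offsets must be multiples of 4. */
	static uint32_t encode_wldrd(uint32_t wreg, uint32_t base, uint32_t offset)
	{
		return 0xedd00100u | (wreg << 12) | (base << 16) | (offset >> 2);
	}

	/* encode_wldrd(10, 0, 0x50) == 0xedd0a114, i.e. "wldrd wR10, [r0, #0x50]" */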


@@ -335,6 +335,11 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)

 #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
+#define page_to_virt(x)	({						\
+	__typeof__(x) __page = x;					\
+	void *__addr = __va(page_to_phys(__page));			\
+	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
+})
 #define virt_to_page(x)		pfn_to_page(virt_to_pfn(x))
 #else
 #define page_to_virt(x)	({						\

@@ -65,10 +65,7 @@ extern u64 idmap_ptrs_per_pgd;
 static inline bool __cpu_uses_extended_idmap(void)
 {
-	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
-		return false;
-
-	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
+	return unlikely(idmap_t0sz != TCR_T0SZ(vabits_actual));
 }

 /*

@@ -334,7 +334,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	 */
 	adrp	x5, __idmap_text_end
 	clz	x5, x5
-	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
+	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
 	b.ge	1f			// .. then skip VA range extension

 	adr_l	x6, idmap_t0sz

@@ -460,7 +460,7 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
 	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
 }

-static inline u32 armv8pmu_read_evcntr(int idx)
+static inline u64 armv8pmu_read_evcntr(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

@@ -152,7 +152,7 @@ SYM_FUNC_END(__hyp_do_panic)
 .macro invalid_host_el1_vect
 	.align 7
-	mov	x0, xzr		/* restore_host = false */
+	mov	x0, xzr		/* host_ctxt = NULL */
 	mrs	x1, spsr_el2
 	mrs	x2, elr_el2
 	mrs	x3, par_el1

@@ -1312,8 +1312,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	 * Prevent userspace from creating a memory region outside of the IPA
 	 * space addressable by the KVM guest IPA space.
 	 */
-	if (memslot->base_gfn + memslot->npages >=
-	    (kvm_phys_size(kvm) >> PAGE_SHIFT))
+	if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
 		return -EFAULT;

 	mmap_read_lock(current->mm);

@@ -324,10 +324,9 @@ int kvm_set_ipa_limit(void)
 	}

 	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
-	WARN(kvm_ipa_limit < KVM_PHYS_SHIFT,
-	     "KVM IPA Size Limit (%d bits) is smaller than default size\n",
-	     kvm_ipa_limit);
-	kvm_info("IPA Size Limit: %d bits\n", kvm_ipa_limit);
+	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
+		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
+		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

 	return 0;
 }
@@ -356,6 +355,11 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
 			return -EINVAL;
 	} else {
 		phys_shift = KVM_PHYS_SHIFT;
+		if (phys_shift > kvm_ipa_limit) {
+			pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
+				     current->comm);
+			return -EINVAL;
+		}
 	}

 	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);

@@ -218,6 +218,18 @@ int pfn_valid(unsigned long pfn)

 	if (!valid_section(__pfn_to_section(pfn)))
 		return 0;
+
+	/*
+	 * ZONE_DEVICE memory does not have the memblock entries.
+	 * memblock_is_map_memory() check for ZONE_DEVICE based
+	 * addresses will always fail. Even the normal hotplugged
+	 * memory will never have MEMBLOCK_NOMAP flag set in their
+	 * memblock entries. Skip memblock search for all non early
+	 * memory sections covering all of hotplug memory including
+	 * both normal and ZONE_DEVICE based.
+	 */
+	if (!early_section(__pfn_to_section(pfn)))
+		return pfn_section_valid(__pfn_to_section(pfn), pfn);
 #endif
 	return memblock_is_map_memory(addr);
 }

@@ -40,7 +40,7 @@
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)

-u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

 u64 __section(".mmuoff.data.write") vabits_actual;

@@ -12,8 +12,8 @@ AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
 obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o
 poly1305-mips-y := poly1305-core.o poly1305-glue.o

-perlasm-flavour-$(CONFIG_CPU_MIPS32) := o32
-perlasm-flavour-$(CONFIG_CPU_MIPS64) := 64
+perlasm-flavour-$(CONFIG_32BIT) := o32
+perlasm-flavour-$(CONFIG_64BIT) := 64

 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@)

@@ -73,7 +73,7 @@ void __patch_exception(int exc, unsigned long addr);
 #endif

 #define OP_RT_RA_MASK	0xffff0000UL
-#define LIS_R2		0x3c020000UL
+#define LIS_R2		0x3c400000UL
 #define ADDIS_R2_R12	0x3c4c0000UL
 #define ADDI_R2_R2	0x38420000UL

@@ -59,6 +59,9 @@ struct machdep_calls {
 	int		(*pcibios_root_bridge_prepare)(struct pci_host_bridge
 				*bridge);

+	/* finds all the pci_controllers present at boot */
+	void		(*discover_phbs)(void);
+
 	/* To setup PHBs when using automatic OF platform driver for PCI */
 	int		(*pci_setup_phb)(struct pci_controller *host);

@@ -62,6 +62,9 @@ struct pt_regs
 };
 #endif

+#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs))
+
 #ifdef __powerpc64__

 /*
@@ -190,7 +193,7 @@ extern int ptrace_put_reg(struct task_struct *task, int regno,
 #define TRAP_FLAGS_MASK		0x11
 #define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
 #define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
-#define SET_FULL_REGS(regs)	((regs)->trap |= 1)
+#define SET_FULL_REGS(regs)	((regs)->trap &= ~1)
 #endif
 #define CHECK_FULL_REGS(regs)	BUG_ON(!FULL_REGS(regs))
 #define NV_REG_POISON		0xdeadbeefdeadbeefUL
@@ -205,7 +208,7 @@ extern int ptrace_put_reg(struct task_struct *task, int regno,
 #define TRAP_FLAGS_MASK		0x1F
 #define TRAP(regs)		((regs)->trap & ~TRAP_FLAGS_MASK)
 #define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
-#define SET_FULL_REGS(regs)	((regs)->trap |= 1)
+#define SET_FULL_REGS(regs)	((regs)->trap &= ~1)
 #define IS_CRITICAL_EXC(regs)	(((regs)->trap & 2) != 0)
 #define IS_MCHECK_EXC(regs)	(((regs)->trap & 4) != 0)
 #define IS_DEBUG_EXC(regs)	(((regs)->trap & 8) != 0)


@@ -71,6 +71,16 @@ static inline void disable_kernel_vsx(void)
 {
 	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
+#else
+static inline void enable_kernel_vsx(void)
+{
+	BUILD_BUG();
+}
+
+static inline void disable_kernel_vsx(void)
+{
+	BUILD_BUG();
+}
 #endif

 #ifdef CONFIG_SPE

@@ -307,7 +307,7 @@ int main(void)

 	/* Interrupt register frame */
 	DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
-	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_WITH_PT_REGS);
 	STACK_PT_REGS_OFFSET(GPR0, gpr[0]);
 	STACK_PT_REGS_OFFSET(GPR1, gpr[1]);
 	STACK_PT_REGS_OFFSET(GPR2, gpr[2]);

@@ -470,7 +470,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
 	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
 	/* MSR[RI] is clear iff using SRR regs */
-	.if IHSRR == EXC_HV_OR_STD
+	.if IHSRR_IF_HVMODE
 	BEGIN_FTR_SECTION
 	xori	r10,r10,MSR_RI
 	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)

@@ -461,10 +461,11 @@ InstructionTLBMiss:
 	cmplw	0,r1,r3
 #endif
 	mfspr	r2, SPRN_SPRG_PGDIR
-	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
 	bgt-	112f
 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
+	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
 	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
 #endif
 112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
@@ -523,9 +524,10 @@ DataLoadTLBMiss:
 	lis	r1, TASK_SIZE@h		/* check if kernel address */
 	cmplw	0,r1,r3
 	mfspr	r2, SPRN_SPRG_PGDIR
-	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
+	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
 	bgt-	112f
 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
+	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
 	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
 112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
 	lwz	r2,0(r2)		/* get pmd entry */
@@ -599,9 +601,10 @@ DataStoreTLBMiss:
 	lis	r1, TASK_SIZE@h		/* check if kernel address */
 	cmplw	0,r1,r3
 	mfspr	r2, SPRN_SPRG_PGDIR
-	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
+	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
 	bgt-	112f
 	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
+	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
 	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
 112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
 	lwz	r2,0(r2)		/* get pmd entry */

@@ -1625,3 +1625,13 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
+
+static int __init discover_phbs(void)
+{
+	if (ppc_md.discover_phbs)
+		ppc_md.discover_phbs();
+
+	return 0;
+}
+core_initcall(discover_phbs);
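A platform opts in through its machine description; a hypothetical hookup
sketch (the myplat names are illustrative, not from this merge):

	static void __init myplat_discover_phbs(void)
	{
		/* walk firmware tables / the device tree and register PHBs */
	}

	define_machine(myplat) {
		.name		= "myplat",
		.discover_phbs	= myplat_discover_phbs, /* run by the core_initcall above */
	};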


@@ -2170,7 +2170,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack,
 		 * See if this is an exception frame.
 		 * We look for the "regshere" marker in the current frame.
 		 */
-		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
+		if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
 		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
 			struct pt_regs *regs = (struct pt_regs *)
 				(sp + STACK_FRAME_OVERHEAD);

@@ -509,8 +509,11 @@ out:
 		die("Unrecoverable nested System Reset", regs, SIGABRT);
 #endif
 	/* Must die if the interrupt is not recoverable */
-	if (!(regs->msr & MSR_RI))
+	if (!(regs->msr & MSR_RI)) {
+		/* For the reason explained in die_mce, nmi_exit before die */
+		nmi_exit();
 		die("Unrecoverable System Reset", regs, SIGABRT);
+	}

 	if (saved_hsrrs) {
 		mtspr(SPRN_HSRR0, hsrr0);

@@ -211,7 +211,7 @@ static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *
 	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
 		*addrp = mfspr(SPRN_SDAR);

-	if (is_kernel_addr(mfspr(SPRN_SDAR)) && perf_allow_kernel(&event->attr) != 0)
+	if (is_kernel_addr(mfspr(SPRN_SDAR)) && event->attr.exclude_kernel)
 		*addrp = 0;
 }

@@ -477,7 +477,7 @@ static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *
 		 * addresses, hence include a check before filtering code
 		 */
 		if (!(ppmu->flags & PPMU_ARCH_31) &&
-		    is_kernel_addr(addr) && perf_allow_kernel(&event->attr) != 0)
+		    is_kernel_addr(addr) && event->attr.exclude_kernel)
 			continue;

 		/* Branches are read most recent first (ie. mfbhrb 0 is
@@ -2112,7 +2112,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 			left += period;
 			if (left <= 0)
 				left = period;
-			record = siar_valid(regs);
+
+			/*
+			 * If address is not requested in the sample via
+			 * PERF_SAMPLE_IP, just record that sample irrespective
+			 * of SIAR valid check.
+			 */
+			if (event->attr.sample_type & PERF_SAMPLE_IP)
+				record = siar_valid(regs);
+			else
+				record = 1;
+
 			event->hw.last_period = event->hw.sample_period;
 		}
 		if (left < 0x80000000LL)
@@ -2130,9 +2140,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	 * MMCR2. Check attr.exclude_kernel and address to drop the sample in
 	 * these cases.
 	 */
-	if (event->attr.exclude_kernel && record)
-		if (is_kernel_addr(mfspr(SPRN_SIAR)))
-			record = 0;
+	if (event->attr.exclude_kernel &&
+	    (event->attr.sample_type & PERF_SAMPLE_IP) &&
+	    is_kernel_addr(mfspr(SPRN_SIAR)))
+		record = 0;

 	/*
 	 * Finally record data if requested.

@@ -4,6 +4,7 @@
  * Copyright 2006-2007 Michael Ellerman, IBM Corp.
  */

+#include <linux/crash_dump.h>
 #include <linux/device.h>
 #include <linux/irq.h>
 #include <linux/msi.h>
@@ -458,8 +459,28 @@ again:
 			return hwirq;
 		}

-		virq = irq_create_mapping_affinity(NULL, hwirq,
-						   entry->affinity);
+		/*
+		 * Depending on the number of online CPUs in the original
+		 * kernel, it is likely for CPU #0 to be offline in a kdump
+		 * kernel. The associated IRQs in the affinity mappings
+		 * provided by irq_create_affinity_masks() are thus not
+		 * started by irq_startup(), as per-design for managed IRQs.
+		 * This can be a problem with multi-queue block devices driven
+		 * by blk-mq : such a non-started IRQ is very likely paired
+		 * with the single queue enforced by blk-mq during kdump (see
+		 * blk_mq_alloc_tag_set()). This causes the device to remain
+		 * silent and likely hangs the guest at some point.
+		 *
+		 * We don't really care for fine-grained affinity when doing
+		 * kdump actually : simply ignore the pre-computed affinity
+		 * masks in this case and let the default mask with all CPUs
+		 * be used when creating the IRQ mappings.
+		 */
+		if (is_kdump_kernel())
+			virq = irq_create_mapping(NULL, hwirq);
+		else
+			virq = irq_create_mapping_affinity(NULL, hwirq,
+							   entry->affinity);

 		if (!virq) {
 			pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);

@@ -775,7 +775,7 @@ static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
 static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
 {
 	struct sclp_core_entry *core;
-	cpumask_t avail;
+	static cpumask_t avail;
 	bool configured;
 	u16 core_id;
 	int nr, i;

@@ -57,36 +57,40 @@ static inline int sparc_validate_prot(unsigned long prot, unsigned long addr)
 {
 	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI))
 		return 0;
-	if (prot & PROT_ADI) {
-		if (!adi_capable())
-			return 0;
-
-		if (addr) {
-			struct vm_area_struct *vma;
-
-			vma = find_vma(current->mm, addr);
-			if (vma) {
-				/* ADI can not be enabled on PFN
-				 * mapped pages
-				 */
-				if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
-					return 0;
-
-				/* Mergeable pages can become unmergeable
-				 * if ADI is enabled on them even if they
-				 * have identical data on them. This can be
-				 * because ADI enabled pages with identical
-				 * data may still not have identical ADI
-				 * tags on them. Disallow ADI on mergeable
-				 * pages.
-				 */
-				if (vma->vm_flags & VM_MERGEABLE)
-					return 0;
-			}
-		}
-	}
 	return 1;
 }

+#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)
+/* arch_validate_flags() - Ensure combination of flags is valid for a
+ *	VMA.
+ */
+static inline bool arch_validate_flags(unsigned long vm_flags)
+{
+	/* If ADI is being enabled on this VMA, check for ADI
+	 * capability on the platform and ensure VMA is suitable
+	 * for ADI
+	 */
+	if (vm_flags & VM_SPARC_ADI) {
+		if (!adi_capable())
+			return false;
+
+		/* ADI can not be enabled on PFN mapped pages */
+		if (vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+			return false;
+
+		/* Mergeable pages can become unmergeable
+		 * if ADI is enabled on them even if they
+		 * have identical data on them. This can be
+		 * because ADI enabled pages with identical
+		 * data may still not have identical ADI
+		 * tags on them. Disallow ADI on mergeable
+		 * pages.
+		 */
+		if (vm_flags & VM_MERGEABLE)
+			return false;
+	}
+	return true;
+}
 #endif /* CONFIG_SPARC64 */

 #endif /* __ASSEMBLY__ */
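The hook is invoked by the core mm on mmap() and mprotect(); roughly, and
simplified from the generic path in mm/mmap.c (not sparc-specific code):

	/* Sketch of the caller's side, modeled on the generic mmap path. */
	static int check_new_vma_flags(struct vm_area_struct *vma)
	{
		if (!arch_validate_flags(vma->vm_flags))
			return -EINVAL; /* e.g. PROT_ADI on a PFN-mapped or mergeable VMA */
		return 0;
	}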


@@ -197,6 +197,9 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 	size = memblock_phys_mem_size() - memblock_reserved_size();
 	*pages_avail = (size >> PAGE_SHIFT) - high_pages;

+	/* Only allow low memory to be allocated via memblock allocation */
+	memblock_set_current_limit(max_low_pfn << PAGE_SHIFT);
+
 	return max_pfn;
 }


@@ -128,7 +128,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 		regs->ax = -EFAULT;

 		instrumentation_end();
-		syscall_exit_to_user_mode(regs);
+		local_irq_disable();
+		irqentry_exit_to_user_mode(regs);
 		return false;
 	}

@@ -213,40 +214,6 @@ SYSCALL_DEFINE0(ni_syscall)
 	return -ENOSYS;
 }

-noinstr bool idtentry_enter_nmi(struct pt_regs *regs)
-{
-	bool irq_state = lockdep_hardirqs_enabled();
-
-	__nmi_enter();
-	lockdep_hardirqs_off(CALLER_ADDR0);
-	lockdep_hardirq_enter();
-	rcu_nmi_enter();
-
-	instrumentation_begin();
-	trace_hardirqs_off_finish();
-	ftrace_nmi_enter();
-	instrumentation_end();
-
-	return irq_state;
-}
-
-noinstr void idtentry_exit_nmi(struct pt_regs *regs, bool restore)
-{
-	instrumentation_begin();
-	ftrace_nmi_exit();
-	if (restore) {
-		trace_hardirqs_on_prepare();
-		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-	}
-	instrumentation_end();
-
-	rcu_nmi_exit();
-	lockdep_hardirq_exit();
-	if (restore)
-		lockdep_hardirqs_on(CALLER_ADDR0);
-	__nmi_exit();
-}
-
 #ifdef CONFIG_XEN_PV
 #ifndef CONFIG_PREEMPTION
 /*


@@ -210,6 +210,8 @@ SYM_CODE_START(entry_SYSCALL_compat)
 	/* Switch to the kernel stack */
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

+SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL)
+
 	/* Construct struct pt_regs on stack */
 	pushq	$__USER32_DS		/* pt_regs->ss */
 	pushq	%r8			/* pt_regs->sp */

@@ -3565,8 +3565,10 @@ static int intel_pmu_hw_config(struct perf_event *event)
 	if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
 		event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
 		if (!(event->attr.sample_type &
-		      ~intel_pmu_large_pebs_flags(event)))
+		      ~intel_pmu_large_pebs_flags(event))) {
 			event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
+			event->attach_state |= PERF_ATTACH_SCHED_CB;
+		}
 	}
 	if (x86_pmu.pebs_aliases)
 		x86_pmu.pebs_aliases(event);
@@ -3579,6 +3581,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		ret = intel_pmu_setup_lbr_filter(event);
 		if (ret)
 			return ret;
+		event->attach_state |= PERF_ATTACH_SCHED_CB;

 		/*
 		 * BTS is set up earlier in this path, so don't account twice
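For context, an event shaped like this is what qualifies for the large-PEBS
path, and with this fix also gets PERF_ATTACH_SCHED_CB so per-CPU buffers are
flushed at context switch. A hedged userspace sketch; the exact set of eligible
sample_type bits is whatever intel_pmu_large_pebs_flags() computes:

	#include <linux/perf_event.h>
	#include <string.h>

	/* Fixed period (not freq) and only PEBS-friendly sample_type bits. */
	static void init_large_pebs_attr(struct perf_event_attr *attr)
	{
		memset(attr, 0, sizeof(*attr));
		attr->size = sizeof(*attr);
		attr->type = PERF_TYPE_HARDWARE;
		attr->config = PERF_COUNT_HW_CPU_CYCLES;
		attr->sample_period = 100003;
		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
		attr->precise_ip = 2;		/* request PEBS */
		attr->exclude_kernel = 1;
	}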


@@ -11,9 +11,6 @@

 #include <asm/irq_stack.h>

-bool idtentry_enter_nmi(struct pt_regs *regs);
-void idtentry_exit_nmi(struct pt_regs *regs, bool irq_state);
-
 /**
  * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
  *		      No error code pushed by hardware

@@ -23,6 +23,8 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx);
 int insn_get_code_seg_params(struct pt_regs *regs);
 int insn_fetch_from_user(struct pt_regs *regs,
 			 unsigned char buf[MAX_INSN_SIZE]);
+int insn_fetch_from_user_inatomic(struct pt_regs *regs,
+				  unsigned char buf[MAX_INSN_SIZE]);
 bool insn_decode(struct insn *insn, struct pt_regs *regs,
 		 unsigned char buf[MAX_INSN_SIZE], int buf_size);

@@ -25,6 +25,7 @@ void __end_SYSENTER_singlestep_region(void);
 void entry_SYSENTER_compat(void);
 void __end_entry_SYSENTER_compat(void);
 void entry_SYSCALL_compat(void);
+void entry_SYSCALL_compat_safe_stack(void);
 void entry_INT80_compat(void);
 #ifdef CONFIG_XEN_PV
 void xen_entry_INT80_compat(void);

@@ -94,6 +94,8 @@ struct pt_regs {
 #include <asm/paravirt_types.h>
 #endif

+#include <asm/proto.h>
+
 struct cpuinfo_x86;
 struct task_struct;

@@ -175,6 +177,19 @@ static inline bool any_64bit_mode(struct pt_regs *regs)
 #ifdef CONFIG_X86_64
 #define current_user_stack_pointer()	current_pt_regs()->sp
 #define compat_user_stack_pointer()	current_pt_regs()->sp
+
+static inline bool ip_within_syscall_gap(struct pt_regs *regs)
+{
+	bool ret = (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
+		    regs->ip <  (unsigned long)entry_SYSCALL_64_safe_stack);
+
+#ifdef CONFIG_IA32_EMULATION
+	ret = ret || (regs->ip >= (unsigned long)entry_SYSCALL_compat &&
+		      regs->ip <  (unsigned long)entry_SYSCALL_compat_safe_stack);
+#endif
+
+	return ret;
+}
 #endif

 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)

@@ -1986,7 +1986,7 @@ void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;

 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 {
-	bool irq_state;
+	irqentry_state_t irq_state;

 	WARN_ON_ONCE(user_mode(regs));

@@ -1998,7 +1998,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 	    mce_check_crashing_cpu())
 		return;

-	irq_state = idtentry_enter_nmi(regs);
+	irq_state = irqentry_nmi_enter(regs);
 	/*
 	 * The call targets are marked noinstr, but objtool can't figure
 	 * that out because it's an indirect call. Annotate it.
@@ -2009,7 +2009,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 	if (regs->flags & X86_EFLAGS_IF)
 		trace_hardirqs_on_prepare();
 	instrumentation_end();
-	idtentry_exit_nmi(regs, irq_state);
+	irqentry_nmi_exit(regs, irq_state);
 }

 static __always_inline void exc_machine_check_user(struct pt_regs *regs)

@@ -269,21 +269,20 @@ static void __init kvmclock_init_mem(void)

 static int __init kvm_setup_vsyscall_timeinfo(void)
 {
-#ifdef CONFIG_X86_64
-	u8 flags;
-
-	if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
-		return 0;
-
-	flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
-	if (!(flags & PVCLOCK_TSC_STABLE_BIT))
-		return 0;
-
-	kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
-#endif
-
 	kvmclock_init_mem();

+#ifdef CONFIG_X86_64
+	if (per_cpu(hv_clock_per_cpu, 0) && kvmclock_vsyscall) {
+		u8 flags;
+
+		flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
+		if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+			return 0;
+
+		kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
+	}
+#endif
+
 	return 0;
 }
 early_initcall(kvm_setup_vsyscall_timeinfo);


@@ -475,7 +475,7 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7);

 DEFINE_IDTENTRY_RAW(exc_nmi)
 {
-	bool irq_state;
+	irqentry_state_t irq_state;

 	/*
 	 * Re-enable NMIs right here when running as an SEV-ES guest. This might
@@ -502,14 +502,14 @@ nmi_restart:

 	this_cpu_write(nmi_dr7, local_db_save());

-	irq_state = idtentry_enter_nmi(regs);
+	irq_state = irqentry_nmi_enter(regs);

 	inc_irq_stat(__nmi_count);

 	if (!ignore_nmis)
 		default_do_nmi(regs);

-	idtentry_exit_nmi(regs, irq_state);
+	irqentry_nmi_exit(regs, irq_state);

 	local_db_restore(this_cpu_read(nmi_dr7));

@@ -121,8 +121,18 @@ static void __init setup_vc_stacks(int cpu)
 	cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
 }

-static __always_inline bool on_vc_stack(unsigned long sp)
+static __always_inline bool on_vc_stack(struct pt_regs *regs)
 {
+	unsigned long sp = regs->sp;
+
+	/* User-mode RSP is not trusted */
+	if (user_mode(regs))
+		return false;
+
+	/* SYSCALL gap still has user-mode RSP */
+	if (ip_within_syscall_gap(regs))
+		return false;
+
 	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
 }

@@ -144,7 +154,7 @@ void noinstr __sev_es_ist_enter(struct pt_regs *regs)
 	old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

 	/* Make room on the IST stack */
-	if (on_vc_stack(regs->sp))
+	if (on_vc_stack(regs))
 		new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
 	else
 		new_ist = old_ist - sizeof(old_ist);
@@ -248,7 +258,7 @@ static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
 	int res;

 	if (user_mode(ctxt->regs)) {
-		res = insn_fetch_from_user(ctxt->regs, buffer);
+		res = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
 		if (!res) {
 			ctxt->fi.vector     = X86_TRAP_PF;
 			ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
@@ -1248,13 +1258,12 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
 DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
 {
 	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
+	irqentry_state_t irq_state;
 	struct ghcb_state state;
 	struct es_em_ctxt ctxt;
 	enum es_result result;
 	struct ghcb *ghcb;

-	lockdep_assert_irqs_disabled();
-
 	/*
 	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
 	 */
@@ -1263,6 +1272,8 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
 		return;
 	}

+	irq_state = irqentry_nmi_enter(regs);
+	lockdep_assert_irqs_disabled();
 	instrumentation_begin();

 	/*
@@ -1325,6 +1336,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)

 out:
 	instrumentation_end();
+	irqentry_nmi_exit(regs, irq_state);

 	return;

@@ -406,7 +406,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
 	}
 #endif

-	idtentry_enter_nmi(regs);
+	irqentry_nmi_enter(regs);
 	instrumentation_begin();
 	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

@@ -652,12 +652,13 @@ DEFINE_IDTENTRY_RAW(exc_int3)
 		instrumentation_end();
 		irqentry_exit_to_user_mode(regs);
 	} else {
-		bool irq_state = idtentry_enter_nmi(regs);
+		irqentry_state_t irq_state = irqentry_nmi_enter(regs);
+
 		instrumentation_begin();
 		if (!do_int3(regs))
 			die("int3", regs, 0);
 		instrumentation_end();
-		idtentry_exit_nmi(regs, irq_state);
+		irqentry_nmi_exit(regs, irq_state);
 	}
 }

@@ -686,8 +687,7 @@ asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *r
 	 * In the SYSCALL entry path the RSP value comes from user-space - don't
 	 * trust it and switch to the current kernel stack
 	 */
-	if (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
-	    regs->ip <  (unsigned long)entry_SYSCALL_64_safe_stack) {
+	if (ip_within_syscall_gap(regs)) {
 		sp = this_cpu_read(cpu_current_top_of_stack);
 		goto sync;
 	}
@@ -852,7 +852,7 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 	 * includes the entry stack is excluded for everything.
 	 */
 	unsigned long dr7 = local_db_save();
-	bool irq_state = idtentry_enter_nmi(regs);
+	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 	instrumentation_begin();

 	/*
@@ -909,7 +909,7 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 		regs->flags &= ~X86_EFLAGS_TF;
 out:
 	instrumentation_end();
-	idtentry_exit_nmi(regs, irq_state);
+	irqentry_nmi_exit(regs, irq_state);

 	local_db_restore(dr7);
 }
@@ -927,7 +927,7 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,

 	/*
 	 * NB: We can't easily clear DR7 here because
-	 * idtentry_exit_to_usermode() can invoke ptrace, schedule, access
+	 * irqentry_exit_to_usermode() can invoke ptrace, schedule, access
 	 * user memory, etc.  This means that a recursive #DB is possible.  If
 	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
 	 * Since we're not on the IST stack right now, everything will be

@@ -367,8 +367,8 @@ static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
return false;
*ip = regs->ip;
*sp = regs->sp;
*ip = READ_ONCE_NOCHECK(regs->ip);
*sp = READ_ONCE_NOCHECK(regs->sp);
return true;
}
@@ -380,8 +380,8 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
return false;
*ip = regs->ip;
*sp = regs->sp;
*ip = READ_ONCE_NOCHECK(regs->ip);
*sp = READ_ONCE_NOCHECK(regs->sp);
return true;
}
@@ -402,12 +402,12 @@ static bool get_reg(struct unwind_state *state, unsigned int reg_off,
return false;
if (state->full_regs) {
*val = ((unsigned long *)state->regs)[reg];
*val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
return true;
}
if (state->prev_regs) {
*val = ((unsigned long *)state->prev_regs)[reg];
*val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
return true;
}


@@ -1641,7 +1641,16 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
}
if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
kvm_wait_lapic_expire(vcpu);
/*
* Ensure the guest's timer has truly expired before posting an
* interrupt. Open code the relevant checks to avoid querying
* lapic_timer_int_injected(), which will be false since the
* interrupt isn't yet injected. Waiting until after injecting
* is not an option since that won't help a posted interrupt.
*/
if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
vcpu->arch.apic->lapic_timer.timer_advance_ns)
__kvm_wait_lapic_expire(vcpu);
kvm_apic_inject_pending_timer_irqs(apic);
return;
}


@@ -1415,6 +1415,25 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
}
}
static unsigned long insn_get_effective_ip(struct pt_regs *regs)
{
unsigned long seg_base = 0;
/*
* If not in user-space long mode, a custom code segment could be in
* use. This is true in protected mode (if the process defined a local
* descriptor table), or virtual-8086 mode. In most of the cases
* seg_base will be zero as in USER_CS.
*/
if (!user_64bit_mode(regs)) {
seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
if (seg_base == -1L)
return 0;
}
return seg_base + regs->ip;
}
/**
* insn_fetch_from_user() - Copy instruction bytes from user-space memory
* @regs: Structure with register values as seen when entering kernel mode
@@ -1431,24 +1450,43 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
*/
int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
{
unsigned long seg_base = 0;
unsigned long ip;
int not_copied;
/*
* If not in user-space long mode, a custom code segment could be in
* use. This is true in protected mode (if the process defined a local
* descriptor table), or virtual-8086 mode. In most of the cases
* seg_base will be zero as in USER_CS.
*/
if (!user_64bit_mode(regs)) {
seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
if (seg_base == -1L)
return 0;
}
ip = insn_get_effective_ip(regs);
if (!ip)
return 0;
not_copied = copy_from_user(buf, (void __user *)ip, MAX_INSN_SIZE);
not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip),
MAX_INSN_SIZE);
return MAX_INSN_SIZE - not_copied;
}
/**
* insn_fetch_from_user_inatomic() - Copy instruction bytes from user-space memory
* while in atomic code
* @regs: Structure with register values as seen when entering kernel mode
* @buf: Array to store the fetched instruction
*
* Gets the linear address of the instruction and copies the instruction
* bytes into @buf. This function must be used in atomic context.
*
* Returns:
*
* Number of instruction bytes copied.
*
* 0 if nothing was copied.
*/
int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
{
unsigned long ip;
int not_copied;
ip = insn_get_effective_ip(regs);
if (!ip)
return 0;
not_copied = __copy_from_user_inatomic(buf, (void __user *)ip, MAX_INSN_SIZE);
return MAX_INSN_SIZE - not_copied;
}
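
A note on the two fetch variants above: insn_fetch_from_user() uses copy_from_user() and may sleep on a fault, while the _inatomic flavor is meant for contexts such as the #VC handler change earlier in this diff. A hedged usage sketch (the explicit pagefault_disable()/pagefault_enable() bracketing is an assumption for callers that are not already atomic):

	unsigned char buf[MAX_INSN_SIZE];
	int nbytes;

	pagefault_disable();
	nbytes = insn_fetch_from_user_inatomic(regs, buf);
	pagefault_enable();
	if (!nbytes)
		return -EFAULT;	/* hypothetical error path */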


@@ -318,6 +318,22 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
return 0;
}
static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
const struct blk_zone_range *zrange)
{
loff_t start, end;
if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
/* Out of range */
return -EINVAL;
start = zrange->sector << SECTOR_SHIFT;
end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
return truncate_bdev_range(bdev, mode, start, end);
}
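
A quick worked example of the range math above, with hypothetical values and the usual SECTOR_SHIFT of 9; note the helper's first check also rejects nr_sectors == 0 and 64-bit wrap-around:

	/* zrange->sector = 2048, zrange->nr_sectors = 524288 (illustrative) */
	start = 2048ULL << SECTOR_SHIFT;			/* 1 MiB */
	end = ((2048ULL + 524288) << SECTOR_SHIFT) - 1;	/* 257 MiB - 1, inclusive */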
/*
* BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
* Called from blkdev_ioctl.
@@ -329,6 +345,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
struct request_queue *q;
struct blk_zone_range zrange;
enum req_opf op;
int ret;
if (!argp)
return -EINVAL;
@@ -352,6 +369,11 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
switch (cmd) {
case BLKRESETZONE:
op = REQ_OP_ZONE_RESET;
/* Invalidate the page cache, including dirty pages. */
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
if (ret)
return ret;
break;
case BLKOPENZONE:
op = REQ_OP_ZONE_OPEN;
@@ -366,8 +388,20 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
return -ENOTTY;
}
return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
GFP_KERNEL);
ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
GFP_KERNEL);
/*
* Invalidate the page cache again for zone reset: writes can only be
* direct for zoned devices so concurrent writes would not add any page
* to the page cache after/during reset. The page cache may be filled
* again due to concurrent reads though and dropping the pages for
* these is fine.
*/
if (!ret && cmd == BLKRESETZONE)
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
return ret;
}
static inline unsigned long *blk_alloc_zone_bitmap(int node,


@@ -772,7 +772,7 @@ config CRYPTO_POLY1305_X86_64
config CRYPTO_POLY1305_MIPS
tristate "Poly1305 authenticator algorithm (MIPS optimized)"
depends on CPU_MIPS32 || (CPU_MIPS64 && 64BIT)
depends on MIPS
select CRYPTO_ARCH_HAVE_LIB_POLY1305
config CRYPTO_MD4


@@ -290,20 +290,20 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
}
/*
* phys_device is a bad name for this. What I really want
* is a way to differentiate between memory ranges that
* are part of physical devices that constitute
* a complete removable unit or fru.
* i.e. do these ranges belong to the same physical device,
* s.t. if I offline all of these sections I can then
* remove the physical device?
* Legacy interface that we cannot remove: s390x exposes the storage increment
* covered by a memory block, allowing for identifying which memory blocks
* comprise a storage increment. Since a memory block spans complete
* storage increments nowadays, this interface is basically unused. Other
* archs never exposed != 0.
*/
static ssize_t phys_device_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
return sysfs_emit(buf, "%d\n", mem->phys_device);
return sysfs_emit(buf, "%d\n",
arch_get_memory_phys_device(start_pfn));
}
#ifdef CONFIG_MEMORY_HOTREMOVE
@@ -488,11 +488,7 @@ static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif
/*
* Note that phys_device is optional. It is here to allow for
* differentiation between which *physical* devices each
* section belongs to...
*/
/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
return 0;
@@ -574,7 +570,6 @@ int register_memory(struct memory_block *memory)
static int init_memory_block(unsigned long block_id, unsigned long state)
{
struct memory_block *mem;
unsigned long start_pfn;
int ret = 0;
mem = find_memory_block_by_id(block_id);
@@ -588,8 +583,6 @@ static int init_memory_block(unsigned long block_id, unsigned long state)
mem->start_section_nr = block_id * sections_per_block;
mem->state = state;
start_pfn = section_nr_to_pfn(mem->start_section_nr);
mem->phys_device = arch_get_memory_phys_device(start_pfn);
mem->nid = NUMA_NO_NODE;
ret = register_memory(mem);


@@ -799,6 +799,9 @@ int software_node_register(const struct software_node *node)
if (software_node_to_swnode(node))
return -EEXIST;
if (node->parent && !parent)
return -EINVAL;
return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0));
}
EXPORT_SYMBOL_GPL(software_node_register);


@@ -2,3 +2,4 @@
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
obj-$(CONFIG_KUNIT_DRIVER_PE_TEST) += property-entry-test.o
CFLAGS_REMOVE_property-entry-test.o += -fplugin-arg-structleak_plugin-byref -fplugin-arg-structleak_plugin-byref-all


@@ -871,6 +871,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
if (!card->event_wq) {
dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
st = -ENOMEM;
goto failed_event_handler;
}


@@ -637,7 +637,7 @@ static ssize_t writeback_store(struct device *dev,
struct bio_vec bio_vec;
struct page *page;
ssize_t ret = len;
int mode;
int mode, err;
unsigned long blk_idx = 0;
if (sysfs_streq(buf, "idle"))
@@ -738,12 +738,17 @@ static ssize_t writeback_store(struct device *dev,
* XXX: A single page IO would be inefficient for write
* but it would be not bad as starter.
*/
ret = submit_bio_wait(&bio);
if (ret) {
err = submit_bio_wait(&bio);
if (err) {
zram_slot_lock(zram, index);
zram_clear_flag(zram, index, ZRAM_UNDER_WB);
zram_clear_flag(zram, index, ZRAM_IDLE);
zram_slot_unlock(zram, index);
/*
* Return the last IO error unless every
* IO succeeded.
*/
ret = err;
continue;
}


@@ -183,7 +183,10 @@ static inline int gdsc_assert_reset(struct gdsc *sc)
static inline void gdsc_force_mem_on(struct gdsc *sc)
{
int i;
u32 mask = RETAIN_MEM | RETAIN_PERIPH;
u32 mask = RETAIN_MEM;
if (!(sc->flags & NO_RET_PERIPH))
mask |= RETAIN_PERIPH;
for (i = 0; i < sc->cxc_count; i++)
regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
@@ -192,7 +195,10 @@ static inline void gdsc_force_mem_on(struct gdsc *sc)
static inline void gdsc_clear_mem_on(struct gdsc *sc)
{
int i;
u32 mask = RETAIN_MEM | RETAIN_PERIPH;
u32 mask = RETAIN_MEM;
if (!(sc->flags & NO_RET_PERIPH))
mask |= RETAIN_PERIPH;
for (i = 0; i < sc->cxc_count; i++)
regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);


@@ -42,7 +42,7 @@ struct gdsc {
#define PWRSTS_ON BIT(2)
#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
const u8 flags;
const u16 flags;
#define VOTABLE BIT(0)
#define CLAMP_IO BIT(1)
#define HW_CTRL BIT(2)
@@ -51,6 +51,7 @@ struct gdsc {
#define POLL_CFG_GDSCR BIT(5)
#define ALWAYS_ON BIT(6)
#define RETAIN_FF_ENABLE BIT(7)
#define NO_RET_PERIPH BIT(8)
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;


@@ -253,12 +253,16 @@ static struct gdsc gpu_cx_gdsc = {
static struct gdsc gpu_gx_gdsc = {
.gdscr = 0x1094,
.clamp_io_ctrl = 0x130,
.resets = (unsigned int []){ GPU_GX_BCR },
.reset_count = 1,
.cxcs = (unsigned int []){ 0x1098 },
.cxc_count = 1,
.pd = {
.name = "gpu_gx",
},
.parent = &gpu_cx_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
.flags = CLAMP_IO | AON_RESET,
.pwrsts = PWRSTS_OFF_ON | PWRSTS_RET,
.flags = CLAMP_IO | SW_RESET | AON_RESET | NO_RET_PERIPH,
};
static struct clk_regmap *gpucc_msm8998_clocks[] = {


@@ -317,9 +317,9 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
}
base = ioremap(res->start, resource_size(res));
if (IS_ERR(base)) {
if (!base) {
dev_err(dev, "failed to map resource %pR\n", res);
ret = PTR_ERR(base);
ret = -ENOMEM;
goto release_region;
}
@@ -368,7 +368,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
error:
kfree(data);
unmap_base:
iounmap(data->base);
iounmap(base);
release_region:
release_mem_region(res->start, resource_size(res));
return ret;


@@ -96,6 +96,18 @@ static void install_memreserve_table(void)
efi_err("Failed to install memreserve config table!\n");
}
static u32 get_supported_rt_services(void)
{
const efi_rt_properties_table_t *rt_prop_table;
u32 supported = EFI_RT_SUPPORTED_ALL;
rt_prop_table = get_efi_config_table(EFI_RT_PROPERTIES_TABLE_GUID);
if (rt_prop_table)
supported &= rt_prop_table->runtime_services_supported;
return supported;
}
/*
* EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint
* that is described in the PE/COFF header. Most of the code is the same
@@ -250,6 +262,10 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
(prop_tbl->memory_protection_attribute &
EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA);
/* force efi_novamap if SetVirtualAddressMap() is unsupported */
efi_novamap |= !(get_supported_rt_services() &
EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP);
/* hibernation expects the runtime regions to stay in the same place */
if (!IS_ENABLED(CONFIG_HIBERNATION) && !efi_nokaslr && !flat_va_mapping) {
/*


@@ -112,8 +112,29 @@ MODULE_DEVICE_TABLE(i2c, pca953x_id);
#ifdef CONFIG_GPIO_PCA953X_IRQ
#include <linux/dmi.h>
#include <linux/gpio.h>
#include <linux/list.h>
static const struct acpi_gpio_params pca953x_irq_gpios = { 0, 0, true };
static const struct acpi_gpio_mapping pca953x_acpi_irq_gpios[] = {
{ "irq-gpios", &pca953x_irq_gpios, 1, ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER },
{ }
};
static int pca953x_acpi_get_irq(struct device *dev)
{
int ret;
ret = devm_acpi_dev_add_driver_gpios(dev, pca953x_acpi_irq_gpios);
if (ret)
dev_warn(dev, "can't add GPIO ACPI mapping\n");
ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq-gpios", 0);
if (ret < 0)
return ret;
dev_info(dev, "ACPI interrupt quirk (IRQ %d)\n", ret);
return ret;
}
static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = {
{
@@ -132,59 +153,6 @@ static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = {
},
{}
};
#ifdef CONFIG_ACPI
static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data)
{
struct acpi_resource_gpio *agpio;
int *pin = data;
if (acpi_gpio_get_irq_resource(ares, &agpio))
*pin = agpio->pin_table[0];
return 1;
}
static int pca953x_acpi_find_pin(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
int pin = -ENOENT, ret;
LIST_HEAD(r);
ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin);
acpi_dev_free_resource_list(&r);
if (ret < 0)
return ret;
return pin;
}
#else
static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; }
#endif
static int pca953x_acpi_get_irq(struct device *dev)
{
int pin, ret;
pin = pca953x_acpi_find_pin(dev);
if (pin < 0)
return pin;
dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin);
if (!gpio_is_valid(pin))
return -EINVAL;
ret = gpio_request(pin, "pca953x interrupt");
if (ret)
return ret;
ret = gpio_to_irq(pin);
/* When pin is used as an IRQ, no need to keep it requested */
gpio_free(pin);
return ret;
}
#endif
static const struct acpi_device_id pca953x_acpi_ids[] = {


@@ -649,6 +649,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
if (!lookup->desc) {
const struct acpi_resource_gpio *agpio = &ares->data.gpio;
bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
struct gpio_desc *desc;
int pin_index;
if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
@@ -661,8 +662,12 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
if (pin_index >= agpio->pin_table_length)
return 1;
lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
if (lookup->info.quirks & ACPI_GPIO_QUIRK_ABSOLUTE_NUMBER)
desc = gpio_to_desc(agpio->pin_table[pin_index]);
else
desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
agpio->pin_table[pin_index]);
lookup->desc = desc;
lookup->info.pin_config = agpio->pin_config;
lookup->info.gpioint = gpioint;
@@ -911,8 +916,9 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
}
/**
* acpi_dev_gpio_irq_get() - Find GpioInt and translate it to Linux IRQ number
* acpi_dev_gpio_irq_get_by() - Find GpioInt and translate it to Linux IRQ number
* @adev: pointer to a ACPI device to get IRQ from
* @name: optional name of GpioInt resource
* @index: index of GpioInt resource (starting from %0)
*
* If the device has one or more GpioInt resources, this function can be
@@ -922,9 +928,12 @@ struct gpio_desc *acpi_node_get_gpiod(struct fwnode_handle *fwnode,
* The function is idempotent, though each time it runs it will configure GPIO
* pin direction according to the flags in GpioInt resource.
*
* The function takes an optional @name parameter. If set, only GpioInt
* resources exposed under that property name are taken into account.
*
* Return: Linux IRQ number (> %0) on success, negative errno on failure.
*/
int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, int index)
{
int idx, i;
unsigned int irq_flags;
@@ -934,7 +943,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
struct acpi_gpio_info info;
struct gpio_desc *desc;
desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
desc = acpi_get_gpiod_by_index(adev, name, i, &info);
/* Ignore -EPROBE_DEFER, it only matters if idx matches */
if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
@@ -971,7 +980,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
}
return -ENOENT;
}
EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get_by);
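
With the export renamed to the _by() variant, existing acpi_dev_gpio_irq_get() callers presumably keep working through a thin inline wrapper along these lines (a sketch; the exact header placement is an assumption):

static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
	return acpi_dev_gpio_irq_get_by(adev, NULL, index);
}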
static acpi_status
acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,


@@ -474,8 +474,12 @@ EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
static void gpiodevice_release(struct device *dev)
{
struct gpio_device *gdev = dev_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
list_del(&gdev->list);
spin_unlock_irqrestore(&gpio_lock, flags);
ida_free(&gpio_ida, gdev->id);
kfree_const(gdev->label);
kfree(gdev->descs);


@@ -178,6 +178,7 @@ extern uint amdgpu_smu_memory_pool_size;
extern uint amdgpu_dc_feature_mask;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dm_abm_level;
extern int amdgpu_backlight;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;


@@ -768,6 +768,10 @@ uint amdgpu_dm_abm_level = 0;
MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
int amdgpu_backlight = -1;
MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
module_param_named(backlight, amdgpu_backlight, bint, 0444);
/**
* DOC: tmz (int)
* Trusted Memory Zone (TMZ) is a method to protect data being written


@@ -2140,6 +2140,11 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->ext_caps->bits.hdr_aux_backlight_control == 1)
caps->aux_support = true;
if (amdgpu_backlight == 0)
caps->aux_support = false;
else if (amdgpu_backlight == 1)
caps->aux_support = true;
/* From the specification (CTA-861-G), for calculating the maximum
* luminance we need to use:
* Luminance = 50*2**(CV/32)
@@ -3038,19 +3043,6 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
#endif
}
static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
{
bool rc;
if (!link)
return 1;
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
return rc ? 0 : 1;
}
static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
unsigned *min, unsigned *max)
{
@@ -3113,9 +3105,10 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
brightness = convert_brightness_from_user(&caps, bd->props.brightness);
// Change brightness based on AUX property
if (caps.aux_support)
return set_backlight_via_aux(link, brightness);
rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
else
rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
return rc ? 0 : 1;
}
@@ -3123,11 +3116,27 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
int ret = dc_link_get_backlight_level(dm->backlight_link);
struct amdgpu_dm_backlight_caps caps;
if (ret == DC_ERROR_UNEXPECTED)
return bd->props.brightness;
return convert_brightness_to_user(&dm->backlight_caps, ret);
amdgpu_dm_update_backlight_caps(dm);
caps = dm->backlight_caps;
if (caps.aux_support) {
struct dc_link *link = (struct dc_link *)dm->backlight_link;
u32 avg, peak;
bool rc;
rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
if (!rc)
return bd->props.brightness;
return convert_brightness_to_user(&caps, avg);
} else {
int ret = dc_link_get_backlight_level(dm->backlight_link);
if (ret == DC_ERROR_UNEXPECTED)
return bd->props.brightness;
return convert_brightness_to_user(&caps, ret);
}
}
static const struct backlight_ops amdgpu_dm_backlight_ops = {


@@ -2555,7 +2555,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
if (pipe_ctx->plane_state == NULL)
frame_ramp = 0;
} else {
ASSERT(false);
return false;
}


@@ -1058,8 +1058,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
{
int i;
DC_FP_START();
if (dc->bb_overrides.sr_exit_time_ns) {
for (i = 0; i < WM_SET_COUNT; i++) {
dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
@@ -1084,8 +1082,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
}
DC_FP_END();
}
void dcn21_calculate_wm(
@@ -1183,7 +1179,7 @@ static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc,
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
BW_VAL_TRACE_COUNT();


@@ -1506,6 +1506,48 @@ static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
return 0;
}
static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
uint32_t pcie_gen = 0, pcie_width = 0;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
int i;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
pcie_gen = 3;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
pcie_gen = 2;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
pcie_gen = 1;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
pcie_gen = 0;
if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
pcie_width = 6;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
pcie_width = 5;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
pcie_width = 4;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
pcie_width = 3;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
pcie_width = 2;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
pcie_width = 1;
for (i = 0; i < NUM_LINK_LEVELS; i++) {
if (pp_table->PcieGenSpeed[i] > pcie_gen)
pp_table->PcieGenSpeed[i] = pcie_gen;
if (pp_table->PcieLaneCount[i] > pcie_width)
pp_table->PcieLaneCount[i] = pcie_width;
}
return 0;
}
static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
{
int result = -1;
@@ -2557,6 +2599,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
"Failed to initialize Link Level!",
return result);
result = vega10_override_pcie_parameters(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to override pcie parameters!",
return result);
result = vega10_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize Graphics Level!",
@@ -2923,6 +2970,7 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
return 0;
}
static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
{
struct vega10_hwmgr *data = hwmgr->backend;


@@ -481,6 +481,67 @@ static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
dpm_state->hard_max_level = 0xffff;
}
static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
int i;
int ret;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
pcie_gen = 3;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
pcie_gen = 2;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
pcie_gen = 1;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
pcie_gen = 0;
if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
pcie_width = 6;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
pcie_width = 5;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
pcie_width = 4;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
pcie_width = 3;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
pcie_width = 2;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
pcie_width = 1;
/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
* Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
* Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
*/
for (i = 0; i < NUM_LINK_LEVELS; i++) {
pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
pp_table->PcieGenSpeed[i];
pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
pp_table->PcieLaneCount[i];
if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
pp_table->PcieLaneCount[i]) {
smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
NULL);
PP_ASSERT_WITH_CODE(!ret,
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
}
/* update the pptable */
pp_table->PcieGenSpeed[i] = pcie_gen_arg;
pp_table->PcieLaneCount[i] = pcie_width_arg;
}
return 0;
}
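
To make the bit layout concrete, one worked encoding with illustrative values: capping link level 1 to GEN3 (pcie_gen_arg = 2) at x16 (pcie_width_arg = 6) yields:

	smu_pcie_arg = (1 << 16) | (2 << 8) | 6;	/* == 0x10206 */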
static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
PPCLK_e clk_id, uint32_t *num_of_levels)
{
@@ -969,6 +1030,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to enable all smu features!",
return result);
result = vega12_override_pcie_parameters(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"[EnableDPMTasks] Failed to override pcie parameters!",
return result);
tmp_result = vega12_power_control_set_level(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to power control set level!",


@@ -832,7 +832,9 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
PPTable_t *pp_table = &(data->smc_state_table.pp_table);
int i;
int ret;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
@@ -861,17 +863,27 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
* Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
* Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
*/
smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
NULL);
PP_ASSERT_WITH_CODE(!ret,
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
for (i = 0; i < NUM_LINK_LEVELS; i++) {
pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
pp_table->PcieGenSpeed[i];
pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
pp_table->PcieLaneCount[i];
data->pcie_parameters_override = true;
data->pcie_gen_level1 = pcie_gen;
data->pcie_width_level1 = pcie_width;
if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
pp_table->PcieLaneCount[i]) {
smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
NULL);
PP_ASSERT_WITH_CODE(!ret,
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
}
/* update the pptable */
pp_table->PcieGenSpeed[i] = pcie_gen_arg;
pp_table->PcieLaneCount[i] = pcie_width_arg;
}
return 0;
}
@@ -3320,9 +3332,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
data->od8_settings.od8_settings_array;
OverDriveTable_t *od_table =
&(data->smc_state_table.overdrive_table);
struct phm_ppt_v3_information *pptable_information =
(struct phm_ppt_v3_information *)hwmgr->pptable;
PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
PPTable_t *pptable = &(data->smc_state_table.pp_table);
struct pp_clock_levels_with_latency clocks;
struct vega20_single_dpm_table *fclk_dpm_table =
&(data->dpm_table.fclk_table);
@@ -3421,13 +3431,9 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
current_lane_width =
vega20_get_current_pcie_link_width_level(hwmgr);
for (i = 0; i < NUM_LINK_LEVELS; i++) {
if (i == 1 && data->pcie_parameters_override) {
gen_speed = data->pcie_gen_level1;
lane_width = data->pcie_width_level1;
} else {
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
}
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :


@@ -340,13 +340,14 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
if (--shmem->vmap_use_count > 0)
return;
if (obj->import_attach)
if (obj->import_attach) {
dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
else
} else {
vunmap(shmem->vaddr);
drm_gem_shmem_put_pages(shmem);
}
shmem->vaddr = NULL;
drm_gem_shmem_put_pages(shmem);
}
/*
@@ -534,14 +535,28 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
loff_t num_pages = obj->size >> PAGE_SHIFT;
vm_fault_t ret;
struct page *page;
pgoff_t page_offset;
if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
return VM_FAULT_SIGBUS;
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
page = shmem->pages[vmf->pgoff];
mutex_lock(&shmem->pages_lock);
return vmf_insert_page(vma, vmf->address, page);
if (page_offset >= num_pages ||
WARN_ON_ONCE(!shmem->pages) ||
shmem->madv < 0) {
ret = VM_FAULT_SIGBUS;
} else {
page = shmem->pages[page_offset];
ret = vmf_insert_page(vma, vmf->address, page);
}
mutex_unlock(&shmem->pages_lock);
return ret;
}
static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
@@ -590,9 +605,6 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct drm_gem_shmem_object *shmem;
int ret;
/* Remove the fake offset */
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
if (obj->import_attach) {
/* Drop the reference drm_gem_mmap_obj() acquired.*/
drm_gem_object_put(obj);


@@ -99,6 +99,8 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
return -EFAULT;
memset(&v, 0, sizeof(v));
v = (struct drm_version) {
.name_len = v32.name_len,
.name = compat_ptr(v32.name),
@@ -137,6 +139,9 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
return -EFAULT;
memset(&uq, 0, sizeof(uq));
uq = (struct drm_unique){
.unique_len = uq32.unique_len,
.unique = compat_ptr(uq32.unique),
@@ -265,6 +270,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
if (copy_from_user(&c32, argp, sizeof(c32)))
return -EFAULT;
memset(&client, 0, sizeof(client));
client.idx = c32.idx;
err = drm_ioctl_kernel(file, drm_getclient, &client, 0);
@@ -852,6 +859,8 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
memset(&req, 0, sizeof(req));
req.request.type = req32.request.type;
req.request.sequence = req32.request.sequence;
req.request.signal = req32.request.signal;
@@ -889,6 +898,8 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
struct drm_mode_fb_cmd2 req64;
int err;
memset(&req64, 0, sizeof(req64));
if (copy_from_user(&req64, argp,
offsetof(drm_mode_fb_cmd232_t, modifier)))
return -EFAULT;


@@ -708,9 +708,12 @@ static int engine_setup_common(struct intel_engine_cs *engine)
goto err_status;
}
err = intel_engine_init_cmd_parser(engine);
if (err)
goto err_cmd_parser;
intel_engine_init_active(engine, ENGINE_PHYSICAL);
intel_engine_init_execlists(engine);
intel_engine_init_cmd_parser(engine);
intel_engine_init__pm(engine);
intel_engine_init_retire(engine);
@@ -724,6 +727,8 @@ static int engine_setup_common(struct intel_engine_cs *engine)
return 0;
err_cmd_parser:
intel_breadcrumbs_free(engine->breadcrumbs);
err_status:
cleanup_status_page(engine);
return err;


@@ -939,7 +939,7 @@ static void fini_hash_table(struct intel_engine_cs *engine)
* struct intel_engine_cs based on whether the platform requires software
* command parsing.
*/
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
int intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
{
const struct drm_i915_cmd_table *cmd_tables;
int cmd_table_count;
@@ -947,7 +947,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
engine->class == COPY_ENGINE_CLASS))
return;
return 0;
switch (engine->class) {
case RENDER_CLASS:
@@ -1012,19 +1012,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
break;
default:
MISSING_CASE(engine->class);
return;
goto out;
}
if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
drm_err(&engine->i915->drm,
"%s: command descriptions are not sorted\n",
engine->name);
return;
goto out;
}
if (!validate_regs_sorted(engine)) {
drm_err(&engine->i915->drm,
"%s: registers are not sorted\n", engine->name);
return;
goto out;
}
ret = init_hash_table(engine, cmd_tables, cmd_table_count);
@@ -1032,10 +1032,17 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
drm_err(&engine->i915->drm,
"%s: initialised failed!\n", engine->name);
fini_hash_table(engine);
return;
goto out;
}
engine->flags |= I915_ENGINE_USING_CMD_PARSER;
out:
if (intel_engine_requires_cmd_parser(engine) &&
!intel_engine_using_cmd_parser(engine))
return -EINVAL;
return 0;
}
/**


@@ -1946,7 +1946,7 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct i915_vma *batch,


@@ -482,6 +482,16 @@ static int meson_probe_remote(struct platform_device *pdev,
return count;
}
static void meson_drv_shutdown(struct platform_device *pdev)
{
struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
struct drm_device *drm = priv->drm;
DRM_DEBUG_DRIVER("\n");
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
}
static int meson_drv_probe(struct platform_device *pdev)
{
struct component_match *match = NULL;
@@ -553,6 +563,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = {
static struct platform_driver meson_drm_platform_driver = {
.probe = meson_drv_probe,
.shutdown = meson_drv_shutdown,
.driver = {
.name = "meson-drm",
.of_match_table = dt_match,


@@ -327,6 +327,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
head.id = i;
head.flags = 0;
head.surface_id = 0;
oldcount = qdev->monitors_config->count;
if (crtc->state->active) {
struct drm_display_mode *mode = &crtc->mode;


@@ -83,6 +83,7 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
struct gm12u320_device {
struct drm_device dev;
struct device *dmadev;
struct drm_simple_display_pipe pipe;
struct drm_connector conn;
struct usb_device *udev;
@@ -598,6 +599,22 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
/*
* FIXME: Dma-buf sharing requires DMA support by the importing device.
* This function is a workaround to make USB devices work as well.
* See todo.rst for how to fix the issue in the dma-buf framework.
*/
static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct gm12u320_device *gm12u320 = to_gm12u320(dev);
if (!gm12u320->dmadev)
return ERR_PTR(-ENODEV);
return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev);
}
DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static struct drm_driver gm12u320_drm_driver = {
@@ -611,6 +628,7 @@ static struct drm_driver gm12u320_drm_driver = {
.fops = &gm12u320_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
.gem_prime_import = gm12u320_gem_prime_import,
};
static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = {
@@ -637,16 +655,19 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
struct gm12u320_device, dev);
if (IS_ERR(gm12u320))
return PTR_ERR(gm12u320);
dev = &gm12u320->dev;
gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
if (!gm12u320->dmadev)
drm_warn(dev, "buffer sharing not supported"); /* not an error */
gm12u320->udev = interface_to_usbdev(interface);
INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
mutex_init(&gm12u320->fb_update.lock);
dev = &gm12u320->dev;
ret = drmm_mode_config_init(dev);
if (ret)
return ret;
goto err_put_device;
dev->mode_config.min_width = GM12U320_USER_WIDTH;
dev->mode_config.max_width = GM12U320_USER_WIDTH;
@@ -656,15 +677,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
ret = gm12u320_usb_alloc(gm12u320);
if (ret)
return ret;
goto err_put_device;
ret = gm12u320_set_ecomode(gm12u320);
if (ret)
return ret;
goto err_put_device;
ret = gm12u320_conn_init(gm12u320);
if (ret)
return ret;
goto err_put_device;
ret = drm_simple_display_pipe_init(&gm12u320->dev,
&gm12u320->pipe,
@@ -674,24 +695,31 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
gm12u320_pipe_modifiers,
&gm12u320->conn);
if (ret)
return ret;
goto err_put_device;
drm_mode_config_reset(dev);
usb_set_intfdata(interface, dev);
ret = drm_dev_register(dev, 0);
if (ret)
return ret;
goto err_put_device;
drm_fbdev_generic_setup(dev, 0);
return 0;
err_put_device:
put_device(gm12u320->dmadev);
return ret;
}
static void gm12u320_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
struct gm12u320_device *gm12u320 = to_gm12u320(dev);
put_device(gm12u320->dmadev);
gm12u320->dmadev = NULL;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
}


@@ -32,6 +32,22 @@ static int udl_usb_resume(struct usb_interface *interface)
return drm_mode_config_helper_resume(dev);
}
/*
* FIXME: Dma-buf sharing requires DMA support by the importing device.
* This function is a workaround to make USB devices work as well.
* See todo.rst for how to fix the issue in the dma-buf framework.
*/
static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct udl_device *udl = to_udl(dev);
if (!udl->dmadev)
return ERR_PTR(-ENODEV);
return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev);
}
DEFINE_DRM_GEM_FOPS(udl_driver_fops);
static struct drm_driver driver = {
@@ -42,6 +58,7 @@ static struct drm_driver driver = {
.fops = &udl_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
.gem_prime_import = udl_driver_gem_prime_import,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,


@@ -50,6 +50,7 @@ struct urb_list {
struct udl_device {
struct drm_device drm;
struct device *dev;
struct device *dmadev;
struct usb_device *udev;
struct drm_simple_display_pipe display_pipe;


@@ -314,6 +314,10 @@ int udl_init(struct udl_device *udl)
DRM_DEBUG("\n");
udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
if (!udl->dmadev)
drm_warn(dev, "buffer sharing not supported"); /* not an error */
mutex_init(&udl->gem_lock);
if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
@@ -342,12 +346,18 @@ int udl_init(struct udl_device *udl)
err:
if (udl->urbs.count)
udl_free_urb_list(dev);
put_device(udl->dmadev);
DRM_ERROR("%d\n", ret);
return ret;
}
int udl_drop_usb(struct drm_device *dev)
{
struct udl_device *udl = to_udl(dev);
udl_free_urb_list(dev);
put_device(udl->dmadev);
udl->dmadev = NULL;
return 0;
}


@@ -995,7 +995,12 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
workitem.reports_supported |= STD_KEYBOARD;
break;
case 0x0d:
device_type = "eQUAD Lightspeed 1_1";
device_type = "eQUAD Lightspeed 1.1";
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
workitem.reports_supported |= STD_KEYBOARD;
break;
case 0x0f:
device_type = "eQUAD Lightspeed 1.2";
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
workitem.reports_supported |= STD_KEYBOARD;
break;


@@ -91,7 +91,6 @@
#define RCAR_BUS_PHASE_START (MDBS | MIE | ESG)
#define RCAR_BUS_PHASE_DATA (MDBS | MIE)
#define RCAR_BUS_MASK_DATA (~(ESG | FSB) & 0xFF)
#define RCAR_BUS_PHASE_STOP (MDBS | MIE | FSB)
#define RCAR_IRQ_SEND (MNR | MAL | MST | MAT | MDE)
@@ -120,6 +119,7 @@ enum rcar_i2c_type {
};
struct rcar_i2c_priv {
u32 flags;
void __iomem *io;
struct i2c_adapter adap;
struct i2c_msg *msg;
@@ -130,7 +130,6 @@ struct rcar_i2c_priv {
int pos;
u32 icccr;
u32 flags;
u8 recovery_icmcr; /* protected by adapter lock */
enum rcar_i2c_type devtype;
struct i2c_client *slave;
@@ -621,7 +620,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
/*
* This driver has a lock-free design because there are IP cores (at least
* R-Car Gen2) which have an inherent race condition in their hardware design.
* There, we need to clear RCAR_BUS_MASK_DATA bits as soon as possible after
* There, we need to switch to RCAR_BUS_PHASE_DATA as soon as possible after
* the interrupt was generated, otherwise an unwanted repeated message gets
* generated. It turned out that taking a spinlock at the beginning of the ISR
* was already causing repeated messages. Thus, this driver was converted to
@@ -630,13 +629,11 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
{
struct rcar_i2c_priv *priv = ptr;
u32 msr, val;
u32 msr;
/* Clear START or STOP immediately, except for REPSTART after read */
if (likely(!(priv->flags & ID_P_REP_AFTER_RD))) {
val = rcar_i2c_read(priv, ICMCR);
rcar_i2c_write(priv, ICMCR, val & RCAR_BUS_MASK_DATA);
}
if (likely(!(priv->flags & ID_P_REP_AFTER_RD)))
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
msr = rcar_i2c_read(priv, ICMSR);


@@ -220,10 +220,10 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
cur_base += ret * PAGE_SIZE;
npages -= ret;
sg = __sg_alloc_table_from_pages(
&umem->sg_head, page_list, ret, 0, ret << PAGE_SHIFT,
dma_get_max_seg_size(device->dma_device), sg, npages,
GFP_KERNEL);
sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret,
0, ret << PAGE_SHIFT,
ib_dma_max_seg_size(device), sg, npages,
GFP_KERNEL);
umem->sg_nents = umem->sg_head.nents;
if (IS_ERR(sg)) {
unpin_user_pages_dirty_lock(page_list, ret, 0);


@@ -48,6 +48,7 @@
#include <linux/efi.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/ktime.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/spinlock.h>
@@ -400,7 +401,7 @@ struct applespi_data {
unsigned int cmd_msg_cntr;
/* lock to protect the above parameters and flags below */
spinlock_t cmd_msg_lock;
bool cmd_msg_queued;
ktime_t cmd_msg_queued;
enum applespi_evt_type cmd_evt_type;
struct led_classdev backlight_info;
@@ -716,7 +717,7 @@ static void applespi_msg_complete(struct applespi_data *applespi,
wake_up_all(&applespi->drain_complete);
if (is_write_msg) {
applespi->cmd_msg_queued = false;
applespi->cmd_msg_queued = 0;
applespi_send_cmd_msg(applespi);
}
@@ -758,8 +759,16 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi)
return 0;
/* check whether send is in progress */
if (applespi->cmd_msg_queued)
return 0;
if (applespi->cmd_msg_queued) {
if (ktime_ms_delta(ktime_get(), applespi->cmd_msg_queued) < 1000)
return 0;
dev_warn(&applespi->spi->dev, "Command %d timed out\n",
applespi->cmd_evt_type);
applespi->cmd_msg_queued = 0;
applespi->write_active = false;
}
/* set up packet */
memset(packet, 0, APPLESPI_PACKET_SIZE);
@@ -856,7 +865,7 @@ static int applespi_send_cmd_msg(struct applespi_data *applespi)
return sts;
}
applespi->cmd_msg_queued = true;
applespi->cmd_msg_queued = ktime_get_coarse();
applespi->write_active = true;
return 0;
@@ -1908,7 +1917,7 @@ static int __maybe_unused applespi_resume(struct device *dev)
applespi->drain = false;
applespi->have_cl_led_on = false;
applespi->have_bl_level = 0;
applespi->cmd_msg_queued = false;
applespi->cmd_msg_queued = 0;
applespi->read_active = false;
applespi->write_active = false;


@@ -12,6 +12,7 @@
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
@@ -254,6 +255,8 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
u8 fxn, u64 *value, bool is_write);
static bool amd_iommu_pre_enabled = true;
@@ -1717,13 +1720,11 @@ static int __init init_iommu_all(struct acpi_table_header *table)
return 0;
}
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
u8 fxn, u64 *value, bool is_write);
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
{
int retry;
struct pci_dev *pdev = iommu->dev;
u64 val = 0xabcd, val2 = 0, save_reg = 0;
u64 val = 0xabcd, val2 = 0, save_reg, save_src;
if (!iommu_feature(iommu, FEATURE_PC))
return;
@@ -1731,17 +1732,39 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
amd_iommu_pc_present = true;
/* save the value to restore, if writable */
if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
goto pc_false;
/*
* Disable power gating by programming the performance counter
* source to 20 (i.e. counts the reads and writes from/to IOMMU
* Reserved Register [MMIO Offset 1FF8h] that are ignored),
* which never gets incremented during this init phase.
* (Note: The event is also deprecated.)
*/
val = 20;
if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
goto pc_false;
/* Check if the performance counters can be written to */
if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
(iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
(val != val2))
goto pc_false;
val = 0xabcd;
for (retry = 5; retry; retry--) {
if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
val2)
break;
/* Wait about 20 msec for power gating to disable and retry. */
msleep(20);
}
/* restore */
if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
goto pc_false;
if (val != val2)
goto pc_false;
pci_info(pdev, "IOMMU performance counters supported\n");


@@ -1079,8 +1079,17 @@ prq_advance:
* Clear the page request overflow bit and wake up all threads that
* are waiting for the completion of this handling.
*/
if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO)
writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
iommu->name);
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
if (head == tail) {
writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
iommu->name);
}
}
if (!completion_done(&iommu->prq_complete))
complete(&iommu->prq_complete);


@@ -245,7 +245,7 @@ static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1,
brx = &vsp1->bru->entity;
else if (pipe->brx && !drm_pipe->force_brx_release)
brx = pipe->brx;
else if (!vsp1->bru->entity.pipe)
else if (vsp1_feature(vsp1, VSP1_HAS_BRU) && !vsp1->bru->entity.pipe)
brx = &vsp1->bru->entity;
else
brx = &vsp1->brs->entity;
@@ -462,9 +462,9 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
* make sure it is present in the pipeline's list of entities if it
* wasn't already.
*/
if (!use_uif) {
if (drm_pipe->uif && !use_uif) {
drm_pipe->uif->pipe = NULL;
} else if (!drm_pipe->uif->pipe) {
} else if (drm_pipe->uif && !drm_pipe->uif->pipe) {
drm_pipe->uif->pipe = pipe;
list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities);
}


@@ -5,6 +5,7 @@ obj-y += keymaps/
obj-$(CONFIG_RC_CORE) += rc-core.o
rc-core-y := rc-main.o rc-ir-raw.o
rc-core-$(CONFIG_LIRC) += lirc_dev.o
rc-core-$(CONFIG_MEDIA_CEC_RC) += keymaps/rc-cec.o
rc-core-$(CONFIG_BPF_LIRC_MODE2) += bpf-lirc.o
obj-$(CONFIG_IR_NEC_DECODER) += ir-nec-decoder.o
obj-$(CONFIG_IR_RC5_DECODER) += ir-rc5-decoder.o


@@ -21,7 +21,6 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-behold.o \
rc-behold-columbus.o \
rc-budget-ci-old.o \
rc-cec.o \
rc-cinergy-1400.o \
rc-cinergy.o \
rc-d680-dmb.o \


@@ -1,5 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Keytable for the CEC remote control
*
* This keymap is unusual in that it can't be built as a module;
* instead it is registered directly in rc-main.c if CONFIG_MEDIA_CEC_RC
* is set. This is because it can be called from drm_dp_cec_set_edid() via
* cec_register_adapter() in an asynchronous context, and it is not
* allowed to use request_module() to load rc-cec.ko in that case.
*
* Since this keymap is only used if CONFIG_MEDIA_CEC_RC is set, we
* just compile this keymap into the rc-core module and never as a
* separate module.
*
* Copyright (c) 2015 by Kamil Debski
*/
@@ -152,7 +162,7 @@ static struct rc_map_table cec[] = {
/* 0x77-0xff: Reserved */
};
static struct rc_map_list cec_map = {
struct rc_map_list cec_map = {
.map = {
.scan = cec,
.size = ARRAY_SIZE(cec),
@@ -160,19 +170,3 @@ static struct rc_map_list cec_map = {
.name = RC_MAP_CEC,
}
};
static int __init init_rc_map_cec(void)
{
return rc_map_register(&cec_map);
}
static void __exit exit_rc_map_cec(void)
{
rc_map_unregister(&cec_map);
}
module_init(init_rc_map_cec);
module_exit(exit_rc_map_cec);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kamil Debski");


@@ -2069,6 +2069,9 @@ static int __init rc_core_init(void)
led_trigger_register_simple("rc-feedback", &led_feedback);
rc_map_register(&empty_map);
#ifdef CONFIG_MEDIA_CEC_RC
rc_map_register(&cec_map);
#endif
return 0;
}
@@ -2078,6 +2081,9 @@ static void __exit rc_core_exit(void)
lirc_dev_exit();
class_unregister(&rc_class);
led_trigger_unregister_simple(led_feedback);
#ifdef CONFIG_MEDIA_CEC_RC
rc_map_unregister(&cec_map);
#endif
rc_map_unregister(&empty_map);
}
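
For rc-main.c to reference cec_map directly, the now non-static symbol presumably gains a shared declaration; a sketch of the assumed form (the header it lives in is an assumption):

#ifdef CONFIG_MEDIA_CEC_RC
/* rc-cec.c is linked into rc-core, so expose its keymap to rc-main.c */
extern struct rc_map_list cec_map;
#endif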


@@ -371,7 +371,7 @@ void usbtv_audio_free(struct usbtv *usbtv)
cancel_work_sync(&usbtv->snd_trigger);
if (usbtv->snd && usbtv->udev) {
snd_card_free(usbtv->snd);
snd_card_free_when_closed(usbtv->snd);
usbtv->snd = NULL;
}
}


@@ -948,6 +948,11 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
if (!fl->cctx->rpdev)
return -EPIPE;
if (handle == FASTRPC_INIT_HANDLE && !kernel) {
dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
return -EPERM;
}
ctx = fastrpc_context_alloc(fl, kernel, sc, args);
if (IS_ERR(ctx))
return PTR_ERR(ctx);


@@ -166,6 +166,7 @@ static const struct of_device_id pvpanic_mmio_match[] = {
{ .compatible = "qemu,pvpanic-mmio", },
{}
};
MODULE_DEVICE_TABLE(of, pvpanic_mmio_match);
static struct platform_driver pvpanic_mmio_driver = {
.driver = {

Some files were not shown because too many files have changed in this diff Show More