Snap for 7316566 from a5293ea1ab
to android12-5.10-keystone-qcom-release
Change-Id: I329e265268c702e8e15c888ef9ad57bf5a0a162d
Documentation/ABI/testing/sysfs-fs-incfs | 64 (new file)
@@ -0,0 +1,64 @@
+What:		/sys/fs/incremental-fs/features/corefs
+Date:		2019
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Reads 'supported'. Always present.
+
+What:		/sys/fs/incremental-fs/features/v2
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Reads 'supported'. Present if all v2 features of incfs are
+		supported.
+
+What:		/sys/fs/incremental-fs/features/zstd
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Reads 'supported'. Present if zstd compression is supported
+		for data blocks.
+
+What:		/sys/fs/incremental-fs/instances/[name]
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Folder created when incfs is mounted with the sysfs_name=[name]
+		option. If this option is used, the following values are created
+		in this folder.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_delayed_min
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns a count of the number of reads that were delayed as a
+		result of the per UID read timeouts min time setting.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_delayed_min_us
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns total delay time for all files since first mount as a
+		result of the per UID read timeouts min time setting.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_delayed_pending
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns a count of the number of reads that were delayed as a
+		result of waiting for a pending read.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_delayed_pending_us
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns total delay time for all files since first mount as a
+		result of waiting for a pending read.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_failed_hash_verification
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns number of reads that failed because of hash verification
+		failures.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_failed_other
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns number of reads that failed for reasons other than
+		timing out or hash failures.
+
+What:		/sys/fs/incremental-fs/instances/[name]/reads_failed_timed_out
+Date:		April 2021
+Contact:	Paul Lawrence <paullawrence@google.com>
+Description:	Returns number of reads that timed out.
@@ -289,6 +289,12 @@
 			do not want to use tracing_snapshot_alloc() as it needs
 			to be done where GFP_KERNEL allocations are allowed.
 
+	allow_file_spec_access
+			Allow speculative faults on file backed pages.
+			Speculative faults are enabled only for those vm_ops
+			that implement and return true for allow_speculation
+			callback.
+
 	allow_mismatched_32bit_el0 [ARM64]
 			Allow execve() of 32-bit applications and setting of the
 			PER_LINUX32 personality on systems where only a strict
@@ -161,6 +161,15 @@ particular KASAN features.
 
 - ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
 
+- ``kasan.mode=sync`` or ``=async`` controls whether KASAN is configured in
+  synchronous or asynchronous mode of execution (default: ``sync``).
+  Synchronous mode: a bad access is detected immediately when a tag
+  check fault occurs.
+  Asynchronous mode: a bad access detection is delayed. When a tag check
+  fault occurs, the information is stored in hardware (in the TFSR_EL1
+  register for arm64). The kernel periodically checks the hardware and
+  only reports tag faults during these checks.
+
 - ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
   traces collection (default: ``on``).
 
@@ -178,6 +178,16 @@ properties:
     $ref: /schemas/types.yaml#/definitions/uint32
     enum: [1, 2, 3]
 
+  slow-charger-loop:
+    description: Allows PMIC charger loops which are slow(i.e. cannot meet the 15ms deadline) to
+      still comply to pSnkStby i.e Maximum power that can be consumed by sink while in Sink Standby
+      state as defined in 7.4.2 Sink Electrical Parameters of USB Power Delivery Specification
+      Revision 3.0, Version 1.2. When the property is set, the port requests pSnkStby(2.5W -
+      5V@500mA) upon entering SNK_DISCOVERY(instead of 3A or the 1.5A, Rp current advertised, during
+      SNK_DISCOVERY) and the actual currrent limit after reception of PS_Ready for PD link or during
+      SNK_READY for non-pd link.
+    type: boolean
+
 required:
   - compatible
 
Documentation/filesystems/incfs.rst | 82 (new file)
@@ -0,0 +1,82 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================================================
+incfs: A stacked incremental filesystem for Linux
+=================================================
+
+/sys/fs interface
+=================
+
+Please update Documentation/ABI/testing/sys-fs-incfs if you update this
+section.
+
+incfs creates the following files in /sys/fs.
+
+Features
+--------
+
+/sys/fs/incremental-fs/features/corefs
+Reads 'supported'. Always present.
+
+/sys/fs/incremental-fs/features/v2
+Reads 'supported'. Present if all v2 features of incfs are supported. These
+are:
+  fs-verity support
+  inotify support
+  ioclts:
+    INCFS_IOC_SET_READ_TIMEOUTS
+    INCFS_IOC_GET_READ_TIMEOUTS
+    INCFS_IOC_GET_BLOCK_COUNT
+    INCFS_IOC_CREATE_MAPPED_FILE
+  .incomplete folder
+  .blocks_written pseudo file
+  report_uid mount option
+
+/sys/fs/incremental-fs/features/zstd
+Reads 'supported'. Present if zstd compression is supported for data blocks.
+
+Optional per mount
+------------------
+
+For each incfs mount, the mount option sysfs_name=[name] creates a /sys/fs
+node called:
+
+/sys/fs/incremental-fs/instances/[name]
+
+This will contain the following files:
+
+/sys/fs/incremental-fs/instances/[name]/reads_delayed_min
+Returns a count of the number of reads that were delayed as a result of the
+per UID read timeouts min time setting.
+
+/sys/fs/incremental-fs/instances/[name]/reads_delayed_min_us
+Returns total delay time for all files since first mount as a result of the
+per UID read timeouts min time setting.
+
+/sys/fs/incremental-fs/instances/[name]/reads_delayed_pending
+Returns a count of the number of reads that were delayed as a result of
+waiting for a pending read.
+
+/sys/fs/incremental-fs/instances/[name]/reads_delayed_pending_us
+Returns total delay time for all files since first mount as a result of
+waiting for a pending read.
+
+/sys/fs/incremental-fs/instances/[name]/reads_failed_hash_verification
+Returns number of reads that failed because of hash verification failures.
+
+/sys/fs/incremental-fs/instances/[name]/reads_failed_other
+Returns number of reads that failed for reasons other than timing out or
+hash failures.
+
+/sys/fs/incremental-fs/instances/[name]/reads_failed_timed_out
+Returns number of reads that timed out.
+
+For reads_delayed_*** settings, note that a file can count for both
+reads_delayed_min and reads_delayed_pending if incfs first waits for a pending
+read then has to wait further for the min time. In that case, the time spent
+waiting is split between reads_delayed_pending_us, which is increased by the
+time spent waiting for the pending read, and reads_delayed_min_us, which is
+increased by the remainder of the time spent waiting.
+
+Reads that timed out are not added to the reads_delayed_pending or the
+reads_delayed_pending_us counters.
Makefile | 2 +-
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 31
+SUBLEVEL = 32
 EXTRAVERSION =
 NAME = Dare mighty things
 
android/abi_gki_aarch64.xml | 147427 (file diff suppressed because it is too large)
@@ -21,6 +21,7 @@
 cdev_device_add
 cdev_device_del
 cdev_init
+__cfi_slowpath
 __check_object_size
 clk_bulk_disable
 clk_bulk_enable
@@ -183,7 +184,6 @@
 flush_workqueue
 free_io_pgtable_ops
 free_irq
-__free_pages
 generic_handle_irq
 generic_mii_ioctl
 get_device
@@ -224,7 +224,6 @@
 icc_nodes_remove
 icc_provider_add
 icc_provider_del
-icc_put
 icc_set_bw
 icc_sync_state
 ida_alloc_range
@@ -241,6 +240,7 @@
 init_timer_key
 init_wait_entry
 __init_waitqueue_head
+iomem_resource
 iommu_attach_device
 iommu_detach_device
 iommu_domain_alloc
@@ -276,6 +276,7 @@
 irq_to_desc
 is_vmalloc_addr
 jiffies
+kasan_flag_enabled
 kasprintf
 kernel_connect
 kernel_getsockname
@@ -293,7 +294,6 @@
 kstrdup_const
 kstrtoint
 kstrtouint
-kthread_create_on_node
 ktime_get
 ktime_get_mono_fast_ns
 ktime_get_real_ts64
@@ -348,13 +348,11 @@
 of_device_is_compatible
 of_device_uevent_modalias
 of_dma_configure_id
-of_find_device_by_node
 of_find_property
 of_fwnode_ops
 of_genpd_add_provider_onecell
 of_genpd_del_provider
 of_get_child_by_name
-of_get_compatible_child
 of_get_named_gpio_flags
 of_get_next_available_child
 of_get_next_child
@@ -455,7 +453,6 @@
 regcache_sync
 register_reboot_notifier
 __register_rpmsg_driver
-register_shrinker
 regmap_bulk_read
 regmap_bulk_write
 __regmap_init
@@ -473,10 +470,12 @@
 regulator_set_load
 regulator_set_voltage
 release_firmware
+__release_region
 remap_pfn_range
 request_firmware
 request_firmware_direct
 request_firmware_into_buf
+__request_region
 request_threaded_irq
 reset_control_assert
 reset_control_deassert
@@ -576,6 +575,9 @@
 trace_event_raw_init
 trace_event_reg
 trace_handle_return
+__traceiter_rwmmio_post_read
+__traceiter_rwmmio_read
+__traceiter_rwmmio_write
 __tracepoint_rwmmio_post_read
 __tracepoint_rwmmio_read
 __tracepoint_rwmmio_write
@@ -629,7 +631,6 @@
 vunmap
 wait_for_completion_timeout
 __wake_up
-wake_up_process
 __warn_printk
 watchdog_init_timeout
 
@@ -694,11 +695,15 @@
 # required by ath10k_core.ko
 bcmp
 cancel_delayed_work
+__cfg80211_alloc_event_skb
+__cfg80211_alloc_reply_skb
 cfg80211_calculate_bitrate
 cfg80211_find_elem_match
 cfg80211_find_vendor_elem
 cfg80211_get_bss
 cfg80211_put_bss
+__cfg80211_send_event_skb
+cfg80211_vendor_cmd_reply
 cpu_latency_qos_add_request
 cpu_latency_qos_remove_request
 device_get_mac_address
@@ -757,6 +762,8 @@
 __kfifo_alloc
 __kfifo_free
 __local_bh_enable_ip
+__nla_parse
+nla_put
 param_ops_ulong
 regulatory_hint
 skb_copy
@@ -840,15 +847,10 @@
 of_clk_get_parent_name
 
 # required by cqhci.ko
+devm_blk_ksm_init
 dmam_free_coherent
 mmc_cqe_request_done
 
-# required by deferred-free-helper.ko
-freezing_slow_path
-__refrigerator
-sched_set_normal
-system_freezing_cnt
-
 # required by extcon-usb-gpio.ko
 devm_extcon_dev_allocate
 devm_extcon_dev_register
@@ -1185,12 +1187,14 @@
 hdmi_infoframe_pack
 hrtimer_init
 hrtimer_start_range_ns
+icc_put
 invalidate_mapping_pages
 iommu_map_sg
 iommu_set_fault_handler
 irq_domain_xlate_onecell
 kstrdup_quotable_cmdline
 kstrtouint_from_user
+kthread_create_on_node
 kthread_create_worker
 kthread_destroy_worker
 kthread_queue_work
@@ -1213,7 +1217,9 @@
 of_device_is_available
 of_drm_find_bridge
 of_drm_find_panel
+of_find_device_by_node
 of_find_matching_node_and_match
+of_get_compatible_child
 of_graph_get_endpoint_by_regs
 of_graph_get_next_endpoint
 of_graph_get_remote_port_parent
@@ -1229,6 +1235,7 @@
 _raw_read_unlock
 _raw_write_lock
 _raw_write_unlock
+register_shrinker
 register_vmap_purge_notifier
 regulator_get
 regulator_put
@@ -1253,13 +1260,11 @@
 vm_get_page_prot
 vscnprintf
 vsnprintf
+wake_up_process
 ww_mutex_lock_interruptible
 ww_mutex_unlock
 
 # required by msm_serial.ko
-iomem_resource
-__release_region
-__request_region
 tty_termios_baud_rate
 tty_termios_encode_baud_rate
 
@@ -1318,11 +1323,6 @@
 usb_put_hcd
 usb_remove_hcd
 
-# required by page_pool.ko
-__alloc_pages_nodemask
-contig_page_data
-mod_node_page_state
-
 # required by pdr_interface.ko
 strnlen
 
@@ -1403,9 +1403,7 @@
 of_get_cpu_node
 
 # required by qcom-geni-se.ko
-console_drivers
 icc_set_tag
-of_get_next_parent
 
 # required by qcom-pdc.ko
 irq_chip_get_parent_state
@@ -1677,10 +1675,17 @@
 spmi_controller_remove
 
 # required by system_heap.ko
+deferred_free
+dmabuf_page_pool_alloc
+dmabuf_page_pool_create
+dmabuf_page_pool_destroy
+dmabuf_page_pool_free
 dma_heap_add
 dma_heap_get_dev
+dma_heap_get_name
 dma_sync_sg_for_cpu
 dma_sync_sg_for_device
+__free_pages
 preempt_schedule
 __sg_page_iter_next
 
@@ -43,9 +43,9 @@
 blocking_notifier_call_chain
 blocking_notifier_chain_register
 blocking_notifier_chain_unregister
-bpf_trace_run1
 bpf_trace_run10
 bpf_trace_run12
+bpf_trace_run1
 bpf_trace_run2
 bpf_trace_run3
 bpf_trace_run4
@@ -708,9 +708,9 @@
 irq_work_queue
 irq_work_sync
 is_vmalloc_addr
-jiffies
 jiffies_64_to_clock_t
 jiffies64_to_msecs
+jiffies
 jiffies_to_msecs
 jiffies_to_usecs
 kasan_flag_enabled
@@ -1360,6 +1360,45 @@
 trace_event_raw_init
 trace_event_reg
 trace_handle_return
+__traceiter_android_rvh_can_migrate_task
+__traceiter_android_rvh_cpu_cgroup_can_attach
+__traceiter_android_rvh_dequeue_task
+__traceiter_android_rvh_enqueue_task
+__traceiter_android_rvh_find_lowest_rq
+__traceiter_android_rvh_sched_newidle_balance
+__traceiter_android_rvh_sched_nohz_balancer_kick
+__traceiter_android_rvh_sched_rebalance_domains
+__traceiter_android_rvh_select_fallback_rq
+__traceiter_android_rvh_select_task_rq_fair
+__traceiter_android_rvh_select_task_rq_rt
+__traceiter_android_vh_cpu_idle_enter
+__traceiter_android_vh_cpu_idle_exit
+__traceiter_android_vh_gic_v3_set_affinity
+__traceiter_android_vh_ipi_stop
+__traceiter_android_vh_scheduler_tick
+__traceiter_cpu_idle
+__traceiter_device_pm_callback_end
+__traceiter_device_pm_callback_start
+__traceiter_hrtimer_expire_entry
+__traceiter_hrtimer_expire_exit
+__traceiter_ipi_entry
+__traceiter_ipi_exit
+__traceiter_ipi_raise
+__traceiter_irq_handler_entry
+__traceiter_irq_handler_exit
+__traceiter_pelt_cfs_tp
+__traceiter_pelt_dl_tp
+__traceiter_pelt_irq_tp
+__traceiter_pelt_rt_tp
+__traceiter_pelt_se_tp
+__traceiter_rwmmio_post_read
+__traceiter_rwmmio_read
+__traceiter_rwmmio_write
+__traceiter_sched_overutilized_tp
+__traceiter_sched_switch
+__traceiter_suspend_resume
+__traceiter_workqueue_execute_end
+__traceiter_workqueue_execute_start
 __tracepoint_android_rvh_can_migrate_task
 __tracepoint_android_rvh_cpu_cgroup_can_attach
 __tracepoint_android_rvh_dequeue_task
@@ -467,6 +467,8 @@
 dma_map_resource
 dma_map_sg_attrs
 dma_mmap_attrs
+dma_pool_alloc
+dma_pool_free
 dma_release_channel
 dma_request_chan
 dma_set_coherent_mask
@@ -1326,6 +1328,7 @@
 prandom_u32
 preempt_schedule
 preempt_schedule_notrace
+prepare_to_wait
 prepare_to_wait_event
 print_hex_dump
 printk
@@ -1429,6 +1432,7 @@
 regulator_get
 regulator_get_optional
 regulator_get_voltage_sel_regmap
+regulator_is_enabled
 regulator_is_enabled_regmap
 regulator_list_voltage_linear
 regulator_map_voltage_linear
@@ -1622,6 +1626,7 @@
 snd_soc_of_get_dai_name
 snd_soc_of_parse_card_name
 snd_soc_of_parse_daifmt
+snd_soc_params_to_bclk
 snd_soc_params_to_frame_size
 snd_soc_put_enum_double
 snd_soc_put_volsw
@@ -1761,12 +1766,58 @@
 trace_event_raw_init
 trace_event_reg
 trace_handle_return
+__traceiter_android_rvh_cpu_overutilized
+__traceiter_android_rvh_dequeue_task
+__traceiter_android_rvh_find_energy_efficient_cpu
+__traceiter_android_rvh_select_task_rq_rt
+__traceiter_android_rvh_set_iowait
+__traceiter_android_rvh_typec_tcpci_chk_contaminant
+__traceiter_android_rvh_typec_tcpci_get_vbus
+__traceiter_android_rvh_uclamp_eff_get
+__traceiter_android_rvh_util_est_update
+__traceiter_android_vh_arch_set_freq_scale
+__traceiter_android_vh_cma_alloc_finish
+__traceiter_android_vh_cma_alloc_start
+__traceiter_android_vh_cpu_idle_enter
+__traceiter_android_vh_cpu_idle_exit
+__traceiter_android_vh_enable_thermal_genl_check
+__traceiter_android_vh_ep_create_wakeup_source
+__traceiter_android_vh_ipi_stop
+__traceiter_android_vh_pagecache_get_page
+__traceiter_android_vh_rmqueue
+__traceiter_android_vh_timerfd_create
+__traceiter_android_vh_typec_store_partner_src_caps
+__traceiter_android_vh_typec_tcpci_override_toggling
+__traceiter_android_vh_typec_tcpm_adj_current_limit
+__traceiter_android_vh_typec_tcpm_get_timer
+__traceiter_android_vh_ufs_check_int_errors
+__traceiter_android_vh_ufs_compl_command
+__traceiter_android_vh_ufs_fill_prdt
+__traceiter_android_vh_ufs_prepare_command
+__traceiter_android_vh_ufs_send_command
+__traceiter_android_vh_ufs_send_tm_command
+__traceiter_android_vh_ufs_send_uic_command
+__traceiter_android_vh_ufs_update_sysfs
 __traceiter_clock_set_rate
 __traceiter_cpu_frequency
+__traceiter_device_pm_callback_end
+__traceiter_device_pm_callback_start
 __traceiter_dwc3_readl
 __traceiter_dwc3_writel
 __traceiter_gpu_mem_total
+__traceiter_pelt_cfs_tp
+__traceiter_pelt_dl_tp
+__traceiter_pelt_irq_tp
+__traceiter_pelt_rt_tp
+__traceiter_pelt_se_tp
+__traceiter_rwmmio_post_read
+__traceiter_rwmmio_read
+__traceiter_rwmmio_write
+__traceiter_sched_cpu_capacity_tp
+__traceiter_sched_overutilized_tp
+__traceiter_sched_util_est_cfs_tp
 __traceiter_sched_util_est_se_tp
+__traceiter_suspend_resume
 trace_output_call
 __tracepoint_android_rvh_cpu_overutilized
 __tracepoint_android_rvh_dequeue_task
@@ -25,6 +25,8 @@
 blk_mq_quiesce_queue
 blk_mq_requeue_request
 blk_mq_start_request
+blk_mq_tagset_busy_iter
+blk_mq_tagset_wait_completed_request
 blk_mq_unquiesce_queue
 blk_put_queue
 blk_queue_flag_clear
@@ -44,6 +46,7 @@
 cdev_device_add
 cdev_device_del
 cdev_init
+__cfi_slowpath
 __check_object_size
 __class_create
 class_destroy
@@ -193,7 +196,6 @@
 kstrtouint
 kstrtoul_from_user
 kstrtoull
-kthread_create_on_node
 ktime_get
 ktime_get_mono_fast_ns
 ktime_get_with_offset
@@ -308,7 +310,6 @@
 __rcu_read_unlock
 rdev_get_drvdata
 refcount_warn_saturate
-register_shrinker
 regmap_read
 regmap_update_bits_base
 regmap_write
@@ -376,6 +377,9 @@
 trace_event_raw_init
 trace_event_reg
 trace_handle_return
+__traceiter_rwmmio_post_read
+__traceiter_rwmmio_read
+__traceiter_rwmmio_write
 __tracepoint_rwmmio_post_read
 __tracepoint_rwmmio_read
 __tracepoint_rwmmio_write
@@ -398,7 +402,6 @@
 wait_for_completion
 wait_for_completion_timeout
 __wake_up
-wake_up_process
 __warn_printk
 xa_destroy
 xa_erase
@@ -507,12 +510,6 @@
 get_cpu_device
 policy_has_boost_freq
 
-# required by deferred-free-helper.ko
-freezing_slow_path
-__refrigerator
-sched_set_normal
-system_freezing_cnt
-
 # required by dw_mmc.ko
 debugfs_create_u32
 debugfs_create_x64
@@ -809,6 +806,7 @@
 kimage_voffset
 kstrdup
 kstrtobool_from_user
+kthread_create_on_node
 kthread_should_stop
 kthread_stop
 ktime_add_safe
@@ -830,6 +828,7 @@
 rb_next
 rb_prev
 rb_replace_node
+register_shrinker
 regulator_get_optional
 regulator_put
 __release_region
@@ -857,6 +856,7 @@
 vmf_insert_pfn
 vm_mmap
 vzalloc
+wake_up_process
 
 # required by mmc_block.ko
 blk_get_request
@@ -882,6 +882,8 @@
 mmc_cqe_post_req
 mmc_cqe_recovery
 mmc_cqe_start_req
+mmc_crypto_prepare_req
+mmc_crypto_setup_queue
 mmc_detect_card_removed
 mmc_erase
 mmc_erase_group_aligned
@@ -978,8 +980,6 @@
 blk_mq_complete_request_remote
 blk_mq_map_queues
 blk_mq_pci_map_queues
-blk_mq_tagset_busy_iter
-blk_mq_tagset_wait_completed_request
 blk_mq_tag_to_rq
 blk_mq_update_nr_hw_queues
 device_release_driver
@@ -1113,10 +1113,6 @@
 usb_put_hcd
 usb_remove_hcd
 
-# required by page_pool.ko
-contig_page_data
-mod_node_page_state
-
 # required by phy-hi3660-usb3.ko
 __devm_of_phy_provider_register
 devm_phy_create
@@ -1212,6 +1208,11 @@
 __unregister_chrdev
 
 # required by system_heap.ko
+deferred_free
+dmabuf_page_pool_alloc
+dmabuf_page_pool_create
+dmabuf_page_pool_destroy
+dmabuf_page_pool_free
 dma_heap_get_dev
 __sg_page_iter_next
 __sg_page_iter_start
@@ -58,6 +58,7 @@
 bitmap_allocate_region
 __bitmap_clear
 bitmap_find_next_zero_area_off
+__bitmap_or
 bitmap_parselist
 bitmap_parselist_user
 bitmap_print_to_pagebuf
@@ -84,10 +85,10 @@
 blocking_notifier_call_chain
 blocking_notifier_chain_register
 blocking_notifier_chain_unregister
-bpf_trace_run1
 bpf_trace_run10
 bpf_trace_run11
 bpf_trace_run12
+bpf_trace_run1
 bpf_trace_run2
 bpf_trace_run3
 bpf_trace_run4
@@ -939,6 +940,7 @@
 get_governor_parent_kobj
 get_option
 get_page_owner_handle
+get_pfnblock_flags_mask
 get_pid_task
 get_random_bytes
 get_random_u32
@@ -1058,6 +1060,7 @@
 import_iovec
 in4_pton
 in6_pton
+inc_node_page_state
 inc_zone_page_state
 in_egroup_p
 inet_proto_csum_replace4
@@ -1093,11 +1096,11 @@
 input_unregister_device
 input_unregister_handle
 input_unregister_handler
-int_sqrt
 interval_tree_insert
 interval_tree_iter_first
 interval_tree_iter_next
 interval_tree_remove
+int_sqrt
 invalidate_mapping_pages
 iomem_resource
 iommu_alloc_resv_region
@@ -1193,6 +1196,8 @@
 irq_work_queue_on
 irq_work_sync
 is_dma_buf_file
+isolate_and_split_free_page
+isolate_anon_lru_page
 is_vmalloc_addr
 jiffies
 jiffies_to_msecs
@@ -1316,10 +1321,10 @@
 lock_sock_nested
 log_buf_addr_get
 log_buf_len_get
-lookup_page_ext
 __log_post_read_mmio
 __log_read_mmio
 __log_write_mmio
+lookup_page_ext
 lzo1x_1_compress
 lzo1x_decompress_safe
 lzorle1x_1_compress
@@ -1359,11 +1364,12 @@
 mempool_free
 mempool_free_slab
 memremap
-memset
 memset64
+memset
 __memset_io
 memstart_addr
 memunmap
+migrate_pages
 migrate_swap
 mipi_dsi_create_packet
 mipi_dsi_dcs_set_display_brightness
@@ -1385,6 +1391,7 @@
 __module_get
 module_layout
 module_put
+__mod_zone_page_state
 __msecs_to_jiffies
 msleep
 msleep_interruptible
@@ -1426,10 +1433,10 @@
 nla_find
 nla_memcpy
 __nla_parse
-nla_put
 nla_put_64bit
-nla_reserve
+nla_put
 nla_reserve_64bit
+nla_reserve
 __nla_validate
 __nlmsg_put
 no_llseek
@@ -1585,8 +1592,8 @@
 pci_bus_type
 pci_clear_master
 pci_d3cold_disable
-pci_dev_present
 pci_device_group
+pci_dev_present
 pci_disable_device
 pci_disable_msi
 pcie_capability_read_word
@@ -1750,6 +1757,7 @@
 pskb_expand_head
 __pskb_pull_tail
 ___pskb_trim
+putback_movable_pages
 put_device
 put_disk
 put_iova_domain
@@ -2073,8 +2081,8 @@
 show_rcu_gp_kthreads
 show_regs
 sigprocmask
-si_meminfo
 si_mem_available
+si_meminfo
 simple_attr_open
 simple_attr_read
 simple_attr_release
@@ -2272,6 +2280,8 @@
 synchronize_rcu_tasks_trace
 synchronize_srcu
 synchronize_srcu_expedited
+synth_event_create
+synth_event_delete
 syscon_node_to_regmap
 syscon_regmap_lookup_by_phandle
 sysctl_sched_features
@@ -2336,6 +2346,9 @@
 topology_set_thermal_pressure
 _totalram_pages
 total_swapcache_pages
+trace_array_get_by_name
+trace_array_put
+trace_array_set_clr_event
 __trace_bprintk
 __trace_bputs
 trace_clock_local
@@ -2344,9 +2357,93 @@
 trace_event_ignore_this_pid
 trace_event_raw_init
 trace_event_reg
+trace_get_event_file
 trace_handle_return
+__traceiter_android_rvh_account_irq
+__traceiter_android_rvh_build_perf_domains
+__traceiter_android_rvh_can_migrate_task
+__traceiter_android_rvh_check_preempt_wakeup
+__traceiter_android_rvh_cpu_cgroup_attach
+__traceiter_android_rvh_cpu_cgroup_online
+__traceiter_android_rvh_cpufreq_transition
+__traceiter_android_rvh_dequeue_task
+__traceiter_android_rvh_enqueue_task
+__traceiter_android_rvh_find_busiest_queue
+__traceiter_android_rvh_find_lowest_rq
+__traceiter_android_rvh_flush_task
+__traceiter_android_rvh_irqs_disable
+__traceiter_android_rvh_irqs_enable
+__traceiter_android_rvh_migrate_queued_task
+__traceiter_android_rvh_new_task_stats
+__traceiter_android_rvh_pick_next_entity
+__traceiter_android_rvh_place_entity
+__traceiter_android_rvh_preempt_disable
+__traceiter_android_rvh_preempt_enable
+__traceiter_android_rvh_replace_next_task_fair
+__traceiter_android_rvh_resume_cpus
+__traceiter_android_rvh_sched_balance_rt
+__traceiter_android_rvh_sched_cpu_dying
+__traceiter_android_rvh_sched_cpu_starting
+__traceiter_android_rvh_sched_exec
+__traceiter_android_rvh_sched_fork
+__traceiter_android_rvh_sched_fork_init
+__traceiter_android_rvh_sched_newidle_balance
+__traceiter_android_rvh_sched_nohz_balancer_kick
+__traceiter_android_rvh_sched_setaffinity
+__traceiter_android_rvh_schedule
+__traceiter_android_rvh_schedule_bug
+__traceiter_android_rvh_select_task_rq_fair
+__traceiter_android_rvh_select_task_rq_rt
+__traceiter_android_rvh_set_gfp_zone_flags
+__traceiter_android_rvh_set_readahead_gfp_mask
+__traceiter_android_rvh_set_skip_swapcache_flags
+__traceiter_android_rvh_set_task_cpu
+__traceiter_android_rvh_tick_entry
+__traceiter_android_rvh_try_to_wake_up
+__traceiter_android_rvh_try_to_wake_up_success
+__traceiter_android_rvh_ttwu_cond
+__traceiter_android_rvh_update_cpu_capacity
+__traceiter_android_rvh_update_cpus_allowed
+__traceiter_android_rvh_update_misfit_status
+__traceiter_android_rvh_wake_up_new_task
+__traceiter_android_vh_allow_domain_state
+__traceiter_android_vh_binder_restore_priority
+__traceiter_android_vh_binder_set_priority
+__traceiter_android_vh_binder_transaction_init
+__traceiter_android_vh_binder_wakeup_ilocked
+__traceiter_android_vh_cpu_idle_enter
+__traceiter_android_vh_cpu_idle_exit
+__traceiter_android_vh_dump_throttled_rt_tasks
+__traceiter_android_vh_freq_table_limits
+__traceiter_android_vh_ftrace_dump_buffer
+__traceiter_android_vh_ftrace_format_check
+__traceiter_android_vh_ftrace_oops_enter
+__traceiter_android_vh_ftrace_oops_exit
+__traceiter_android_vh_ftrace_size_check
+__traceiter_android_vh_gpio_block_read
+__traceiter_android_vh_iommu_setup_dma_ops
+__traceiter_android_vh_ipi_stop
+__traceiter_android_vh_jiffies_update
+__traceiter_android_vh_logbuf
+__traceiter_android_vh_printk_hotplug
+__traceiter_android_vh_scheduler_tick
+__traceiter_android_vh_show_max_freq
+__traceiter_android_vh_show_resume_epoch_val
+__traceiter_android_vh_show_suspend_epoch_val
+__traceiter_android_vh_timer_calc_index
+__traceiter_binder_transaction_received
 __traceiter_cpu_frequency
+__traceiter_cpu_frequency_limits
+__traceiter_cpu_idle
 __traceiter_gpu_mem_total
+__traceiter_ipi_entry
+__traceiter_ipi_raise
+__traceiter_irq_handler_entry
+__traceiter_rwmmio_post_read
+__traceiter_rwmmio_read
+__traceiter_rwmmio_write
+__traceiter_sched_switch
+__traceiter_suspend_resume
 __tracepoint_android_rvh_account_irq
 __tracepoint_android_rvh_build_perf_domains
 __tracepoint_android_rvh_can_migrate_task
@@ -2408,11 +2505,14 @@
 __tracepoint_android_vh_ftrace_oops_enter
 __tracepoint_android_vh_ftrace_oops_exit
 __tracepoint_android_vh_ftrace_size_check
-__tracepoint_android_vh_logbuf
+__tracepoint_android_vh_gpio_block_read
 __tracepoint_android_vh_iommu_setup_dma_ops
 __tracepoint_android_vh_ipi_stop
 __tracepoint_android_vh_jiffies_update
+__tracepoint_android_vh_logbuf
 __tracepoint_android_vh_printk_hotplug
+__tracepoint_android_vh_psi_event
+__tracepoint_android_vh_psi_group
 __tracepoint_android_vh_scheduler_tick
 __tracepoint_android_vh_show_max_freq
 __tracepoint_android_vh_show_resume_epoch_val
android/abi_gki_aarch64_unisoc | 2571 (new file; file diff suppressed because it is too large)
@@ -391,6 +391,9 @@
 trace_event_raw_init
 trace_event_reg
 trace_handle_return
+__traceiter_rwmmio_post_read
+__traceiter_rwmmio_read
+__traceiter_rwmmio_write
 __tracepoint_rwmmio_post_read
 __tracepoint_rwmmio_read
 __tracepoint_rwmmio_write
@@ -96,7 +96,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
 			     sizeof(sf->uc.uc_mcontext.regs.scratch));
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
 
-	return err;
+	return err ? -EFAULT : 0;
 }
 
 static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
@@ -110,7 +110,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 				&(sf->uc.uc_mcontext.regs.scratch),
 				sizeof(sf->uc.uc_mcontext.regs.scratch));
 	if (err)
-		return err;
+		return -EFAULT;
 
 	set_current_blocked(&set);
 	regs->bta	= uregs.scratch.bta;
@@ -22,6 +22,11 @@
 		i2c1 = &i2c2;
 		i2c2 = &i2c3;
 		i2c3 = &i2c4;
+		mmc0 = &mmc1;
+		mmc1 = &mmc2;
+		mmc2 = &mmc3;
+		mmc3 = &mmc4;
+		mmc4 = &mmc5;
 		serial0 = &uart1;
 		serial1 = &uart2;
 		serial2 = &uart3;
@@ -770,14 +770,6 @@
 		ti,max-div = <2>;
 	};
 
-	sha2md5_fck: sha2md5_fck@15c8 {
-		#clock-cells = <0>;
-		compatible = "ti,gate-clock";
-		clocks = <&l3_div_ck>;
-		ti,bit-shift = <1>;
-		reg = <0x15c8>;
-	};
-
 	usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
 		#clock-cells = <0>;
 		compatible = "ti,gate-clock";
@@ -25,6 +25,11 @@
 		i2c2 = &i2c3;
 		i2c3 = &i2c4;
 		i2c4 = &i2c5;
+		mmc0 = &mmc1;
+		mmc1 = &mmc2;
+		mmc2 = &mmc3;
+		mmc3 = &mmc4;
+		mmc4 = &mmc5;
 		serial0 = &uart1;
 		serial1 = &uart2;
 		serial2 = &uart3;
@@ -15,14 +15,14 @@
 #include <asm/mach-types.h>
 
 /* cats host-specific stuff */
-static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
+static int irqmap_cats[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
 
 static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin)
 {
 	return 0;
 }
 
-static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 	if (dev->irq >= 255)
 		return -1;	/* not a valid interrupt. */
@@ -14,9 +14,9 @@
 #include <asm/mach/pci.h>
 #include <asm/mach-types.h>
 
-static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
+static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
 
-static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 	if (dev->vendor == PCI_VENDOR_ID_CONTAQ &&
 	    dev->device == PCI_DEVICE_ID_CONTAQ_82C693)
@@ -18,7 +18,7 @@
  * We now use the slot ID instead of the device identifiers to select
  * which interrupt is routed where.
  */
-static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 	switch (slot) {
 	case 0:  /* host bridge */
@@ -14,13 +14,12 @@
 #include <asm/mach/pci.h>
 #include <asm/mach-types.h>
 
-static int irqmap_personal_server[] __initdata = {
+static int irqmap_personal_server[] = {
 	IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0,
 	IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI
 };
 
-static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot,
-	u8 pin)
+static int personal_server_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
 	unsigned char line;
 
@@ -65,7 +65,7 @@ static void __init keystone_init(void)
 static long long __init keystone_pv_fixup(void)
 {
 	long long offset;
-	phys_addr_t mem_start, mem_end;
+	u64 mem_start, mem_end;
 
 	mem_start = memblock_start_of_DRAM();
 	mem_end = memblock_end_of_DRAM();
@@ -78,7 +78,7 @@ static long long __init keystone_pv_fixup(void)
 	if (mem_start < KEYSTONE_HIGH_PHYS_START ||
 	    mem_end   > KEYSTONE_HIGH_PHYS_END) {
 		pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
-			(u64)mem_start, (u64)mem_end);
+			mem_start, mem_end);
 		return 0;
 	}
 
@@ -15,6 +15,7 @@
 #include <linux/platform_data/gpio-omap.h>
 
 #include <asm/assembler.h>
+#include <asm/irq.h>
 
 #include "ams-delta-fiq.h"
 #include "board-ams-delta.h"
@@ -33,7 +33,7 @@ static void __init __maybe_unused omap_generic_init(void)
 }
 
 /* Clocks are needed early, see drivers/clocksource for the rest */
-void __init __maybe_unused omap_init_time_of(void)
+static void __init __maybe_unused omap_init_time_of(void)
 {
 	omap_clk_init();
 	timer_probe();
@@ -188,7 +188,7 @@ static const char * const dra7_sr_instances[] = {
 
 int __init omap_devinit_smartreflex(void)
 {
-	const char * const *sr_inst;
+	const char * const *sr_inst = NULL;
 	int i, nr_sr = 0;
 
 	if (soc_is_omap44xx()) {
@@ -235,6 +235,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
 	phys_addr_t  mem_end;
 	phys_addr_t reg_start, reg_end;
 	unsigned int mem_max_regions;
+	bool first = true;
 	int num;
 	u64 i;
 
@@ -263,7 +264,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
 #endif
 
 	for_each_mem_range(i, &reg_start, &reg_end) {
-		if (i == 0) {
+		if (first) {
 			phys_addr_t phys_offset = PHYS_OFFSET;
 
 			/*
@@ -275,6 +276,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
 			mem_start = reg_start;
 			mem_end = reg_end;
 			specified_mem_size = mem_end - mem_start;
+			first = false;
 		} else {
 			/*
 			 * memblock auto merges contiguous blocks, remove
@@ -95,10 +95,11 @@ void __init pmsav8_adjust_lowmem_bounds(void)
 {
 	phys_addr_t mem_end;
 	phys_addr_t reg_start, reg_end;
+	bool first = true;
 	u64 i;
 
 	for_each_mem_range(i, &reg_start, &reg_end) {
-		if (i == 0) {
+		if (first) {
 			phys_addr_t phys_offset = PHYS_OFFSET;
 
 			/*
@@ -107,6 +108,7 @@ void __init pmsav8_adjust_lowmem_bounds(void)
 		if (reg_start != phys_offset)
 			panic("First memory bank must be contiguous from PHYS_OFFSET");
 		mem_end = reg_end;
+		first = false;
 	} else {
 		/*
 		 * memblock auto merges contiguous blocks, remove
@@ -204,7 +204,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
 static struct undef_hook uprobes_arm_break_hook = {
 	.instr_mask = 0x0fffffff,
 	.instr_val = (UPROBE_SWBP_ARM_INSN & 0x0fffffff),
-	.cpsr_mask = MODE_MASK,
+	.cpsr_mask = (PSR_T_BIT | MODE_MASK),
 	.cpsr_val = USR_MODE,
 	.fn = uprobe_trap_handler,
 };
@@ -212,7 +212,7 @@ static struct undef_hook uprobes_arm_break_hook = {
 static struct undef_hook uprobes_arm_ss_hook = {
 	.instr_mask = 0x0fffffff,
 	.instr_val = (UPROBE_SS_ARM_INSN & 0x0fffffff),
-	.cpsr_mask = MODE_MASK,
+	.cpsr_mask = (PSR_T_BIT | MODE_MASK),
 	.cpsr_val = USR_MODE,
 	.fn = uprobe_trap_handler,
 };
@@ -1402,10 +1402,13 @@ config ARM64_PAN
config AS_HAS_LDAPR
	def_bool $(as-instr,.arch_extension rcpc)

+config AS_HAS_LSE_ATOMICS
+	def_bool $(as-instr,.arch_extension lse)
+
config ARM64_LSE_ATOMICS
	bool
	default ARM64_USE_LSE_ATOMICS
-	depends on $(as-instr,.arch_extension lse)
+	depends on AS_HAS_LSE_ATOMICS

config ARM64_USE_LSE_ATOMICS
	bool "Atomic instructions"
@@ -1687,6 +1690,7 @@ config ARM64_MTE
	depends on AS_HAS_ARMV8_5
	# Required for tag checking in the uaccess routines
	depends on ARM64_PAN
+	depends on AS_HAS_LSE_ATOMICS
	select ARCH_USES_HIGH_VMA_FLAGS
	help
	  Memory Tagging (part of the ARMv8.5 Extensions) provides
@@ -8,3 +8,7 @@
	compatible = "pine64,pine64-lts", "allwinner,sun50i-r18",
		     "allwinner,sun50i-a64";
};
+
+&mmc0 {
+	cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
+};
@@ -34,7 +34,7 @@
	vmmc-supply = <&reg_dcdc1>;
	disable-wp;
	bus-width = <4>;
-	cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+	cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 push-pull switch */
	status = "okay";
};

@@ -289,10 +289,6 @@
	vcc-pm-supply = <&reg_aldo1>;
};

-&rtc {
-	clocks = <&ext_osc32k>;
-};
-
&spdif {
	status = "okay";
};
@@ -13,6 +13,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_UCLAMP_TASK=y
+CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
@@ -653,6 +654,7 @@ CONFIG_SCHEDSTATS=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_TRACE_MMIO_ACCESS=y
CONFIG_TRACEFS_DISABLE_AUTOMOUNT=y
+CONFIG_HIST_TRIGGERS=y
CONFIG_KUNIT=y
CONFIG_KUNIT_DEBUGFS=y
# CONFIG_RUNTIME_TESTING_MENU is not set
@@ -97,9 +97,9 @@
	.popsection
	.subsection 1
663:	\insn2
-664:	.previous
-	.org	. - (664b-663b) + (662b-661b)
+664:	.org	. - (664b-663b) + (662b-661b)
	.org	. - (662b-661b) + (664b-663b)
+	.previous
	.endif
.endm

@@ -169,11 +169,11 @@
 */
.macro alternative_endif
664:
+	.org	. - (664b-663b) + (662b-661b)
+	.org	. - (662b-661b) + (664b-663b)
	.if .Lasm_alt_mode==0
	.previous
	.endif
-	.org	. - (664b-663b) + (662b-661b)
-	.org	. - (662b-661b) + (664b-663b)
.endm

/*
@@ -59,62 +59,32 @@ alternative_else_nop_endif
#endif

/*
- * Generate the assembly for UAO alternatives with exception table entries.
+ * Generate the assembly for LDTR/STTR with exception table entries.
 * This is complicated as there is no post-increment or pair versions of the
 * unprivileged instructions, and USER() only works for single instructions.
 */
-#ifdef CONFIG_ARM64_UAO
	.macro uao_ldp l, reg1, reg2, addr, post_inc
-	alternative_if_not ARM64_HAS_UAO
-8888:		ldp	\reg1, \reg2, [\addr], \post_inc;
-8889:		nop;
-		nop;
-	alternative_else
-		ldtr	\reg1, [\addr];
-		ldtr	\reg2, [\addr, #8];
-		add	\addr, \addr, \post_inc;
-	alternative_endif
+8888:		ldtr	\reg1, [\addr];
+8889:		ldtr	\reg2, [\addr, #8];
+		add	\addr, \addr, \post_inc;

		_asm_extable	8888b,\l;
		_asm_extable	8889b,\l;
	.endm

	.macro uao_stp l, reg1, reg2, addr, post_inc
-	alternative_if_not ARM64_HAS_UAO
-8888:		stp	\reg1, \reg2, [\addr], \post_inc;
-8889:		nop;
-		nop;
-	alternative_else
-		sttr	\reg1, [\addr];
-		sttr	\reg2, [\addr, #8];
-		add	\addr, \addr, \post_inc;
-	alternative_endif
+8888:		sttr	\reg1, [\addr];
+8889:		sttr	\reg2, [\addr, #8];
+		add	\addr, \addr, \post_inc;

		_asm_extable	8888b,\l;
		_asm_extable	8889b,\l;
	.endm

	.macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
-	alternative_if_not ARM64_HAS_UAO
-8888:		\inst	\reg, [\addr], \post_inc;
-		nop;
-	alternative_else
-		\alt_inst	\reg, [\addr];
-		add	\addr, \addr, \post_inc;
-	alternative_endif
+8888:		\alt_inst	\reg, [\addr];
+		add	\addr, \addr, \post_inc;

		_asm_extable	8888b,\l;
	.endm
-#else
-	.macro uao_ldp l, reg1, reg2, addr, post_inc
-		USER(\l, ldp \reg1, \reg2, [\addr], \post_inc)
-	.endm
-	.macro uao_stp l, reg1, reg2, addr, post_inc
-		USER(\l, stp \reg1, \reg2, [\addr], \post_inc)
-	.endm
-	.macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc
-		USER(\l, \inst \reg, [\addr], \post_inc)
-	.endm
-#endif

#endif
@@ -70,6 +70,7 @@
#define ARM64_HAS_LDAPR			59
#define ARM64_KVM_PROTECTED_MODE	60

-#define ARM64_NCAPS			61
+/* kabi: reserve 62 - 76 for future cpu capabilities */
+#define ARM64_NCAPS			76

#endif /* __ASM_CPUCAPS_H */
@@ -16,7 +16,7 @@
do { \
	unsigned int loops = FUTEX_MAX_LOOPS; \
	\
-	uaccess_enable(); \
+	uaccess_enable_privileged(); \
	asm volatile( \
"	prfm	pstl1strm, %2\n" \
"1:	ldxr	%w1, %2\n" \
@@ -39,7 +39,7 @@ do { \
	  "+r" (loops) \
	: "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN) \
	: "memory"); \
-	uaccess_disable(); \
+	uaccess_disable_privileged(); \
} while (0)

static inline int
@@ -95,7 +95,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
		return -EFAULT;

	uaddr = __uaccess_mask_ptr(_uaddr);
-	uaccess_enable();
+	uaccess_enable_privileged();
	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
"	prfm	pstl1strm, %2\n"
"1:	ldxr	%w1, %2\n"
@@ -118,7 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
	: "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
	: "memory");
-	uaccess_disable();
+	uaccess_disable_privileged();

	if (!ret)
		*uval = val;
@@ -232,7 +232,8 @@ static inline const void *__tag_set(const void *addr, u8 tag)
}

#ifdef CONFIG_KASAN_HW_TAGS
-#define arch_enable_tagging()			mte_enable_kernel()
+#define arch_enable_tagging_sync()		mte_enable_kernel_sync()
+#define arch_enable_tagging_async()		mte_enable_kernel_async()
#define arch_set_tagging_report_once(state)	mte_set_report_once(state)
#define arch_init_tags(max_tag)			mte_init_tags(max_tag)
#define arch_get_random_tag()			mte_get_random_tag()
@@ -87,7 +87,8 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size,
	}
}

-void mte_enable_kernel(void);
+void mte_enable_kernel_sync(void);
+void mte_enable_kernel_async(void);
void mte_init_tags(u64 max_tag);

void mte_set_report_once(bool state);
@@ -115,7 +116,11 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size,
{
}

-static inline void mte_enable_kernel(void)
+static inline void mte_enable_kernel_sync(void)
+{
+}
+
+static inline void mte_enable_kernel_async(void)
{
}

@@ -24,6 +24,8 @@
#include <asm/memory.h>
#include <asm/extable.h>

+#define HAVE_GET_KERNEL_NOFAULT
+
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
@@ -217,18 +219,28 @@ do { \
 * The Tag check override (TCO) bit disables temporarily the tag checking
 * preventing the issue.
 */
-static inline void uaccess_disable(void)
+static inline void __uaccess_disable_tco(void)
{
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+static inline void __uaccess_enable_tco(void)
+{
+	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
+				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+static inline void uaccess_disable_privileged(void)
+{
+	__uaccess_disable_tco();

	__uaccess_disable(ARM64_HAS_PAN);
}

-static inline void uaccess_enable(void)
+static inline void uaccess_enable_privileged(void)
{
-	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
-				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+	__uaccess_enable_tco();

	__uaccess_enable(ARM64_HAS_PAN);
}
@@ -276,10 +288,9 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
-#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
+#define __get_mem_asm(load, reg, x, addr, err) \
	asm volatile( \
-	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
-			alt_instr " " reg "1, [%2]\n", feature) \
+	"1: " load " " reg "1, [%2]\n" \
	"2:\n" \
	" .section .fixup, \"ax\"\n" \
	" .align 2\n" \
@@ -291,35 +302,36 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
	: "+r" (err), "=&r" (x) \
	: "r" (addr), "i" (-EFAULT))

-#define __raw_get_user(x, ptr, err) \
+#define __raw_get_mem(ldr, x, ptr, err) \
do { \
	unsigned long __gu_val; \
-	__chk_user_ptr(ptr); \
-	uaccess_enable_not_uao(); \
	switch (sizeof(*(ptr))) { \
	case 1: \
-		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \
-			       (err), ARM64_HAS_UAO); \
+		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err)); \
		break; \
	case 2: \
-		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \
-			       (err), ARM64_HAS_UAO); \
+		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err)); \
		break; \
	case 4: \
-		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \
-			       (err), ARM64_HAS_UAO); \
+		__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err)); \
		break; \
	case 8: \
-		__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \
-			       (err), ARM64_HAS_UAO); \
+		__get_mem_asm(ldr, "%x", __gu_val, (ptr), (err)); \
		break; \
	default: \
		BUILD_BUG(); \
	} \
-	uaccess_disable_not_uao(); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)

+#define __raw_get_user(x, ptr, err) \
+do { \
+	__chk_user_ptr(ptr); \
+	uaccess_enable_not_uao(); \
+	__raw_get_mem("ldtr", x, ptr, err); \
+	uaccess_disable_not_uao(); \
+} while (0)
+
#define __get_user_error(x, ptr, err) \
do { \
	__typeof__(*(ptr)) __user *__p = (ptr); \
@@ -341,10 +353,19 @@ do { \

#define get_user	__get_user

-#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+	int __gkn_err = 0; \
+	\
+	__raw_get_mem("ldr", *((type *)(dst)), \
+		      (__force type *)(src), __gkn_err); \
+	if (unlikely(__gkn_err)) \
+		goto err_label; \
+} while (0)
+
+#define __put_mem_asm(store, reg, x, addr, err) \
	asm volatile( \
-	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
-			alt_instr " " reg "1, [%2]\n", feature) \
+	"1: " store " " reg "1, [%2]\n" \
	"2:\n" \
	" .section .fixup,\"ax\"\n" \
	" .align 2\n" \
@@ -355,31 +376,32 @@ do { \
	: "+r" (err) \
	: "r" (x), "r" (addr), "i" (-EFAULT))

-#define __raw_put_user(x, ptr, err) \
+#define __raw_put_mem(str, x, ptr, err) \
do { \
	__typeof__(*(ptr)) __pu_val = (x); \
-	__chk_user_ptr(ptr); \
-	uaccess_enable_not_uao(); \
	switch (sizeof(*(ptr))) { \
	case 1: \
-		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr), \
-			       (err), ARM64_HAS_UAO); \
+		__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err)); \
		break; \
	case 2: \
-		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr), \
-			       (err), ARM64_HAS_UAO); \
+		__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err)); \
		break; \
	case 4: \
-		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr), \
-			       (err), ARM64_HAS_UAO); \
+		__put_mem_asm(str, "%w", __pu_val, (ptr), (err)); \
		break; \
	case 8: \
-		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr), \
-			       (err), ARM64_HAS_UAO); \
+		__put_mem_asm(str, "%x", __pu_val, (ptr), (err)); \
		break; \
	default: \
		BUILD_BUG(); \
	} \
+} while (0)
+
+#define __raw_put_user(x, ptr, err) \
+do { \
+	__chk_user_ptr(ptr); \
+	uaccess_enable_not_uao(); \
+	__raw_put_mem("sttr", x, ptr, err); \
	uaccess_disable_not_uao(); \
} while (0)

@@ -404,6 +426,16 @@ do { \

#define put_user	__put_user

+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+	int __pkn_err = 0; \
+	\
+	__raw_put_mem("str", *((type *)(src)), \
+		      (__force type *)(dst), __pkn_err); \
+	if (unlikely(__pkn_err)) \
+		goto err_label; \
+} while(0)
+
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n) \
({ \
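Editor's aside on the hunk above: the new `__get_kernel_nofault()` / `__put_kernel_nofault()` macros take a goto label rather than returning an error code, which is easy to misread on first sight. The sketch below shows the intended calling convention; `read_kernel_u64()` is a hypothetical helper invented for illustration and is not part of the patch.

```c
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical helper: read a u64 from a possibly-bad kernel pointer. */
static bool read_kernel_u64(const u64 *src, u64 *dst)
{
	u64 val;

	/* On a faulting access the macro branches to the 'fault' label. */
	__get_kernel_nofault(&val, src, u64, fault);
	*dst = val;
	return true;

fault:
	return false;
}
```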
@@ -277,7 +277,7 @@ static void __init register_insn_emulation_sysctl(void)

#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
do { \
-	uaccess_enable(); \
+	uaccess_enable_privileged(); \
	__asm__ __volatile__( \
	"	mov		%w3, %w7\n" \
	"0:	ldxr"B"		%w2, [%4]\n" \
@@ -302,7 +302,7 @@ do { \
	"i" (-EFAULT), \
	"i" (__SWP_LL_SC_LOOPS) \
	: "memory"); \
-	uaccess_disable(); \
+	uaccess_disable_privileged(); \
} while (0)

#define __user_swp_asm(data, addr, res, temp, temp2) \
@@ -148,16 +148,18 @@ alternative_cb_end
	.endm

	/* Check for MTE asynchronous tag check faults */
-	.macro check_mte_async_tcf, flgs, tmp
+	.macro check_mte_async_tcf, tmp, ti_flags
#ifdef CONFIG_ARM64_MTE
+	.arch_extension lse
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
-	orr	\flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
-	str	\flgs, [tsk, #TSK_TI_FLAGS]
+	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
+	add	\ti_flags, tsk, #TSK_TI_FLAGS
+	stset	\tmp, [\ti_flags]
	msr_s	SYS_TFSRE0_EL1, xzr
1:
#endif
@@ -244,7 +246,7 @@ alternative_else_nop_endif
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
-	check_mte_async_tcf x19, x22
+	check_mte_async_tcf x22, x23
	apply_ssbd 1, x22, x23

	ptrauth_keys_install_kernel tsk, x20, x22, x23
@@ -107,11 +107,23 @@ void mte_init_tags(u64 max_tag)
	write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
}

-void mte_enable_kernel(void)
+static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
	/* Enable MTE Sync Mode for EL1. */
-	sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
+	sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf);
	isb();
+
+	pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
+}
+
+void mte_enable_kernel_sync(void)
+{
+	__mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
+}
+
+void mte_enable_kernel_async(void)
+{
+	__mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);
}

void mte_set_report_once(bool state)
@@ -55,8 +55,6 @@ CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_QLOGIC_1280=y
-CONFIG_ATA=y
-CONFIG_ATA_PIIX=y
CONFIG_SATA_VITESSE=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
@@ -144,7 +144,7 @@ config ARCH_FLATMEM_ENABLE
config ARCH_SPARSEMEM_ENABLE
	def_bool y
	depends on MMU
-	select SPARSEMEM_STATIC if 32BIT && SPARSMEM
+	select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
	select SPARSEMEM_VMEMMAP_ENABLE if 64BIT

config ARCH_SELECT_MEMORY_MODEL
@@ -15,6 +15,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_UCLAMP_TASK=y
+CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
@@ -1051,9 +1051,6 @@ void __init setup_arch(char **cmdline_p)

	cleanup_highmap();

-	/* Look for ACPI tables and reserve memory occupied by them. */
-	acpi_boot_table_init();
-
	memblock_set_current_limit(ISA_END_ADDRESS);
	e820__memblock_setup();

@@ -1132,6 +1129,8 @@ void __init setup_arch(char **cmdline_p)
	reserve_initrd();

	acpi_table_upgrade();
+	/* Look for ACPI tables and reserve memory occupied by them. */
+	acpi_boot_table_init();

	vsmp_init();

@@ -3329,7 +3329,11 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	enum vm_entry_failure_code entry_failure_code;
	bool evaluate_pending_interrupts;
-	u32 exit_reason, failed_index;
+	union vmx_exit_reason exit_reason = {
+		.basic = EXIT_REASON_INVALID_STATE,
+		.failed_vmentry = 1,
+	};
+	u32 failed_index;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
		kvm_vcpu_flush_tlb_current(vcpu);
@@ -3381,7 +3385,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,

	if (nested_vmx_check_guest_state(vcpu, vmcs12,
					 &entry_failure_code)) {
-		exit_reason = EXIT_REASON_INVALID_STATE;
+		exit_reason.basic = EXIT_REASON_INVALID_STATE;
		vmcs12->exit_qualification = entry_failure_code;
		goto vmentry_fail_vmexit;
	}
@@ -3392,7 +3396,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
		vcpu->arch.tsc_offset += vmcs12->tsc_offset;

	if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
-		exit_reason = EXIT_REASON_INVALID_STATE;
+		exit_reason.basic = EXIT_REASON_INVALID_STATE;
		vmcs12->exit_qualification = entry_failure_code;
		goto vmentry_fail_vmexit_guest_mode;
	}
@@ -3402,7 +3406,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
						   vmcs12->vm_entry_msr_load_addr,
						   vmcs12->vm_entry_msr_load_count);
		if (failed_index) {
-			exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
+			exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
			vmcs12->exit_qualification = failed_index;
			goto vmentry_fail_vmexit_guest_mode;
		}
@@ -3470,7 +3474,7 @@ vmentry_fail_vmexit:
		return NVMX_VMENTRY_VMEXIT;

	load_vmcs12_host_state(vcpu, vmcs12);
-	vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
+	vmcs12->vm_exit_reason = exit_reason.full;
	if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
		vmx->nested.need_vmcs12_to_shadow_sync = true;
	return NVMX_VMENTRY_VMEXIT;
@@ -5533,7 +5537,12 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
	return kvm_skip_emulated_instruction(vcpu);

fail:
-	nested_vmx_vmexit(vcpu, vmx->exit_reason,
+	/*
+	 * This is effectively a reflected VM-Exit, as opposed to a synthesized
+	 * nested VM-Exit.  Pass the original exit reason, i.e. don't hardcode
+	 * EXIT_REASON_VMFUNC as the exit reason.
+	 */
+	nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
			  vmx_get_intr_info(vcpu),
			  vmx_get_exit_qual(vcpu));
	return 1;
@@ -5601,7 +5610,8 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
 */
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
-					struct vmcs12 *vmcs12, u32 exit_reason)
+					struct vmcs12 *vmcs12,
+					union vmx_exit_reason exit_reason)
{
	u32 msr_index = kvm_rcx_read(vcpu);
	gpa_t bitmap;
@@ -5615,7 +5625,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
	 * First we need to figure out which of the four to use:
	 */
	bitmap = vmcs12->msr_bitmap;
-	if (exit_reason == EXIT_REASON_MSR_WRITE)
+	if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
		bitmap += 2048;
	if (msr_index >= 0xc0000000) {
		msr_index -= 0xc0000000;
@@ -5752,11 +5762,12 @@ static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
 * Return true if L0 wants to handle an exit from L2 regardless of whether or not
 * L1 wants the exit. Only call this when in is_guest_mode (L2).
 */
-static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
+static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
+				     union vmx_exit_reason exit_reason)
{
	u32 intr_info;

-	switch ((u16)exit_reason) {
+	switch ((u16)exit_reason.basic) {
	case EXIT_REASON_EXCEPTION_NMI:
		intr_info = vmx_get_intr_info(vcpu);
		if (is_nmi(intr_info))
@@ -5812,12 +5823,13 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
 * Return 1 if L1 wants to intercept an exit from L2. Only call this when in
 * is_guest_mode (L2).
 */
-static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
+static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
+				     union vmx_exit_reason exit_reason)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u32 intr_info;

-	switch ((u16)exit_reason) {
+	switch ((u16)exit_reason.basic) {
	case EXIT_REASON_EXCEPTION_NMI:
		intr_info = vmx_get_intr_info(vcpu);
		if (is_nmi(intr_info))
@@ -5936,7 +5948,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu, u32 exit_reason)
bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u32 exit_reason = vmx->exit_reason;
+	union vmx_exit_reason exit_reason = vmx->exit_reason;
	unsigned long exit_qual;
	u32 exit_intr_info;

@@ -5955,7 +5967,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
		goto reflect_vmexit;
	}

-	trace_kvm_nested_vmexit(exit_reason, vcpu, KVM_ISA_VMX);
+	trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX);

	/* If L0 (KVM) wants the exit, it trumps L1's desires. */
	if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
@@ -5981,7 +5993,7 @@ bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
		exit_qual = vmx_get_exit_qual(vcpu);

reflect_vmexit:
-	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info, exit_qual);
+	nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
	return true;
}

@@ -1578,7 +1578,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
	 * i.e. we end up advancing IP with some random value.
	 */
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
-	    to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
+	    to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
		orig_rip = kvm_rip_read(vcpu);
		rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
#ifdef CONFIG_X86_64
@@ -5687,7 +5687,7 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	*info1 = vmx_get_exit_qual(vcpu);
-	if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
+	if (!(vmx->exit_reason.failed_vmentry)) {
		*info2 = vmx->idt_vectoring_info;
		*intr_info = vmx_get_intr_info(vcpu);
		if (is_exception_with_error_code(*intr_info))
@@ -5931,8 +5931,9 @@ void dump_vmcs(void)
static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u32 exit_reason = vmx->exit_reason;
+	union vmx_exit_reason exit_reason = vmx->exit_reason;
	u32 vectoring_info = vmx->idt_vectoring_info;
+	u16 exit_handler_index;

	/*
	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
@@ -5974,11 +5975,11 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
		return 1;
	}

-	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
+	if (exit_reason.failed_vmentry) {
		dump_vmcs();
		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		vcpu->run->fail_entry.hardware_entry_failure_reason
-			= exit_reason;
+			= exit_reason.full;
		vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
		return 0;
	}
@@ -6000,24 +6001,24 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
	 * will cause infinite loop.
	 */
	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
-	    (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
-	     exit_reason != EXIT_REASON_EPT_VIOLATION &&
-	     exit_reason != EXIT_REASON_PML_FULL &&
-	     exit_reason != EXIT_REASON_APIC_ACCESS &&
-	     exit_reason != EXIT_REASON_TASK_SWITCH)) {
+	    (exit_reason.basic != EXIT_REASON_EXCEPTION_NMI &&
+	     exit_reason.basic != EXIT_REASON_EPT_VIOLATION &&
+	     exit_reason.basic != EXIT_REASON_PML_FULL &&
+	     exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
+	     exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
+		int ndata = 3;
+
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
-		vcpu->run->internal.ndata = 3;
		vcpu->run->internal.data[0] = vectoring_info;
-		vcpu->run->internal.data[1] = exit_reason;
+		vcpu->run->internal.data[1] = exit_reason.full;
		vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
-		if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
-			vcpu->run->internal.ndata++;
-			vcpu->run->internal.data[3] =
+		if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
+			vcpu->run->internal.data[ndata++] =
				vmcs_read64(GUEST_PHYSICAL_ADDRESS);
		}
-		vcpu->run->internal.data[vcpu->run->internal.ndata++] =
-			vcpu->arch.last_vmentry_cpu;
+		vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
+		vcpu->run->internal.ndata = ndata;
		return 0;
	}

@@ -6043,38 +6044,39 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
	if (exit_fastpath != EXIT_FASTPATH_NONE)
		return 1;

-	if (exit_reason >= kvm_vmx_max_exit_handlers)
+	if (exit_reason.basic >= kvm_vmx_max_exit_handlers)
		goto unexpected_vmexit;
#ifdef CONFIG_RETPOLINE
-	if (exit_reason == EXIT_REASON_MSR_WRITE)
+	if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
		return kvm_emulate_wrmsr(vcpu);
-	else if (exit_reason == EXIT_REASON_PREEMPTION_TIMER)
+	else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
		return handle_preemption_timer(vcpu);
-	else if (exit_reason == EXIT_REASON_INTERRUPT_WINDOW)
+	else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
		return handle_interrupt_window(vcpu);
-	else if (exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
+	else if (exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
		return handle_external_interrupt(vcpu);
-	else if (exit_reason == EXIT_REASON_HLT)
+	else if (exit_reason.basic == EXIT_REASON_HLT)
		return kvm_emulate_halt(vcpu);
-	else if (exit_reason == EXIT_REASON_EPT_MISCONFIG)
+	else if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG)
		return handle_ept_misconfig(vcpu);
#endif

-	exit_reason = array_index_nospec(exit_reason,
+	exit_handler_index = array_index_nospec((u16)exit_reason.basic,
					 kvm_vmx_max_exit_handlers);
-	if (!kvm_vmx_exit_handlers[exit_reason])
+	if (!kvm_vmx_exit_handlers[exit_handler_index])
		goto unexpected_vmexit;

-	return kvm_vmx_exit_handlers[exit_reason](vcpu);
+	return kvm_vmx_exit_handlers[exit_handler_index](vcpu);

unexpected_vmexit:
-	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason);
+	vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+		    exit_reason.full);
	dump_vmcs();
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	vcpu->run->internal.suberror =
			KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
	vcpu->run->internal.ndata = 2;
-	vcpu->run->internal.data[0] = exit_reason;
+	vcpu->run->internal.data[0] = exit_reason.full;
	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
	return 0;
}
@@ -6393,9 +6395,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

-	if (vmx->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
+	if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
		handle_external_interrupt_irqoff(vcpu);
-	else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)
+	else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
		handle_exception_nmi_irqoff(vmx);
}

@@ -6583,7 +6585,7 @@ void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)

static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
-	switch (to_vmx(vcpu)->exit_reason) {
+	switch (to_vmx(vcpu)->exit_reason.basic) {
	case EXIT_REASON_MSR_WRITE:
		return handle_fastpath_set_msr_irqoff(vcpu);
	case EXIT_REASON_PREEMPTION_TIMER:
@@ -6782,17 +6784,17 @@ reenter_guest:
	vmx->idt_vectoring_info = 0;

	if (unlikely(vmx->fail)) {
-		vmx->exit_reason = 0xdead;
+		vmx->exit_reason.full = 0xdead;
		return EXIT_FASTPATH_NONE;
	}

-	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
-	if (unlikely((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY))
+	vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
+	if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
		kvm_machine_check();

-	trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
+	trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX);

-	if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+	if (unlikely(vmx->exit_reason.failed_vmentry))
		return EXIT_FASTPATH_NONE;

	vmx->loaded_vmcs->launched = 1;
@@ -70,6 +70,29 @@ struct pt_desc {
	struct pt_ctx guest;
};

+union vmx_exit_reason {
+	struct {
+		u32	basic			: 16;
+		u32	reserved16		: 1;
+		u32	reserved17		: 1;
+		u32	reserved18		: 1;
+		u32	reserved19		: 1;
+		u32	reserved20		: 1;
+		u32	reserved21		: 1;
+		u32	reserved22		: 1;
+		u32	reserved23		: 1;
+		u32	reserved24		: 1;
+		u32	reserved25		: 1;
+		u32	reserved26		: 1;
+		u32	enclave_mode		: 1;
+		u32	smi_pending_mtf		: 1;
+		u32	smi_from_vmx_root	: 1;
+		u32	reserved30		: 1;
+		u32	failed_vmentry		: 1;
+	};
+	u32 full;
+};
+
/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -244,7 +267,7 @@ struct vcpu_vmx {
	int vpid;
	bool emulation_required;

-	u32 exit_reason;
+	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;
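Editor's aside on the `union vmx_exit_reason` added above: `.full` aliases the raw 32-bit VM_EXIT_REASON value, while the bitfield view picks out the basic reason (bits 15:0) and the failed-VM-entry flag (bit 31). A standalone sketch of that aliasing, with the reserved fields collapsed into one member and assuming the usual little-endian bitfield layout the kernel relies on for x86:

```c
#include <stdint.h>
#include <stdio.h>

/* Condensed copy of the union from the hunk above (illustration only). */
union exit_reason_demo {
	struct {
		uint32_t basic             : 16;
		uint32_t reserved          : 11;
		uint32_t enclave_mode      : 1;
		uint32_t smi_pending_mtf   : 1;
		uint32_t smi_from_vmx_root : 1;
		uint32_t reserved30        : 1;
		uint32_t failed_vmentry    : 1;
	};
	uint32_t full;
};

int main(void)
{
	/* Bit 31 set (failed VM entry) with basic exit reason 33 (0x21). */
	union exit_reason_demo r = { .full = 0x80000021u };

	printf("full=%#x basic=%u failed_vmentry=%u\n",
	       r.full, r.basic, r.failed_vmentry);
	return 0;
}
```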
@@ -1,5 +1,5 @@
BRANCH=android12-5.10
-KMI_GENERATION=1
+KMI_GENERATION=2

LLVM=1
DEPMOD=depmod
@@ -52,6 +52,7 @@
#include <trace/hooks/signal.h>
#include <trace/hooks/logbuf.h>
#include <trace/hooks/vmscan.h>
+#include <trace/hooks/psi.h>

/*
 * Export tracepoints that act as a bare tracehook (ie: have no trace event
@@ -243,3 +244,18 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpm_adj_current_limit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_logbuf);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_scan_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_swappiness);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_event);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_group);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuset_fork);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_cpus_allowed_comm);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_setaffinity_early);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_after_enqueue_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_after_dequeue_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_entity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_entity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_entity_tick);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_task_fair);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_task_fair);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_prepare_update_load_avg_se);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_finish_update_load_avg_se);
@@ -429,40 +429,29 @@ EXPORT_SYMBOL_GPL(dev_pm_genpd_set_performance_state);
 * @dev: Device to handle
 * @next: impending interrupt/wakeup for the device
 *
- * Allow devices to inform of the next wakeup. But, if the domain were already
- * powered off, we will not wakeup the domain to recompute it's idle duration.
+ *
+ * Allow devices to inform of the next wakeup. It's assumed that the users
+ * guarantee that the genpd wouldn't be detached while this routine is getting
+ * called. Additionally, it's also assumed that @dev isn't runtime suspended
+ * (RPM_SUSPENDED)."
 * Although devices are expected to update the next_wakeup after the end of
 * their usecase as well, it is possible the devices themselves may not know
- * about that. Stale @next will be ignored when powering off the domain.
+ * about that, so stale @next will be ignored when powering off the domain.
 */
-int dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
+void dev_pm_genpd_set_next_wakeup(struct device *dev, ktime_t next)
{
-	struct generic_pm_domain *genpd;
	struct generic_pm_domain_data *gpd_data;
-	int ret = -EINVAL;
+	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd_safe(dev);
	if (!genpd)
-		return -ENODEV;
+		return;

-	if (WARN_ON(!dev->power.subsys_data ||
-		    !dev->power.subsys_data->domain_data))
-		return ret;
-
-	genpd_lock(genpd);
	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
-	if (ktime_before(ktime_get(), next)) {
-		gpd_data->next_wakeup = next;
-		genpd->flags |= GENPD_FLAG_GOV_NEXT_WAKEUP;
-		ret = 0;
-	}
-	genpd_unlock(genpd);
-
-	return ret;
+	gpd_data->next_wakeup = next;
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_set_next_wakeup);


static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
@@ -1804,24 +1793,6 @@ int dev_pm_genpd_remove_notifier(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_genpd_remove_notifier);

-/**
- * genpd_enable_next_wakeup - Enable genpd gov to use next_wakeup
- *
- * @genpd: The genpd to be updated
- * @enable: Enable/disable genpd gov to use next wakeup
- */
-void genpd_enable_next_wakeup(struct generic_pm_domain *genpd, bool enable)
-{
-	genpd_lock(genpd);
-	if (enable)
-		genpd->flags |= GENPD_FLAG_GOV_NEXT_WAKEUP;
-	else
-		genpd->flags &= ~GENPD_FLAG_GOV_NEXT_WAKEUP;
-	genpd->next_wakeup = KTIME_MAX;
-	genpd_unlock(genpd);
-}
-EXPORT_SYMBOL_GPL(genpd_enable_next_wakeup);
-
static int genpd_add_subdomain(struct generic_pm_domain *genpd,
			       struct generic_pm_domain *subdomain)
{
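Editor's aside: after the change above, `dev_pm_genpd_set_next_wakeup()` returns void and simply records the hint, so callers no longer check an error code. A hedged sketch of the intended call pattern follows; the device callback and the 2 ms horizon are made up purely for illustration.

```c
#include <linux/ktime.h>
#include <linux/pm_domain.h>

/* Illustrative only: record when this device expects its next wakeup,
 * e.g. right before arming a timer or an expected interrupt. */
static void demo_record_next_wakeup(struct device *dev)
{
	ktime_t next = ktime_add_us(ktime_get(), 2000);	/* ~2 ms from now */

	dev_pm_genpd_set_next_wakeup(dev, next);	/* void since this patch */
}
```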
@@ -126,7 +126,7 @@ static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t n
	struct pm_domain_data *pdd;
	struct gpd_link *link;

-	if (!genpd_may_use_next_wakeup(genpd))
+	if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY))
		return;

	/*
@@ -275,7 +275,7 @@ static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
	 * cannot be met.
	 */
	update_domain_next_wakeup(genpd, now);
-	if (genpd->next_wakeup != KTIME_MAX) {
+	if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (genpd->next_wakeup != KTIME_MAX)) {
		/* Let's find out the deepest domain idle state, the devices prefer */
		while (state_idx >= 0) {
			if (next_wakeup_allows_state(genpd, state_idx, now)) {
@@ -1086,6 +1086,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
	kfree(chan->dev);
 err_free_local:
	free_percpu(chan->local);
+	chan->local = NULL;
	return rc;
}

@@ -10,6 +10,7 @@ config DW_DMAC_CORE

config DW_DMAC
	tristate "Synopsys DesignWare AHB DMA platform driver"
+	depends on HAS_IOMEM
	select DW_DMAC_CORE
	help
	  Support the Synopsys DesignWare AHB DMA controller. This
@@ -18,6 +19,7 @@ config DW_DMAC
config DW_DMAC_PCI
	tristate "Synopsys DesignWare AHB DMA PCI driver"
	depends on PCI
+	depends on HAS_IOMEM
	select DW_DMAC_CORE
	help
	  Support the Synopsys DesignWare AHB DMA controller on the
@@ -263,6 +263,22 @@ void idxd_wq_drain(struct idxd_wq *wq)
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

+void idxd_wq_reset(struct idxd_wq *wq)
+{
+	struct idxd_device *idxd = wq->idxd;
+	struct device *dev = &idxd->pdev->dev;
+	u32 operand;
+
+	if (wq->state != IDXD_WQ_ENABLED) {
+		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+		return;
+	}
+
+	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
+	wq->state = IDXD_WQ_DISABLED;
+}
+
int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
@@ -291,8 +307,6 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
-	struct device *dev = &idxd->pdev->dev;
-	int i, wq_offset;

	lockdep_assert_held(&idxd->dev_lock);
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
@@ -303,14 +317,6 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
	wq->priority = 0;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);
-
-	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
-		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
-		iowrite32(0, idxd->reg_base + wq_offset);
-		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
-			wq->id, i, wq_offset,
-			ioread32(idxd->reg_base + wq_offset));
-	}
}

/* Device control bits */
@@ -560,7 +566,14 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
	if (!wq->group)
		return 0;

-	memset(wq->wqcfg, 0, idxd->wqcfg_size);
+	/*
+	 * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
+	 * wq reset. This will copy back the sticky values that are present on some devices.
+	 */
+	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
+		wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
+	}

	/* byte 0-3 */
	wq->wqcfg->wq_size = wq->size;
@@ -295,6 +295,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq);
 int idxd_wq_enable(struct idxd_wq *wq);
 int idxd_wq_disable(struct idxd_wq *wq);
 void idxd_wq_drain(struct idxd_wq *wq);
+void idxd_wq_reset(struct idxd_wq *wq);
 int idxd_wq_map_portal(struct idxd_wq *wq);
 void idxd_wq_unmap_portal(struct idxd_wq *wq);
 void idxd_wq_disable_cleanup(struct idxd_wq *wq);
@@ -66,7 +66,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 		for (i = 0; i < 4; i++)
 			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
 					IDXD_SWERR_OFFSET + i * sizeof(u64));
-		iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
+
+		iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
+			  idxd->reg_base + IDXD_SWERR_OFFSET);
 
 		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
 			int id = idxd->sw_err.wq_idx;
@@ -241,7 +241,6 @@ static void disable_wq(struct idxd_wq *wq)
 {
 	struct idxd_device *idxd = wq->idxd;
 	struct device *dev = &idxd->pdev->dev;
-	int rc;
 
 	mutex_lock(&wq->wq_lock);
 	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
@@ -262,17 +261,13 @@ static void disable_wq(struct idxd_wq *wq)
 	idxd_wq_unmap_portal(wq);
 
 	idxd_wq_drain(wq);
-	rc = idxd_wq_disable(wq);
+	idxd_wq_reset(wq);
 
 	idxd_wq_free_resources(wq);
 	wq->client_count = 0;
 	mutex_unlock(&wq->wq_lock);
 
-	if (rc < 0)
-		dev_warn(dev, "Failed to disable %s: %d\n",
-			 dev_name(&wq->conf_dev), rc);
-	else
-		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+	dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
 }
 
 static int idxd_config_bus_remove(struct device *dev)
@@ -923,7 +918,7 @@ static ssize_t wq_size_store(struct device *dev,
 	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
 		return -EPERM;
 
-	if (wq->state != IDXD_WQ_DISABLED)
+	if (idxd->state == IDXD_DEV_ENABLED)
 		return -EPERM;
 
 	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
@@ -1259,8 +1254,14 @@ static ssize_t op_cap_show(struct device *dev,
 {
 	struct idxd_device *idxd =
 		container_of(dev, struct idxd_device, conf_dev);
+	int i, rc = 0;
 
-	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
+	for (i = 0; i < 4; i++)
+		rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
+
+	rc--;
+	rc += sysfs_emit_at(buf, rc, "\n");
+	return rc;
 }
 static DEVICE_ATTR_RO(op_cap);
 
@@ -507,10 +507,8 @@ static int plx_dma_create(struct pci_dev *pdev)
 
 	rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
 			 KBUILD_MODNAME, plxdev);
-	if (rc) {
-		kfree(plxdev);
-		return rc;
-	}
+	if (rc)
+		goto free_plx;
 
 	spin_lock_init(&plxdev->ring_lock);
 	tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);
@@ -540,14 +538,20 @@ static int plx_dma_create(struct pci_dev *pdev)
 	rc = dma_async_device_register(dma);
 	if (rc) {
 		pci_err(pdev, "Failed to register dma device: %d\n", rc);
-		free_irq(pci_irq_vector(pdev, 0), plxdev);
-		kfree(plxdev);
-		return rc;
+		goto put_device;
 	}
 
 	pci_set_drvdata(pdev, plxdev);
 
 	return 0;
+
+put_device:
+	put_device(&pdev->dev);
+	free_irq(pci_irq_vector(pdev, 0), plxdev);
+free_plx:
+	kfree(plxdev);
+
+	return rc;
 }
 
 static int plx_dma_probe(struct pci_dev *pdev,
@@ -458,6 +458,8 @@ static ssize_t export_store(struct class *class,
 	long gpio;
 	struct gpio_desc *desc;
 	int status;
+	struct gpio_chip *gc;
+	int offset;
 
 	status = kstrtol(buf, 0, &gpio);
 	if (status < 0)
@@ -469,6 +471,12 @@ static ssize_t export_store(struct class *class,
 		pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
 		return -EINVAL;
 	}
+	gc = desc->gdev->chip;
+	offset = gpio_chip_hwgpio(desc);
+	if (!gpiochip_line_is_valid(gc, offset)) {
+		pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
+		return -EINVAL;
+	}
 
 	/* No extra locking here; FLAG_SYSFS just signifies that the
 	 * request and export were done by on behalf of userspace, so
@@ -1240,8 +1240,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
 
 static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
-	*value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
-		REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+	*value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+		REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
 
 	return 0;
 }
@@ -1073,8 +1073,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 	/* Force the GPU power on so we can read this register */
 	a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
 
-	*value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
-		REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+	*value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+		REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
 
 	a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
 	mutex_unlock(&perfcounter_oob);
@@ -531,7 +531,7 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
 	drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
 	if (IS_ERR(drm_dev)) {
 		ret = PTR_ERR(drm_dev);
-		goto fail;
+		goto fail_dev;
 	}
 
 	drm_info->drm_dev = drm_dev;
@@ -561,8 +561,10 @@ fail_modeset:
 	drm_kms_helper_poll_fini(drm_dev);
 	drm_mode_config_cleanup(drm_dev);
 	drm_dev_put(drm_dev);
-fail:
+fail_dev:
 	kfree(drm_info);
+	front_info->drm_info = NULL;
+fail:
 	return ret;
 }
 
@@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
 {
 	struct wacom_features *features = &wacom_wac->features;
 
-	input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
 	if (!(features->device_type & WACOM_DEVICETYPE_PEN))
 		return -ENODEV;
 
@@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
 		return 0;
 	}
 
+	input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
 	__set_bit(BTN_TOUCH, input_dev->keybit);
 	__set_bit(ABS_MISC, input_dev->absbit);
 
@@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
 {
 	struct wacom_features *features = &wacom_wac->features;
 
-	input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
 	if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
 		return -ENODEV;
 
@@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
 		/* setup has already been done */
 		return 0;
 
+	input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
 	__set_bit(BTN_TOUCH, input_dev->keybit);
 
 	if (features->touch_max == 1) {
@@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
+static int nspire_keypad_open(struct input_dev *input)
 {
+	struct nspire_keypad *keypad = input_get_drvdata(input);
 	unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles;
+	int error;
+
+	error = clk_prepare_enable(keypad->clk);
+	if (error)
+		return error;
 
 	cycles_per_us = (clk_get_rate(keypad->clk) / 1000000);
 	if (cycles_per_us == 0)
@@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
 	keypad->int_mask = 1 << 1;
 	writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK);
 
-	/* Disable GPIO interrupts to prevent hanging on touchpad */
-	/* Possibly used to detect touchpad events */
-	writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
-	/* Acknowledge existing interrupts */
-	writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
-
-	return 0;
-}
-
-static int nspire_keypad_open(struct input_dev *input)
-{
-	struct nspire_keypad *keypad = input_get_drvdata(input);
-	int error;
-
-	error = clk_prepare_enable(keypad->clk);
-	if (error)
-		return error;
-
-	error = nspire_keypad_chip_init(keypad);
-	if (error) {
-		clk_disable_unprepare(keypad->clk);
-		return error;
-	}
-
 	return 0;
 }
 
@@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input)
 {
 	struct nspire_keypad *keypad = input_get_drvdata(input);
 
+	/* Disable interrupts */
+	writel(0, keypad->reg_base + KEYPAD_INTMSK);
+	/* Acknowledge existing interrupts */
+	writel(~0, keypad->reg_base + KEYPAD_INT);
+
 	clk_disable_unprepare(keypad->clk);
 }
 
@@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
+	error = clk_prepare_enable(keypad->clk);
+	if (error) {
+		dev_err(&pdev->dev, "failed to enable clock\n");
+		return error;
+	}
+
+	/* Disable interrupts */
+	writel(0, keypad->reg_base + KEYPAD_INTMSK);
+	/* Acknowledge existing interrupts */
+	writel(~0, keypad->reg_base + KEYPAD_INT);
+
+	/* Disable GPIO interrupts to prevent hanging on touchpad */
+	/* Possibly used to detect touchpad events */
+	writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
+	/* Acknowledge existing GPIO interrupts */
+	writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
+
+	clk_disable_unprepare(keypad->clk);
+
 	input_set_drvdata(input, keypad);
 
 	input->id.bustype = BUS_HOST;
@@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 			DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
 		},
+	}, {
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 			DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
@@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
 	u8 major = event[4];
 	u8 minor = event[5];
 	u8 z = event[6] & S6SY761_MASK_Z;
-	u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
-	u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
+	u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
+	u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);
 
 	input_mt_slot(sdata->input, tid);
 
@@ -488,8 +488,8 @@ static int mtk_nfc_exec_instr(struct nand_chip *chip,
 		return 0;
 	case NAND_OP_WAITRDY_INSTR:
 		return readl_poll_timeout(nfc->regs + NFI_STA, status,
-					  status & STA_BUSY, 20,
-					  instr->ctx.waitrdy.timeout_ms);
+					  !(status & STA_BUSY), 20,
+					  instr->ctx.waitrdy.timeout_ms * 1000);
 	default:
 		break;
 	}
@@ -2994,10 +2994,17 @@ out_resources:
 	return err;
 }
 
+/* prod_id for switch families which do not have a PHY model number */
+static const u16 family_prod_id_table[] = {
+	[MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+	[MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
+};
+
 static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
 {
 	struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
 	struct mv88e6xxx_chip *chip = mdio_bus->chip;
+	u16 prod_id;
 	u16 val;
 	int err;
 
@@ -3008,23 +3015,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
 	err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
 	mv88e6xxx_reg_unlock(chip);
 
-	if (reg == MII_PHYSID2) {
-		/* Some internal PHYs don't have a model number. */
-		if (chip->info->family != MV88E6XXX_FAMILY_6165)
-			/* Then there is the 6165 family. It gets is
-			 * PHYs correct. But it can also have two
-			 * SERDES interfaces in the PHY address
-			 * space. And these don't have a model
-			 * number. But they are not PHYs, so we don't
-			 * want to give them something a PHY driver
-			 * will recognise.
-			 *
-			 * Use the mv88e6390 family model number
-			 * instead, for anything which really could be
-			 * a PHY,
-			 */
-			if (!(val & 0x3f0))
-				val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
+	/* Some internal PHYs don't have a model number. */
+	if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
+	    chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
+		prod_id = family_prod_id_table[chip->info->family];
+		if (prod_id)
+			val |= prod_id >> 4;
 	}
 
 	return err ? err : val;
@@ -1534,8 +1534,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 	pci_set_master(pdev);
 
-	ioaddr = pci_resource_start(pdev, 0);
-	if (!ioaddr) {
+	if (!pci_resource_len(pdev, 0)) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			pr_err("card has no PCI IO resources, aborting\n");
 		err = -ENODEV;
@@ -1548,6 +1547,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
 			pr_err("architecture does not support 32bit PCI busmaster DMA\n");
 		goto err_disable_dev;
 	}
+
+	ioaddr = pci_resource_start(pdev, 0);
 	if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
 		if (pcnet32_debug & NETIF_MSG_PROBE)
 			pr_err("io address range already allocated\n");
@@ -3777,6 +3777,7 @@ static int macb_init(struct platform_device *pdev)
 		reg = gem_readl(bp, DCFG8);
 		bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
 				GEM_BFEXT(T2SCR, reg));
+		INIT_LIST_HEAD(&bp->rx_fs_list.list);
 		if (bp->max_tuples > 0) {
 			/* also needs one ethtype match to check IPv4 */
 			if (GEM_BFEXT(SCR2ETH, reg) > 0) {
@@ -3787,7 +3788,6 @@ static int macb_init(struct platform_device *pdev)
 				/* Filtering is supported in hw but don't enable it in kernel now */
 				dev->hw_features |= NETIF_F_NTUPLE;
 				/* init Rx flow definitions */
-				INIT_LIST_HEAD(&bp->rx_fs_list.list);
 				bp->rx_fs_list.count = 0;
 				spin_lock_init(&bp->rx_fs_lock);
 			} else
@@ -354,18 +354,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
 	return cxgb4_ofld_send(tx_info->netdev, skb);
 }
 
-/*
- * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
- * @tx_info - driver specific tls info.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
-{
-	return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
-				  TCB_T_STATE_V(TCB_T_STATE_M),
-				  CHCR_TCB_STATE_CLOSED, 1);
-}
-
 /*
  * chcr_ktls_dev_del: call back for tls_dev_del.
  * Remove the tid and l2t entry and close the connection.
@@ -400,8 +388,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 
 	/* clear tid */
 	if (tx_info->tid != -1) {
-		/* clear tcb state and then release tid */
-		chcr_ktls_mark_tcb_close(tx_info);
 		cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
 				 tx_info->tid, tx_info->ip_family);
 	}
@@ -579,7 +565,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 	return 0;
 
 free_tid:
-	chcr_ktls_mark_tcb_close(tx_info);
 #if IS_ENABLED(CONFIG_IPV6)
 	/* clear clip entry */
 	if (tx_info->ip_family == AF_INET6)
@@ -677,10 +662,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
 	if (tx_info->pending_close) {
 		spin_unlock(&tx_info->lock);
 		if (!status) {
-			/* it's a late success, tcb status is establised,
-			 * mark it close.
-			 */
-			chcr_ktls_mark_tcb_close(tx_info);
 			cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
 					 tid, tx_info->ip_family);
 		}
@@ -1668,54 +1649,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
 		refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
 }
 
-/*
- * chcr_ktls_update_snd_una: Reset the SEND_UNA. It will be done to avoid
- * sending the same segment again. It will discard the segment which is before
- * the current tx max.
- * @tx_info - driver specific tls info.
- * @q - TX queue.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
-				    struct sge_eth_txq *q)
-{
-	struct fw_ulptx_wr *wr;
-	unsigned int ndesc;
-	int credits;
-	void *pos;
-	u32 len;
-
-	len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
-	ndesc = DIV_ROUND_UP(len, 64);
-
-	credits = chcr_txq_avail(&q->q) - ndesc;
-	if (unlikely(credits < 0)) {
-		chcr_eth_txq_stop(q);
-		return NETDEV_TX_BUSY;
-	}
-
-	pos = &q->q.desc[q->q.pidx];
-
-	wr = pos;
-	/* ULPTX wr */
-	wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
-	wr->cookie = 0;
-	/* fill len in wr field */
-	wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
-
-	pos += sizeof(*wr);
-
-	pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
-					 TCB_SND_UNA_RAW_W,
-					 TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
-					 TCB_SND_UNA_RAW_V(0), 0);
-
-	chcr_txq_advance(&q->q, ndesc);
-	cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
-
-	return 0;
-}
-
 /*
  * chcr_end_part_handler: This handler will handle the record which
  * is complete or if record's end part is received. T6 adapter has a issue that
@@ -1740,7 +1673,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
 				 struct sge_eth_txq *q, u32 skb_offset,
 				 u32 tls_end_offset, bool last_wr)
 {
+	bool free_skb_if_tx_fails = false;
 	struct sk_buff *nskb = NULL;
+
 	/* check if it is a complete record */
 	if (tls_end_offset == record->len) {
 		nskb = skb;
@@ -1763,6 +1698,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
 
 		if (last_wr)
 			dev_kfree_skb_any(skb);
+		else
+			free_skb_if_tx_fails = true;
 
 		last_wr = true;
 
@@ -1774,6 +1711,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
 				       record->num_frags,
 				       (last_wr && tcp_push_no_fin),
 				       mss)) {
+		if (free_skb_if_tx_fails)
+			dev_kfree_skb_any(skb);
 		goto out;
 	}
 	tx_info->prev_seq = record->end_seq;
@@ -1910,11 +1849,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
 			/* reset tcp_seq as per the prior_data_required len */
 			tcp_seq -= prior_data_len;
 		}
-		/* reset snd una, so the middle record won't send the already
-		 * sent part.
-		 */
-		if (chcr_ktls_update_snd_una(tx_info, q))
-			goto out;
 		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
 	} else {
 		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
@@ -2015,12 +1949,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * we will send the complete record again.
 	 */
 
+	spin_lock_irqsave(&tx_ctx->base.lock, flags);
+
 	do {
-		int i;
 
 		cxgb4_reclaim_completed_tx(adap, &q->q, true);
-		/* lock taken */
-		spin_lock_irqsave(&tx_ctx->base.lock, flags);
 		/* fetch the tls record */
 		record = tls_get_record(&tx_ctx->base, tcp_seq,
 					&tx_info->record_no);
@@ -2079,11 +2012,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 						  tls_end_offset, skb_offset,
 						  0);
 
-		spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 		if (ret) {
 			/* free the refcount taken earlier */
 			if (tls_end_offset < data_len)
 				dev_kfree_skb_any(skb);
+			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 			goto out;
 		}
 
@@ -2093,16 +2026,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 			continue;
 		}
 
-		/* increase page reference count of the record, so that there
-		 * won't be any chance of page free in middle if in case stack
-		 * receives ACK and try to delete the record.
-		 */
-		for (i = 0; i < record->num_frags; i++)
-			__skb_frag_ref(&record->frags[i]);
-		/* lock cleared */
-		spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
-
-
 		/* if a tls record is finishing in this SKB */
 		if (tls_end_offset <= data_len) {
 			ret = chcr_end_part_handler(tx_info, skb, record,
@@ -2127,13 +2050,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 			data_len = 0;
 		}
 
-		/* clear the frag ref count which increased locally before */
-		for (i = 0; i < record->num_frags; i++) {
-			/* clear the frag ref count */
-			__skb_frag_unref(&record->frags[i]);
-		}
 		/* if any failure, come out from the loop. */
 		if (ret) {
+			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 			if (th->fin)
 				dev_kfree_skb_any(skb);
 
@@ -2148,6 +2067,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	} while (data_len > 0);
 
+	spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 	atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
 	atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
 
@@ -1474,8 +1474,10 @@ dm9000_probe(struct platform_device *pdev)
 
 	/* Init network device */
 	ndev = alloc_etherdev(sizeof(struct board_info));
-	if (!ndev)
-		return -ENOMEM;
+	if (!ndev) {
+		ret = -ENOMEM;
+		goto out_regulator_disable;
+	}
 
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
@@ -1159,19 +1159,13 @@ static int __ibmvnic_open(struct net_device *netdev)
 
 	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
 	if (rc) {
-		for (i = 0; i < adapter->req_rx_queues; i++)
-			napi_disable(&adapter->napi[i]);
+		ibmvnic_napi_disable(adapter);
 		release_resources(adapter);
 		return rc;
 	}
 
 	netif_tx_start_all_queues(netdev);
 
-	if (prev_state == VNIC_CLOSED) {
-		for (i = 0; i < adapter->req_rx_queues; i++)
-			napi_schedule(&adapter->napi[i]);
-	}
-
 	adapter->state = VNIC_OPEN;
 	return rc;
 }
@@ -1942,7 +1936,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 	u64 old_num_rx_queues, old_num_tx_queues;
 	u64 old_num_rx_slots, old_num_tx_slots;
 	struct net_device *netdev = adapter->netdev;
-	int i, rc;
+	int rc;
 
 	netdev_dbg(adapter->netdev,
 		   "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
@@ -2088,10 +2082,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 		/* refresh device's multicast list */
 		ibmvnic_set_multi(netdev);
 
-		/* kick napi */
-		for (i = 0; i < adapter->req_rx_queues; i++)
-			napi_schedule(&adapter->napi[i]);
-
 		if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
 		    adapter->reset_reason == VNIC_RESET_MOBILITY) {
 			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
@@ -11863,6 +11863,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
 {
 	int err = 0;
 	int size;
+	u16 pow;
 
 	/* Set default capability flags */
 	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
@@ -11881,6 +11882,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
 	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
 	pf->rss_size_max = min_t(int, pf->rss_size_max,
 				 pf->hw.func_caps.num_tx_qp);
+
+	/* find the next higher power-of-2 of num cpus */
+	pow = roundup_pow_of_two(num_online_cpus());
+	pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
+
 	if (pf->hw.func_caps.rss) {
 		pf->flags |= I40E_FLAG_RSS_ENABLED;
 		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
@@ -747,8 +747,8 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
 		   struct ice_port_info *pi)
 {
 	u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
-	u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
-	u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
+	u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j;
+	u8 i, err, sync, oper, app_index, ice_app_sel_type;
 	u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
 	u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
 	struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
@@ -6896,6 +6896,11 @@ static int __maybe_unused ixgbe_resume(struct device *dev_d)
 
 	adapter->hw.hw_addr = adapter->io_addr;
 
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		e_dev_err("Cannot enable PCI device from suspend\n");
+		return err;
+	}
 	smp_mb__before_atomic();
 	clear_bit(__IXGBE_DISABLED, &adapter->state);
 	pci_set_master(pdev);
@@ -387,21 +387,6 @@ enum mlx5e_fec_supported_link_mode {
 		*_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
 } while (0)
 
-#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \
-	do { \
-		unsigned long policy_long; \
-		u16 *__policy = &(policy); \
-		bool _write = (write); \
- \
-		policy_long = *__policy; \
-		if (_write && *__policy) \
-			*__policy = find_first_bit(&policy_long, \
-						   sizeof(policy_long) * BITS_PER_BYTE);\
-		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \
-		if (!_write && *__policy) \
-			*__policy = 1 << *__policy; \
-	} while (0)
-
 /* get/set FEC admin field for a given speed */
 static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
 				 enum mlx5e_fec_supported_link_mode link_mode)
@@ -423,16 +408,16 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
 		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g);
 		break;
 	case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
-		MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x);
+		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g_1x);
 		break;
 	case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
-		MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x);
+		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_2x);
 		break;
 	case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
-		MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x);
+		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_4x);
 		break;
 	case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
-		MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x);
+		MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x);
 		break;
 	default:
 		return -EINVAL;
@@ -2196,6 +2196,9 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
 		return 0;
 
 	flow_rule_match_meta(rule, &match);
+	if (!match.mask->ingress_ifindex)
+		return 0;
+
 	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
 		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
 		return -EOPNOTSUPP;
@@ -2378,13 +2378,14 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
 static void rtl_jumbo_config(struct rtl8169_private *tp)
 {
 	bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
+	int readrq = 4096;
 
 	rtl_unlock_config_regs(tp);
 	switch (tp->mac_version) {
 	case RTL_GIGA_MAC_VER_12:
 	case RTL_GIGA_MAC_VER_17:
 		if (jumbo) {
-			pcie_set_readrq(tp->pci_dev, 512);
+			readrq = 512;
 			r8168b_1_hw_jumbo_enable(tp);
 		} else {
 			r8168b_1_hw_jumbo_disable(tp);
@@ -2392,7 +2393,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
 		break;
 	case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
 		if (jumbo) {
-			pcie_set_readrq(tp->pci_dev, 512);
+			readrq = 512;
 			r8168c_hw_jumbo_enable(tp);
 		} else {
 			r8168c_hw_jumbo_disable(tp);
@@ -2417,8 +2418,15 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
 	}
 	rtl_lock_config_regs(tp);
 
-	if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
-		pcie_set_readrq(tp->pci_dev, 4096);
+	if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+		pcie_set_readrq(tp->pci_dev, readrq);
+
+	/* Chip doesn't support pause in jumbo mode */
+	linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+			 tp->phydev->advertising, !jumbo);
+	linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+			 tp->phydev->advertising, !jumbo);
+	phy_start_aneg(tp->phydev);
 }
 
 DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -4710,8 +4718,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
 	if (!tp->supports_gmii)
 		phy_set_max_speed(phydev, SPEED_100);
 
-	phy_support_asym_pause(phydev);
-
 	phy_attached_info(phydev);
 
 	return 0;
@@ -2913,9 +2913,35 @@ static struct phy_driver marvell_drivers[] = {
 		.get_stats = marvell_get_stats,
 	},
 	{
-		.phy_id = MARVELL_PHY_ID_88E6390,
+		.phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
-		.name = "Marvell 88E6390",
+		.name = "Marvell 88E6341 Family",
+		/* PHY_GBIT_FEATURES */
+		.flags = PHY_POLL_CABLE_TEST,
+		.probe = m88e1510_probe,
+		.config_init = marvell_config_init,
+		.config_aneg = m88e6390_config_aneg,
+		.read_status = marvell_read_status,
+		.ack_interrupt = marvell_ack_interrupt,
+		.config_intr = marvell_config_intr,
+		.did_interrupt = m88e1121_did_interrupt,
+		.resume = genphy_resume,
+		.suspend = genphy_suspend,
+		.read_page = marvell_read_page,
+		.write_page = marvell_write_page,
+		.get_sset_count = marvell_get_sset_count,
+		.get_strings = marvell_get_strings,
+		.get_stats = marvell_get_stats,
+		.get_tunable = m88e1540_get_tunable,
+		.set_tunable = m88e1540_set_tunable,
+		.cable_test_start = marvell_vct7_cable_test_start,
+		.cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
+		.cable_test_get_status = marvell_vct7_cable_test_get_status,
+	},
+	{
+		.phy_id = MARVELL_PHY_ID_88E6390_FAMILY,
+		.phy_id_mask = MARVELL_PHY_ID_MASK,
+		.name = "Marvell 88E6390 Family",
 		/* PHY_GBIT_FEATURES */
 		.flags = PHY_POLL_CABLE_TEST,
 		.probe = m88e6390_probe,
@@ -3001,7 +3027,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
 	{ MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
-	{ MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
+	{ MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
+	{ MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK },
 	{ }
@@ -684,6 +684,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
 	IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
 	IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
 	IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+	IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
 
 	_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
 		      IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
@@ -1181,6 +1181,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	u32 cmd_pos;
 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+	unsigned long flags;
 
 	if (WARN(!trans->wide_cmd_header &&
 		 group_id > IWL_ALWAYS_LONG_GROUP,
@@ -1264,10 +1265,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		goto free_dup_buf;
 	}
 
-	spin_lock_bh(&txq->lock);
+	spin_lock_irqsave(&txq->lock, flags);
 
 	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-		spin_unlock_bh(&txq->lock);
+		spin_unlock_irqrestore(&txq->lock, flags);
 
 		IWL_ERR(trans, "No space in command queue\n");
 		iwl_op_mode_cmd_queue_full(trans->op_mode);
@@ -1427,7 +1428,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 unlock_reg:
 	spin_unlock(&trans_pcie->reg_lock);
 out:
-	spin_unlock_bh(&txq->lock);
+	spin_unlock_irqrestore(&txq->lock, flags);
 free_dup_buf:
 	if (idx < 0)
 		kfree(dup_buf);
@@ -1239,6 +1239,11 @@ int nvdimm_has_flush(struct nd_region *nd_region)
 	    || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
 		return -ENXIO;
 
+	/* Test if an explicit flush function is defined */
+	if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
+		return 1;
+
+	/* Test if any flush hints for the region are available */
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
 		struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -1249,8 +1254,8 @@ int nvdimm_has_flush(struct nd_region *nd_region)
 	}
 
 	/*
-	 * The platform defines dimm devices without hints, assume
-	 * platform persistence mechanism like ADR
+	 * The platform defines dimm devices without hints nor explicit flush,
+	 * assume platform persistence mechanism like ADR
 	 */
 	return 0;
 }
@@ -1996,6 +1996,11 @@ int rproc_add(struct rproc *rproc)
 	struct device *dev = &rproc->dev;
 	int ret;
 
+	/* add char device for this remoteproc */
+	ret = rproc_char_device_add(rproc);
+	if (ret < 0)
+		return ret;
+
 	ret = device_add(dev);
 	if (ret < 0)
 		return ret;
@@ -2009,11 +2014,6 @@ int rproc_add(struct rproc *rproc)
 	/* create debugfs entries */
 	rproc_create_debug_dir(rproc);
 
-	/* add char device for this remoteproc */
-	ret = rproc_char_device_add(rproc);
-	if (ret < 0)
-		return ret;
-
 	/*
 	 * Remind ourselves the remote processor has been attached to rather
 	 * than booted by the remoteproc core. This is important because the
@@ -201,18 +201,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
 		memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
 		task->total_xfer_len = qc->nbytes;
 		task->num_scatter = qc->n_elem;
+		task->data_dir = qc->dma_dir;
+	} else if (qc->tf.protocol == ATA_PROT_NODATA) {
+		task->data_dir = DMA_NONE;
 	} else {
 		for_each_sg(qc->sg, sg, qc->n_elem, si)
 			xfer += sg_dma_len(sg);
 
 		task->total_xfer_len = xfer;
 		task->num_scatter = si;
-	}
-
-	if (qc->tf.protocol == ATA_PROT_NODATA)
-		task->data_dir = DMA_NONE;
-	else
 		task->data_dir = qc->dma_dir;
+	}
 	task->scatter = qc->sg;
 	task->ata_task.retry_count = 1;
 	task->task_state_flags = SAS_TASK_STATE_PENDING;
@@ -541,7 +541,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
 	res = mutex_lock_interruptible(&rport->mutex);
 	if (res)
 		goto out;
-	if (rport->state != SRP_RPORT_FAIL_FAST)
+	if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
 		/*
 		 * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
 		 * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
@@ -128,7 +128,7 @@ static int debug_kinfo_probe(struct platform_device *pdev)
 	all_info_addr = rmem->priv;
 	all_info_size = rmem->size;
 
-	memset(all_info_addr, 0, all_info_size);
+	memset(all_info_addr, 0, sizeof(struct kernel_all_info));
 	all_info = (struct kernel_all_info *)all_info_addr;
 	info = &(all_info->info);
 	info->enabled_all = IS_ENABLED(CONFIG_KALLSYMS_ALL);
@@ -1286,6 +1286,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
 				"snps,usb3_lpm_capable");
 	dwc->usb2_lpm_disable = device_property_read_bool(dev,
 				"snps,usb2-lpm-disable");
+	dwc->usb2_gadget_lpm_disable = device_property_read_bool(dev,
+				"snps,usb2-gadget-lpm-disable");
 	device_property_read_u8(dev, "snps,rx-thr-num-pkt-prd",
 				&rx_thr_num_pkt_prd);
 	device_property_read_u8(dev, "snps,rx-max-burst-prd",
@@ -1040,7 +1040,8 @@ struct dwc3_scratchpad_array {
  * @dis_start_transfer_quirk: set if start_transfer failure SW workaround is
  *			not needed for DWC_usb31 version 1.70a-ea06 and below
  * @usb3_lpm_capable: set if hadrware supports Link Power Management
- * @usb2_lpm_disable: set to disable usb2 lpm
+ * @usb2_lpm_disable: set to disable usb2 lpm for host
+ * @usb2_gadget_lpm_disable: set to disable usb2 lpm for gadget
  * @disable_scramble_quirk: set if we enable the disable scramble quirk
 * @u2exit_lfps_quirk: set if we enable u2exit lfps quirk
 * @u2ss_inp3_quirk: set if we enable P3 OK for U2/SS Inactive quirk
@@ -1246,6 +1247,7 @@ struct dwc3 {
 	unsigned		dis_start_transfer_quirk:1;
 	unsigned		usb3_lpm_capable:1;
 	unsigned		usb2_lpm_disable:1;
+	unsigned		usb2_gadget_lpm_disable:1;
 
 	unsigned		disable_scramble_quirk:1;
 	unsigned		u2exit_lfps_quirk:1;
@@ -308,13 +308,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
|
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
|
||||||
int needs_wakeup;
|
int link_state;
|
||||||
|
|
||||||
needs_wakeup = (dwc->link_state == DWC3_LINK_STATE_U1 ||
|
link_state = dwc3_gadget_get_link_state(dwc);
|
||||||
dwc->link_state == DWC3_LINK_STATE_U2 ||
|
if (link_state == DWC3_LINK_STATE_U1 ||
|
||||||
dwc->link_state == DWC3_LINK_STATE_U3);
|
link_state == DWC3_LINK_STATE_U2 ||
|
||||||
|
link_state == DWC3_LINK_STATE_U3) {
|
||||||
if (unlikely(needs_wakeup)) {
|
|
||||||
ret = __dwc3_gadget_wakeup(dwc);
|
ret = __dwc3_gadget_wakeup(dwc);
|
||||||
dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
|
dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
|
||||||
ret);
|
ret);
|
||||||
@@ -609,12 +608,14 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
 		u8 bInterval_m1;
 
 		/*
-		 * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
-		 * must be set to 0 when the controller operates in full-speed.
+		 * Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
+		 *
+		 * NOTE: The programming guide incorrectly stated bInterval_m1
+		 * must be set to 0 when operating in fullspeed. Internally the
+		 * controller does not have this limitation. See DWC_usb3x
+		 * programming guide section 3.2.2.1.
 		 */
 		bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
-		if (dwc->gadget->speed == USB_SPEED_FULL)
-			bInterval_m1 = 0;
 
 		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
 		    dwc->gadget->speed == USB_SPEED_FULL)
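The reworked comment above drops the full-speed special case: bInterval_m1 is simply the descriptor's bInterval minus one, clamped to the 0..13 range that DEPCFG accepts. A quick standalone sketch of that arithmetic (plain C, not kernel code):

/* Sketch of the DEPCFG.bInterval_m1 clamp: bInterval - 1, limited to 0..13. */
#include <stdint.h>
#include <stdio.h>

static uint8_t binterval_m1(uint8_t bInterval)
{
	uint8_t v = bInterval - 1;

	return v > 13 ? 13 : v;	/* mirrors min_t(u8, desc->bInterval - 1, 13) */
}

int main(void)
{
	printf("bInterval 1 -> %u\n", (unsigned)binterval_m1(1));	/* 0 */
	printf("bInterval 4 -> %u\n", (unsigned)binterval_m1(4));	/* 3 */
	printf("bInterval 16 -> %u\n", (unsigned)binterval_m1(16));	/* clamped to 13 */
	return 0;
}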
@@ -1983,6 +1984,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
 	case DWC3_LINK_STATE_RESET:
 	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
 	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
+	case DWC3_LINK_STATE_U2:	/* in HS, means Sleep (L1) */
+	case DWC3_LINK_STATE_U1:
 	case DWC3_LINK_STATE_RESUME:
 		break;
 	default:
@@ -3519,6 +3522,7 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
 	/* Enable USB2 LPM Capability */
 
 	if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
+	    !dwc->usb2_gadget_lpm_disable &&
 	    (speed != DWC3_DSTS_SUPERSPEED) &&
 	    (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
 		reg = dwc3_readl(dwc->regs, DWC3_DCFG);
@@ -3545,6 +3549,12 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
 
 		dwc3_gadget_dctl_write_safe(dwc, reg);
 	} else {
+		if (dwc->usb2_gadget_lpm_disable) {
+			reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+			reg &= ~DWC3_DCFG_LPM_CAP;
+			dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+		}
+
 		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 		reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
 		dwc3_gadget_dctl_write_safe(dwc, reg);
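When the gadget-side LPM disable flag is set, the hunk above clears the LPM-capable bit in DCFG with the usual read-modify-write sequence. A toy sketch of that pattern on a mocked register; the mask value is illustrative, not the real DWC3_DCFG_LPM_CAP definition:

/* Sketch of a read-modify-write that clears one capability bit in a
 * mocked device register. LPM_CAP_BIT is an illustrative mask, not the
 * actual DWC3_DCFG_LPM_CAP value.
 */
#include <stdint.h>
#include <stdio.h>

#define LPM_CAP_BIT	(1u << 22)

static uint32_t dcfg = 0x00c2000a | LPM_CAP_BIT;	/* pretend register */

int main(void)
{
	uint32_t reg = dcfg;		/* read */
	reg &= ~LPM_CAP_BIT;		/* clear the capability bit */
	dcfg = reg;			/* write back */
	printf("DCFG now 0x%08x\n", (unsigned)dcfg);
	return 0;
}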
@@ -3993,7 +4003,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
 	dwc->gadget->ssp_rate		= USB_SSP_GEN_UNKNOWN;
 	dwc->gadget->sg_supported	= true;
 	dwc->gadget->name		= "dwc3-gadget";
-	dwc->gadget->lpm_capable	= true;
+	dwc->gadget->lpm_capable	= !dwc->usb2_gadget_lpm_disable;
 
 	/*
 	 * FIXME We might be setting max_speed to <SUPER, however versions
@@ -194,9 +194,13 @@ EXPORT_SYMBOL_GPL(usb_assign_descriptors);
 void usb_free_all_descriptors(struct usb_function *f)
 {
 	usb_free_descriptors(f->fs_descriptors);
+	f->fs_descriptors = NULL;
 	usb_free_descriptors(f->hs_descriptors);
+	f->hs_descriptors = NULL;
 	usb_free_descriptors(f->ss_descriptors);
+	f->ss_descriptors = NULL;
 	usb_free_descriptors(f->ssp_descriptors);
+	f->ssp_descriptors = NULL;
 }
 EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
 
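The added assignments make usb_free_all_descriptors() idempotent: each descriptor pointer is cleared right after it is freed, so a repeated call frees nothing and leaves nothing dangling. A generic sketch of the free-and-NULL idiom, using a hypothetical struct rather than struct usb_function:

/* Sketch of the free-and-NULL idiom the hunk applies to each descriptor
 * list: freeing through a pointer and immediately clearing it makes the
 * cleanup routine safe to call twice. The struct here is hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct function_descs {
	void *fs;
	void *hs;
};

static void free_all(struct function_descs *f)
{
	free(f->fs);
	f->fs = NULL;	/* second call sees NULL; free(NULL) is a no-op */
	free(f->hs);
	f->hs = NULL;
}

int main(void)
{
	struct function_descs f = { malloc(16), malloc(16) };

	free_all(&f);
	free_all(&f);	/* safe: no double free */
	printf("fs=%p hs=%p\n", f.fs, f.hs);
	return 0;
}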
@@ -463,6 +463,12 @@ struct tcpm_port {
 	/* Auto vbus discharge status */
 	bool auto_vbus_discharge_enabled;
 
+	/*
+	 * When set, port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY and
+	 * the actual current limit after RX of PD_CTRL_PSRDY for PD link,
+	 * SNK_READY for non-pd link.
+	 */
+	bool slow_charger_loop;
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *dentry;
 	struct mutex logbuffer_lock;	/* log buffer access lock */
@@ -3153,10 +3159,10 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
 			port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
 							  pdo_pps_apdo_max_voltage(snk));
 			port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
-			port->pps_data.req_out_volt = min(port->pps_data.max_volt,
-							  max(port->pps_data.min_volt,
+			port->pps_data.req_out_volt = min(port->pps_data.req_max_volt,
+							  max(port->pps_data.req_min_volt,
 							      port->pps_data.req_out_volt));
-			port->pps_data.req_op_curr = min(port->pps_data.max_curr,
+			port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
 							 port->pps_data.req_op_curr);
 		}
 
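The fix above clamps the requested PPS output voltage and operating current against the negotiated bounds (req_min_volt, req_max_volt, req_max_curr, i.e. the overlap of source and sink APDOs) rather than the sink-only min_volt/max_volt/max_curr fields. A small sketch of the clamp with illustrative millivolt values:

/* Sketch of the PPS request clamp after the fix: the requested output
 * voltage is bounded by the negotiated APDO range (source and sink),
 * not by the sink-only range. Values are illustrative, in millivolts.
 */
#include <stdio.h>

static int clamp(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int req_min_volt = 3300, req_max_volt = 5900;	/* negotiated range */
	int req_out_volt = 9000;			/* stale request    */

	req_out_volt = clamp(req_out_volt, req_min_volt, req_max_volt);
	printf("requested output voltage: %d mV\n", req_out_volt);	/* 5900 */
	return 0;
}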
@@ -4129,6 +4135,8 @@ static void run_state_machine(struct tcpm_port *port)
 					      port->supply_voltage,
 					      port->pd_capable,
 					      &current_limit, &adjust);
+		if (port->slow_charger_loop || (current_limit > PD_P_SNK_STDBY_MW / 5))
+			current_limit = PD_P_SNK_STDBY_MW / 5;
 		tcpm_set_current_limit(port, current_limit, 5000);
 		tcpm_set_charge(port, true);
 		tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
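With slow_charger_loop in play, the sink sticks to the PD standby budget while in SNK_DISCOVERY: the limit becomes PD_P_SNK_STDBY_MW / 5, the standby power divided by the 5 V vSafe5V rail. Assuming the usual PD_P_SNK_STDBY_MW value of 2500 mW (an assumption, not taken from this diff), that works out to 500 mA. A quick check of the arithmetic:

/* Quick check of the SNK_DISCOVERY standby current: standby power (mW)
 * divided by the 5 V rail gives the current limit in mA. The 2500 mW
 * figure for PD_P_SNK_STDBY_MW is assumed here.
 */
#include <stdio.h>

#define PD_P_SNK_STDBY_MW	2500	/* assumed pSnkStdby, in mW */

int main(void)
{
	unsigned int current_limit = 900;	/* e.g. Type-C default, mA */

	if (current_limit > PD_P_SNK_STDBY_MW / 5)
		current_limit = PD_P_SNK_STDBY_MW / 5;
	printf("limit during SNK_DISCOVERY: %u mA\n", current_limit);	/* 500 */
	return 0;
}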
@@ -4257,6 +4265,8 @@ static void run_state_machine(struct tcpm_port *port)
 		if (adjust)
 			tcpm_set_current_limit(port, current_limit, 5000);
 
+		if (!port->pd_capable && port->slow_charger_loop)
+			tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
 		tcpm_swap_complete(port, 0);
 		tcpm_typec_connect(port);
 		mod_enable_frs_delayed_work(port, 0);
@@ -5879,6 +5889,7 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
 	port->typec_caps.type = ret;
 	port->port_type = port->typec_caps.type;
 
+	port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
 	if (port->port_type == TYPEC_PORT_SNK)
 		goto sink;
 
@@ -1658,6 +1658,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
 
 	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
 
+	if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+		return -EINVAL;
 	if (vma->vm_end < vma->vm_start)
 		return -EINVAL;
 	if ((vma->vm_flags & VM_SHARED) == 0)
@@ -1666,7 +1668,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
 		int regnum = index - VFIO_PCI_NUM_REGIONS;
 		struct vfio_pci_region *region = vdev->region + regnum;
 
-		if (region && region->ops && region->ops->mmap &&
+		if (region->ops && region->ops->mmap &&
 		    (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
 			return region->ops->mmap(vdev, region, vma);
 		return -EINVAL;
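The added check validates the region index derived from vm_pgoff before it is used to index vdev->region further down, so an out-of-range mmap offset now fails with -EINVAL instead of reading past the array. A standalone sketch of the same guard with made-up constants (the real VFIO_PCI_* values are not reproduced here):

/* Sketch of the bounds check added before indexing the region array.
 * NUM_FIXED_REGIONS and the shifts are illustrative, not the real
 * VFIO_PCI_* definitions.
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define REGION_OFFSET_SHIFT	40
#define NUM_FIXED_REGIONS	9

static int check_mmap_index(unsigned long pgoff, unsigned int num_extra_regions)
{
	unsigned long index = pgoff >> (REGION_OFFSET_SHIFT - PAGE_SHIFT);

	if (index >= NUM_FIXED_REGIONS + num_extra_regions)
		return -1;	/* reject before any array access */
	return (int)index;
}

int main(void)
{
	printf("index for huge offset: %d\n", check_mmap_index(~0UL, 2));
	printf("index for region 3: %d\n",
	       check_mmap_index(3UL << (REGION_OFFSET_SHIFT - PAGE_SHIFT), 2));
	return 0;
}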
@@ -745,6 +745,9 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
 	.fault		= ext4_filemap_fault,
 	.map_pages	= filemap_map_pages,
 	.page_mkwrite   = ext4_page_mkwrite,
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	.allow_speculation = filemap_allow_speculation,
+#endif
 };
 
 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
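This hunk, and the matching f2fs one below, wires filemap_allow_speculation into the vm_operations table only when CONFIG_SPECULATIVE_PAGE_FAULT is set, and the fault path treats the hook as optional. A minimal sketch of an optional-callback dispatch with hypothetical names:

/* Minimal sketch (hypothetical names) of an optional ops callback: the
 * caller only invokes ->allow_speculation when the table provides it,
 * which is how a config-gated member behaves for the fault path.
 */
#include <stdbool.h>
#include <stdio.h>

struct vm_ops {
	bool (*allow_speculation)(void);	/* may be NULL */
};

static bool always_allow(void) { return true; }

static bool can_speculate(const struct vm_ops *ops)
{
	return ops->allow_speculation && ops->allow_speculation();
}

int main(void)
{
	struct vm_ops with = { .allow_speculation = always_allow };
	struct vm_ops without = { 0 };

	printf("with hook: %d, without: %d\n",
	       can_speculate(&with), can_speculate(&without));
	return 0;
}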
@@ -171,6 +171,9 @@ static const struct vm_operations_struct f2fs_file_vm_ops = {
 	.fault		= f2fs_filemap_fault,
 	.map_pages	= filemap_map_pages,
 	.page_mkwrite	= f2fs_vm_page_mkwrite,
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	.allow_speculation = filemap_allow_speculation,
+#endif
 };
 
 static int get_parent_ino(struct inode *inode, nid_t *pino)
@@ -7,6 +7,7 @@ incrementalfs-y := \
 	integrity.o \
 	main.o \
 	pseudo_files.o \
+	sysfs.o \
 	vfs.o
 
 incrementalfs-$(CONFIG_FS_VERITY) += verity.o
Some files were not shown because too many files have changed in this diff.