Merge branch 'android12-5.10' into android12-5.10-lts

Sync up with android12-5.10 for the following commits:

870488eb07 ANDROID: GKI: 7/14/2021 KMI update
e9742a9ea5 ANDROID: Update the ABI symbol list
ec2190fd3f FROMLIST: arm64: avoid double ISB on kernel entry
98b2c1dd1c FROMLIST: arm64: mte: optimize GCR_EL1 modification on kernel entry/exit
a20103c331 BACKPORT: FROMLIST: arm64: mte: avoid TFSR related operations unless in async mode
3972be647a FROMLIST: Documentation: document the preferred tag checking mode feature
5adf29adb5 FROMLIST: arm64: mte: introduce a per-CPU tag checking mode preference
ce5ba15abc FROMLIST: arm64: move preemption disablement to prctl handlers
6c08feaa27 FROMLIST: arm64: mte: change ASYNC and SYNC TCF settings into bitfields
f438cf16cd FROMLIST: arm64: mte: rename gcr_user_excl to mte_ctrl
a4c9e551b6 BACKPORT: arm64: pac: Optimize kernel entry/exit key installation code paths
50829b8901 BACKPORT: arm64: Introduce prctl(PR_PAC_{SET,GET}_ENABLED_KEYS)
6119d18df7 ANDROID: cleancache: add oem data to cleancache_ops
a0c429e8e1 ANDROID: blkdev: add oem data to block_device_operations
26cd2564e1 FROMLIST: psi: stop relying on timer_pending for poll_work rescheduling
e85b291d7d ANDROID: GKI: Enable CONFIG_MEMCG
0ed7424fa0 ANDROID: GKI: net: add vendor hooks for 'struct sock' lifecycle
4d30956478 ANDROID: GKI: net: add vendor hooks for 'struct nf_conn' lifecycle
7786463e48 ANDROID: GKI: add vendor padding variable in struct sock
280c9b98aa ANDROID: GKI: add vendor padding variable in struct nf_conn
9d1b55d20a ANDROID: vendor_hooks: add a field in mem_cgroup
65115fdbf8 ANDROID: vendor_hooks: add a field in pglist_data
26920e0f3a FROMLIST: usb: dwc3: avoid NULL access of usb_gadget_driver
5bb2dd8d39 FROMGIT: usb: dwc3: dwc3-qcom: Enable tx-fifo-resize property by default
79274dbb00 FROMGIT: usb: dwc3: Resize TX FIFOs to meet EP bursting requirements
1e11f36199 FROMGIT: usb: gadget: configfs: Check USB configuration before adding
6da5e7afbf FROMGIT: usb: gadget: udc: core: Introduce check_config to verify USB configuration
2ed5fbf261 ANDROID: GKI: fscrypt: add OEM data to struct fscrypt_operations
194fd9239a ANDROID: GKI: fscrypt: add ABI padding to struct fscrypt_operations
8011eb2215 ANDROID: mm: provision to add shmem pages to inactive file lru head
9bb1247653 ANDROID: GKI: Enable CONFIG_CGROUP_NET_PRIO
a1ce719ca7 ANDROID: Delete the DMA-BUF attachment sysfs statistics
a2b3afb2f7 ANDROID: android: Add symbols to debug_symbols driver
914a7b14a0 UPSTREAM: USB: UDC core: Add udc_async_callbacks gadget op
9af9ef8dfa ANDROID: vendor_hooks: Add oem data to file struct
37485a3025 ANDROID: add kabi padding for structures for the android12 release
429c78f9b0 ANDROID: GKI: device.h: add Android ABI padding to some structures
aea5e1c230 ANDROID: GKI: elevator: add Android ABI padding to some structures
1b79ef2754 ANDROID: GKI: scsi: add Android ABI padding to some structures
33175403b9 ANDROID: GKI: workqueue.h: add Android ABI padding to some structures
d5c344a498 ANDROID: GKI: sched: add Android ABI padding to some structures
9c4854fa5a ANDROID: GKI: phy: add Android ABI padding to some structures
f4872b2353 ANDROID: GKI: fs.h: add Android ABI padding to some structures
48cddc7c42 ANDROID: GKI: dentry: add Android ABI padding to some structures
b9081a2925 ANDROID: GKI: bio: add Android ABI padding to some structures
99bf8cf8fa ANDROID: GKI: ufs: add Android ABI padding to some structures
9df147298f ANDROID: Update the generic symbol list
12f48605e8 ANDROID: mm: cma do not sleep for __GFP_NORETRY
0e688e972d ANDROID: mm: cma: skip problematic pageblock
9938b82be1 ANDROID: mm: bail out tlb free batching on page zapping when cma is going on
c8578a3e90 ANDROID: mm: lru_cache_disable skips lru cache drainnig
c01ce3b5ef ANDROID: mm: do not try test_page_isoalte if migration fails
675e504598 ANDROID: mm: add cma allocation statistics
b1e4543c27 UPSTREAM: mm, page_alloc: move draining pcplists to page isolation users
13bc06efd9 ANDROID: ALSA: compress: add vendor hook to support pause in draining
2faed77792 ANDROID: vendor_hooks: add vendor hook in blk_mq_rq_ctx_init()
292baba45a ANDROID: abi_gki_aarch64_qcom: Add I3C core symbols to qcom tree
eecc725a8e ANDROID: vendor_hooks: add vendor hook in blk_mq_alloc_rqs()
9c2958f454 ANDROID: GKI: Export put_task_stack symbol
288805c86a ANDROID: abi_gki_aarch64_qcom: Add idr_alloc_u32
e8516fd3af ANDROID: sound: usb: add vendor hook for cpu suspend support
d820d22b5d ANDROID: mm: page_pinner: use EXPORT_SYMBOL_GPL
efc09793ea ANDROID: GKI: update allowed GKI symbol for Exynosauto SoC
67e3e39eb1 ANDROID: GKI: sync allowed list for exynosauto SoC
d25e256373 ANDROID: ABI: add new symbols required by fips140.ko
50661975be ANDROID: fips140: add/update module help text
b7397e89db ANDROID: fips140: add power-up cryptographic self-tests
bd7d13c36e ANDROID: arm64: disable LSE when building the FIPS140 module
1061ef0493 ANDROID: jump_label: disable jump labels in fips140.ko
dcf509fea7 ANDROID: ipv6: add vendor hook for gen ipv6 link-local addr
018332e871 ANDROID: Revert "scsi: block: Do not accept any requests while suspended"
2ad2c3a25b ANDROID: abi_gki_aarch64_qcom: whitelist vm_event_states
7bcfde2601 ANDROID: ashmem: Export is_ashmem_file

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I7d60121fa9f25f007dab97dd666adcdb1964afc8


@@ -648,3 +648,21 @@ Description: SPURR ticks for cpuX when it was idle.
This sysfs interface exposes the number of SPURR ticks
for cpuX when it was idle.
What: /sys/devices/system/cpu/cpuX/mte_tcf_preferred
Date: July 2021
Contact: Linux ARM Kernel Mailing list <linux-arm-kernel@lists.infradead.org>
Description: Preferred MTE tag checking mode
When a user program specifies more than one MTE tag checking
mode, this sysfs node is used to specify which mode should
be preferred when running on that CPU. Possible values:
================ ==============================================
"sync" Prefer synchronous mode
"async" Prefer asynchronous mode
================ ==============================================
Changes to this sysfs node may not take effect immediately.
See also: Documentation/arm64/memory-tagging-extension.rst
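Illustrative sketch (not part of this patch series): a privileged process
could select a CPU's preferred mode by writing to the node described
above. The cpu0 index and the "sync" value are example choices.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path documented above; the CPU index is illustrative. */
	int fd = open("/sys/devices/system/cpu/cpu0/mte_tcf_preferred",
		      O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Writing "sync" makes this CPU prefer synchronous tag checking. */
	if (write(fd, "sync", 4) != 4)
		perror("write");
	close(fd);
	return 0;
}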


@@ -22,31 +22,3 @@ KernelVersion: v5.12
Contact: Hridya Valsaraju <hridya@google.com>
Description: This file is read-only and specifies the size of the DMA-BUF in
bytes.
What: /sys/kernel/dmabuf/buffers/<inode_number>/attachments
Date: January 2021
KernelVersion: v5.12
Contact: Hridya Valsaraju <hridya@google.com>
Description: This directory will contain subdirectories representing every
attachment of the DMA-BUF.
What: /sys/kernel/dmabuf/buffers/<inode_number>/attachments/<attachment_uid>
Date: January 2021
KernelVersion: v5.12
Contact: Hridya Valsaraju <hridya@google.com>
Description: This directory will contain information on the attaching device
and the number of current distinct device mappings.
What: /sys/kernel/dmabuf/buffers/<inode_number>/attachments/<attachment_uid>/device
Date: January 2021
KernelVersion: v5.12
Contact: Hridya Valsaraju <hridya@google.com>
Description: This file is read-only and is a symlink to the attaching device's
sysfs entry.
What: /sys/kernel/dmabuf/buffers/<inode_number>/attachments/<attachment_uid>/map_counter
Date: January 2021
KernelVersion: v5.12
Contact: Hridya Valsaraju <hridya@google.com>
Description: This file is read-only and contains a map_counter indicating the
number of distinct device mappings of the attachment.


@@ -77,14 +77,20 @@ configurable behaviours:
address is unknown).
The user can select the above modes, per thread, using the
``prctl(PR_SET_TAGGED_ADDR_CTRL, flags, 0, 0, 0)`` system call where
``flags`` contain one of the following values in the ``PR_MTE_TCF_MASK``
``prctl(PR_SET_TAGGED_ADDR_CTRL, flags, 0, 0, 0)`` system call where ``flags``
contains any number of the following values in the ``PR_MTE_TCF_MASK``
bit-field:
- ``PR_MTE_TCF_NONE`` - *Ignore* tag check faults
- ``PR_MTE_TCF_NONE``  - *Ignore* tag check faults
(ignored if combined with other options)
- ``PR_MTE_TCF_SYNC`` - *Synchronous* tag check fault mode
- ``PR_MTE_TCF_ASYNC`` - *Asynchronous* tag check fault mode
If no modes are specified, tag check faults are ignored. If a single
mode is specified, the program will run in that mode. If multiple
modes are specified, the mode is selected as described in the "Per-CPU
preferred tag checking modes" section below.
The current tag check fault mode can be read using the
``prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)`` system call.
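Illustrative sketch (not part of this diff): a program could read back and
decode the requested modes roughly as follows; the PR_MTE_TCF_* constants
come from <linux/prctl.h> via <sys/prctl.h>.

#include <stdio.h>
#include <sys/prctl.h>

/* Report which tag check fault modes this thread has requested. */
static void print_mte_tcf_modes(void)
{
	int ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);

	if (ctrl < 0) {
		perror("prctl(PR_GET_TAGGED_ADDR_CTRL)");
		return;
	}
	if (ctrl & PR_MTE_TCF_SYNC)
		printf("synchronous mode requested\n");
	if (ctrl & PR_MTE_TCF_ASYNC)
		printf("asynchronous mode requested\n");
	if (!(ctrl & (PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC)))
		printf("tag check faults are ignored\n");
}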
@@ -120,13 +126,39 @@ in the ``PR_MTE_TAG_MASK`` bit-field.
interface provides an include mask. An include mask of ``0`` (exclusion
mask ``0xffff``) results in the CPU always generating tag ``0``.
Per-CPU preferred tag checking mode
-----------------------------------
On some CPUs the performance of MTE in stricter tag checking modes
is similar to that of less strict tag checking modes. This makes it
worthwhile to enable stricter checks on those CPUs when a less strict
checking mode is requested, in order to gain the error detection
benefits of the stricter checks without the performance downsides. To
support this scenario, a privileged user may configure a stricter
tag checking mode as the CPU's preferred tag checking mode.
The preferred tag checking mode for each CPU is controlled by
``/sys/devices/system/cpu/cpu<N>/mte_tcf_preferred``, to which a
privileged user may write the value ``async`` or ``sync``. The default
preferred mode for each CPU is ``async``.
To allow a program to potentially run in the CPU's preferred tag
checking mode, the user program may set multiple tag check fault mode
bits in the ``flags`` argument to the ``prctl(PR_SET_TAGGED_ADDR_CTRL,
flags, 0, 0, 0)`` system call. If the CPU's preferred tag checking
mode is in the task's set of provided tag checking modes (this will
always be the case at present because the kernel only supports two
tag checking modes, but future kernels may support more modes), that
mode will be selected. Otherwise, one of the modes in the task's mode
set will be selected in a currently unspecified manner.
Initial process state
---------------------
On ``execve()``, the new process has the following configuration:
- ``PR_TAGGED_ADDR_ENABLE`` set to 0 (disabled)
- Tag checking mode set to ``PR_MTE_TCF_NONE``
- No tag checking modes are selected (tag check faults ignored)
- ``PR_MTE_TAG_MASK`` set to 0 (all tags excluded)
- ``PSTATE.TCO`` set to 0
- ``PROT_MTE`` not set on any of the initial memory maps
@@ -251,11 +283,13 @@ Example of correct usage
return EXIT_FAILURE;
/*
* Enable the tagged address ABI, synchronous MTE tag check faults and
* allow all non-zero tags in the randomly generated set.
* Enable the tagged address ABI, synchronous or asynchronous MTE
* tag check faults (based on per-CPU preference) and allow all
* non-zero tags in the randomly generated set.
*/
if (prctl(PR_SET_TAGGED_ADDR_CTRL,
PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | (0xfffe << PR_MTE_TAG_SHIFT),
PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC |
(0xfffe << PR_MTE_TAG_SHIFT),
0, 0, 0)) {
perror("prctl() failed");
return EXIT_FAILURE;


@@ -107,3 +107,37 @@ filter out the Pointer Authentication system key registers from
KVM_GET/SET_REG_* ioctls and mask those features from cpufeature ID
register. Any attempt to use the Pointer Authentication instructions will
result in an UNDEFINED exception being injected into the guest.
Enabling and disabling keys
---------------------------
The prctl PR_PAC_SET_ENABLED_KEYS allows the user program to control which
PAC keys are enabled in a particular task. It takes two arguments, the
first being a bitmask of PR_PAC_APIAKEY, PR_PAC_APIBKEY, PR_PAC_APDAKEY
and PR_PAC_APDBKEY specifying which keys shall be affected by this prctl,
and the second being a bitmask of the same bits specifying whether the key
should be enabled or disabled. For example::
prctl(PR_PAC_SET_ENABLED_KEYS,
PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY,
PR_PAC_APIBKEY, 0, 0);
disables all keys except the IB key.
The main reason why this is useful is to enable a userspace ABI that uses PAC
instructions to sign and authenticate function pointers and other pointers
exposed outside of the function, while still allowing binaries conforming to
the ABI to interoperate with legacy binaries that do not sign or authenticate
pointers.
The idea is that a dynamic loader or early startup code would issue this
prctl very early after establishing that a process may load legacy binaries,
but before executing any PAC instructions.
For compatibility with previous kernel versions, processes start up with IA,
IB, DA and DB enabled, and are reset to this state on exec(). Processes created
via fork() and clone() inherit the key enabled state from the calling process.
It is recommended to avoid disabling the IA key, as this has higher performance
overhead than disabling any of the other keys.
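Illustrative sketch (not part of this patch): the companion
PR_PAC_GET_ENABLED_KEYS prctl, introduced by the same commit, can be used
to query the current state, for example:

#include <stdio.h>
#include <sys/prctl.h>

/* Print which PAC keys are enabled for the calling task. */
static void print_enabled_pac_keys(void)
{
	int keys = prctl(PR_PAC_GET_ENABLED_KEYS, 0, 0, 0, 0);

	if (keys < 0) {
		perror("prctl(PR_PAC_GET_ENABLED_KEYS)");
		return;
	}
	printf("IA:%d IB:%d DA:%d DB:%d\n",
	       !!(keys & PR_PAC_APIAKEY), !!(keys & PR_PAC_APIBKEY),
	       !!(keys & PR_PAC_APDAKEY), !!(keys & PR_PAC_APDBKEY));
}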

File diff suppressed because it is too large.


@@ -120,6 +120,7 @@
dma_buf_unmap_attachment
dma_buf_vmap
dma_buf_vunmap
dmaengine_unmap_put
dma_fence_add_callback
dma_fence_context_alloc
dma_fence_default_wait
@@ -338,6 +339,7 @@
of_property_read_string
of_property_read_string_helper
of_property_read_u32_index
of_property_read_variable_u16_array
of_property_read_variable_u32_array
of_property_read_variable_u8_array
of_prop_next_string
@@ -348,6 +350,7 @@
panic_notifier_list
param_ops_bool
param_ops_int
param_ops_string
param_ops_uint
PDE_DATA
__per_cpu_offset
@@ -358,7 +361,6 @@
pinctrl_lookup_state
pinctrl_select_state
platform_bus_type
platform_device_unregister
__platform_driver_register
platform_driver_unregister
platform_get_irq
@@ -563,6 +565,7 @@
wait_for_completion_timeout
__wake_up
wake_up_process
wakeup_source_register
wakeup_source_unregister
__warn_printk
work_busy
@@ -598,6 +601,20 @@
debugfs_create_x32
kernel_kobj
# required by dmatest.ko
dmaengine_get_unmap_data
__dma_request_channel
dma_sync_wait
kthread_should_stop
param_get_bool
param_get_string
param_set_bool
param_set_copystring
prandom_bytes
set_freezable
set_user_nice
strim
# required by dss.ko
android_debug_symbol
arch_timer_read_counter
@@ -657,6 +674,7 @@
platform_device_alloc
platform_device_del
platform_device_put
platform_device_unregister
platform_get_irq_byname_optional
__pm_relax
pm_runtime_allow
@@ -723,16 +741,20 @@
__drm_atomic_helper_plane_destroy_state
__drm_atomic_helper_plane_duplicate_state
__drm_atomic_helper_plane_reset
__drm_atomic_helper_private_obj_duplicate_state
drm_atomic_helper_set_config
drm_atomic_helper_shutdown
drm_atomic_helper_update_plane
drm_atomic_helper_wait_for_vblanks
drm_atomic_normalize_zpos
drm_atomic_private_obj_init
drm_bridge_attach
drm_compat_ioctl
drm_connector_list_iter_begin
drm_connector_list_iter_end
drm_connector_list_iter_next
drm_connector_set_path_property
drm_connector_set_tile_property
drm_connector_unregister
drm_crtc_arm_vblank_event
drm_crtc_cleanup
@@ -758,7 +780,9 @@
drm_dp_aux_register
drm_dp_aux_unregister
drm_dp_bw_code_to_link_rate
drm_dp_calc_pbn_mode
drm_dp_channel_eq_ok
drm_dp_check_act_status
drm_dp_clock_recovery_ok
drm_dp_downstream_debug
drm_dp_dpcd_read
@@ -766,11 +790,23 @@
drm_dp_dpcd_write
drm_dp_dsc_sink_line_buf_depth
drm_dp_dsc_sink_max_slice_count
drm_dp_find_vcpi_slots
drm_dp_get_adjust_request_pre_emphasis
drm_dp_get_adjust_request_voltage
drm_dp_link_train_channel_eq_delay
drm_dp_link_train_clock_recovery_delay
drm_dp_mst_allocate_vcpi
drm_dp_mst_deallocate_vcpi
drm_dp_mst_detect_port
drm_dp_mst_dump_topology
drm_dp_mst_get_port_malloc
drm_dp_mst_hpd_irq
drm_dp_mst_put_port_malloc
drm_dp_mst_reset_vcpi_slots
drm_dp_mst_topology_mgr_init
drm_dp_mst_topology_mgr_set_mst
drm_dp_update_payload_part1
drm_dp_update_payload_part2
drm_dsc_compute_rc_parameters
drm_dsc_pps_payload_pack
drm_encoder_cleanup
@@ -798,6 +834,7 @@
drm_helper_connector_dpms
drm_helper_hpd_irq_event
drm_helper_mode_fill_fb_struct
drm_helper_probe_detect
drm_ioctl
drm_kms_helper_poll_fini
drm_kms_helper_poll_init
@@ -831,9 +868,11 @@
kstrtobool
mipi_dsi_host_register
mipi_dsi_host_unregister
mutex_is_locked
of_drm_find_bridge
of_drm_find_panel
of_graph_get_endpoint_by_regs
of_graph_get_endpoint_count
of_graph_get_next_endpoint
of_graph_get_port_by_id
of_graph_get_remote_port
@@ -841,9 +880,7 @@
of_graph_parse_endpoint
of_phandle_iterator_init
of_phandle_iterator_next
platform_find_device_by_driver
seq_hex_dump
strnstr
# required by exynos_mfc.ko
iommu_dma_reserve_iova
@@ -857,6 +894,13 @@
_raw_write_lock_irqsave
_raw_write_unlock_irqrestore
# required by exynos_thermal.ko
of_thermal_get_ntrips
of_thermal_is_trip_valid
thermal_zone_device_update
thermal_zone_of_sensor_register
thermal_zone_of_sensor_unregister
# required by exynos_tty.ko
dma_get_slave_caps
do_SAK
@@ -901,10 +945,6 @@
# required by i2c-exynosauto.ko
i2c_del_adapter
# required by lt8912.ko
drm_mode_duplicate
of_get_drm_display_mode
# required by mali_kbase.ko
anon_inode_getfd
__arch_clear_user
@@ -1065,7 +1105,6 @@
dma_async_device_register
dma_async_device_unregister
dma_async_tx_descriptor_init
dmaengine_unmap_put
dma_get_slave_channel
dma_map_resource
dma_unmap_resource
@@ -1125,7 +1164,6 @@
kthread_flush_work
kthread_flush_worker
ktime_get_with_offset
param_ops_string
phy_configure
pm_relax
pm_stay_awake
@@ -1371,14 +1409,11 @@
# required by ufs-exynosauto-core.ko
blk_ksm_init_passthrough
of_property_read_variable_u16_array
__traceiter_android_vh_ufs_fill_prdt
__tracepoint_android_vh_ufs_fill_prdt
ufshcd_alloc_host
ufshcd_config_pwr_mode
ufshcd_dme_get_attr
ufshcd_dme_set_attr
ufshcd_init
ufshcd_link_recovery
ufshcd_pltfrm_init
ufshcd_remove
@@ -1386,9 +1421,6 @@
ufshcd_system_resume
ufshcd_system_suspend
# required by vbpipe-module.ko
wakeup_source_register
# required by vbufq-be-module.ko
drain_workqueue
radix_tree_delete


@@ -1,12 +1,17 @@
[abi_symbol_list]
# commonly used symbols
module_layout
__put_task_struct
# required by fips140.ko
add_random_ready_callback
aead_register_instance
arm64_const_caps_ready
bcmp
cancel_work_sync
__cfi_slowpath
cpu_have_feature
cpu_hwcap_keys
crypto_aead_decrypt
crypto_aead_encrypt
crypto_aead_setauthsize
@@ -16,11 +21,14 @@
crypto_alg_list
crypto_alg_mod_lookup
crypto_alg_sem
crypto_alloc_aead
crypto_alloc_base
crypto_alloc_rng
crypto_alloc_shash
crypto_alloc_skcipher
crypto_attr_alg_name
crypto_check_attr_type
crypto_cipher_decrypt_one
crypto_cipher_encrypt_one
crypto_cipher_setkey
crypto_destroy_tfm
@@ -43,6 +51,7 @@
crypto_remove_final
crypto_remove_spawns
crypto_req_done
crypto_rng_reset
crypto_shash_alg_has_setkey
crypto_shash_digest
crypto_shash_final
@@ -77,6 +86,7 @@
kmalloc_caches
kmalloc_order_trace
kmem_cache_alloc_trace
kmemdup
__list_add_valid
__list_del_entry_valid
memcpy
@@ -89,6 +99,7 @@
preempt_schedule_notrace
printk
queue_work_on
refcount_warn_saturate
scatterwalk_ffwd
scatterwalk_map_and_copy
sg_init_one


@@ -24,6 +24,8 @@
amba_driver_unregister
android_rvh_probe_register
anon_inode_getfd
arc4_crypt
arc4_setkey
__arch_clear_user
__arch_copy_from_user
__arch_copy_in_user
@@ -105,6 +107,7 @@
bus_unregister
bus_unregister_notifier
cache_line_size
call_rcu
cancel_delayed_work
cancel_delayed_work_sync
cancel_work_sync
@@ -225,6 +228,8 @@
cpus_read_lock
cpus_read_unlock
cpu_subsys
crc32_be
crc32_le
crc8
crc8_populate_msb
crypto_aead_decrypt
@@ -234,19 +239,24 @@
crypto_alloc_aead
crypto_alloc_base
crypto_alloc_shash
crypto_alloc_skcipher
crypto_alloc_sync_skcipher
crypto_comp_compress
crypto_comp_decompress
crypto_destroy_tfm
__crypto_memneq
crypto_register_alg
crypto_register_scomp
crypto_shash_digest
crypto_shash_finup
crypto_shash_setkey
crypto_shash_update
crypto_skcipher_decrypt
crypto_skcipher_encrypt
crypto_skcipher_setkey
crypto_unregister_alg
crypto_unregister_scomp
__crypto_xor
csum_ipv6_magic
csum_tcpudp_nofold
_ctype
@@ -264,6 +274,7 @@
debugfs_create_x32
debugfs_lookup
debugfs_remove
debugfs_rename
dec_zone_page_state
default_llseek
deferred_free
@@ -273,11 +284,14 @@
del_timer_sync
desc_to_gpio
destroy_workqueue
dev_alloc_name
dev_change_net_namespace
dev_close
_dev_crit
dev_driver_string
_dev_emerg
_dev_err
dev_fetch_sw_netstats
devfreq_add_device
devfreq_add_governor
devfreq_monitor_resume
@@ -290,12 +304,16 @@
devfreq_unregister_opp_notifier
devfreq_update_interval
dev_fwnode
__dev_get_by_index
dev_get_by_index
dev_get_by_name
device_add
device_add_disk
device_add_groups
device_create
device_create_file
device_create_with_groups
device_del
device_destroy
device_find_child
device_for_each_child
@@ -311,6 +329,7 @@
device_register
device_remove_file
device_remove_groups
device_rename
device_set_wakeup_capable
device_set_wakeup_enable
device_unregister
@@ -390,6 +409,8 @@
devm_thermal_zone_of_sensor_unregister
devm_usb_get_phy_by_phandle
_dev_notice
dev_pm_domain_attach_by_name
dev_pm_domain_detach
dev_pm_opp_add
dev_pm_opp_disable
dev_pm_opp_find_freq_ceil
@@ -406,6 +427,7 @@
dev_pm_opp_set_regulators
dev_pm_qos_read_value
dev_pm_qos_update_request
dev_printk
dev_printk_emit
dev_queue_xmit
devres_add
@@ -690,6 +712,9 @@
emergency_restart
enable_irq
enable_percpu_irq
ether_setup
eth_mac_addr
ethtool_op_get_link
eth_type_trans
eventfd_ctx_fdget
eventfd_ctx_put
@@ -719,6 +744,7 @@
find_vma
finish_wait
flush_dcache_page
flush_delayed_work
flush_work
flush_workqueue
fput
@@ -745,6 +771,7 @@
generic_file_llseek
generic_handle_irq
generic_iommu_put_resv_regions
genlmsg_multicast_allns
genlmsg_put
genl_register_family
genl_unregister_family
@@ -763,6 +790,8 @@
get_device
__get_free_pages
get_governor_parent_kobj
get_net_ns_by_fd
get_net_ns_by_pid
get_pid_task
get_random_bytes
get_random_bytes_arch
@@ -830,6 +859,9 @@
hrtimer_sleeper_start_expires
hrtimer_start_range_ns
hrtimer_try_to_cancel
__hw_addr_init
__hw_addr_sync
__hw_addr_unsync
hwrng_register
hwrng_unregister
i2c_adapter_type
@@ -858,6 +890,7 @@
idr_destroy
idr_find
idr_for_each
idr_get_next
idr_preload
idr_remove
iio_device_unregister
@@ -865,6 +898,7 @@
in6_pton
in_aton
inc_zone_page_state
inet_csk_get_port
init_dummy_netdev
init_net
init_pseudo
@@ -872,6 +906,7 @@
__init_swait_queue_head
init_task
init_timer_key
init_uts_ns
init_wait_entry
__init_waitqueue_head
input_allocate_device
@@ -960,9 +995,14 @@
kasprintf
kernel_cpustat
kernel_kobj
kernel_param_lock
kernel_param_unlock
kernel_restart
kern_mount
kern_unmount
key_create_or_update
key_put
keyring_alloc
__kfifo_alloc
__kfifo_free
__kfifo_in
@@ -971,6 +1011,7 @@
kfree
kfree_sensitive
kfree_skb
kfree_skb_list
kill_anon_super
kill_fasync
kimage_voffset
@@ -1026,11 +1067,13 @@
kthread_stop
kthread_worker_fn
ktime_get
ktime_get_coarse_with_offset
ktime_get_mono_fast_ns
ktime_get_raw
ktime_get_raw_ts64
ktime_get_real_seconds
ktime_get_real_ts64
ktime_get_seconds
ktime_get_ts64
ktime_get_with_offset
kvfree
@@ -1049,6 +1092,8 @@
log_threaded_irq_wakeup_reason
__log_write_mmio
loops_per_jiffy
lru_cache_disable
lru_cache_enable
lzo1x_1_compress
lzo1x_decompress_safe
lzorle1x_1_compress
@@ -1116,19 +1161,31 @@
__napi_schedule
napi_schedule_prep
__netdev_alloc_skb
netdev_err
netdev_info
netdev_set_default_ethtool_ops
netdev_state_change
netdev_update_features
netif_carrier_off
netif_carrier_on
netif_napi_add
__netif_napi_del
netif_receive_skb
netif_receive_skb_list
netif_rx
netif_rx_ni
netif_tx_stop_all_queues
netif_tx_wake_queue
netlink_broadcast
__netlink_kernel_create
netlink_kernel_release
netlink_register_notifier
netlink_unicast
netlink_unregister_notifier
net_ns_type_operations
net_ratelimit
nla_append
nla_find
nla_memcpy
__nla_parse
nla_put
@@ -1136,6 +1193,7 @@
nla_put_nohdr
nla_reserve
nla_strlcpy
__nla_validate
__nlmsg_put
no_llseek
nonseekable_open
@@ -1370,8 +1428,10 @@
proc_remove
pskb_expand_head
__pskb_pull_tail
___pskb_trim
put_device
put_disk
__put_net
__put_page
put_sg_io_hdr
__put_task_struct
@@ -1409,6 +1469,7 @@
rb_next
rb_prev
rb_replace_node
rcu_barrier
__rcu_read_lock
__rcu_read_unlock
rdev_get_drvdata
@@ -1430,6 +1491,7 @@
register_netdev
register_netdevice
register_netdevice_notifier
register_pernet_device
register_pernet_subsys
register_pm_notifier
register_reboot_notifier
@@ -1489,11 +1551,24 @@
return_address
revalidate_disk_size
rfkill_alloc
rfkill_blocked
rfkill_destroy
rfkill_init_sw_state
rfkill_pause_polling
rfkill_register
rfkill_resume_polling
rfkill_set_hw_state
rfkill_unregister
rhashtable_free_and_destroy
rhashtable_insert_slow
rhltable_init
__rht_bucket_nested
rht_bucket_nested
rht_bucket_nested_insert
root_task_group
round_jiffies
round_jiffies_relative
round_jiffies_up
rps_needed
rtc_class_close
rtc_class_open
@@ -1583,11 +1658,20 @@
single_open
single_open_size
single_release
skb_add_rx_frag
skb_checksum
skb_checksum_help
skb_clone
skb_clone_sk
skb_complete_wifi_ack
skb_copy
skb_copy_bits
skb_copy_expand
skb_dequeue
skb_dequeue_tail
skb_ensure_writable
__skb_get_hash
__skb_gso_segment
skb_pull
skb_push
skb_put
@@ -1677,6 +1761,8 @@
snd_soc_unregister_component
snprintf
soc_device_register
__sock_create
sock_release
sock_wfree
softnet_data
sort
@@ -1735,8 +1821,10 @@
submit_bio
submit_bio_wait
subsys_system_register
__sw_hweight16
__sw_hweight32
__sw_hweight64
__sw_hweight8
sync_file_create
sync_file_get_fence
synchronize_irq
@@ -1761,6 +1849,7 @@
sysfs_streq
sysfs_update_group
sysrq_mask
system_freezable_wq
system_freezing_cnt
system_highpri_wq
system_long_wq
@@ -1821,6 +1910,7 @@
__traceiter_android_rvh_post_init_entity_util_avg
__traceiter_android_rvh_preempt_disable
__traceiter_android_rvh_preempt_enable
__traceiter_android_rvh_sched_fork
__traceiter_android_rvh_select_task_rq_rt
__traceiter_android_rvh_set_iowait
__traceiter_android_rvh_typec_tcpci_chk_contaminant
@@ -1841,6 +1931,8 @@
__traceiter_android_vh_pagecache_get_page
__traceiter_android_vh_rmqueue
__traceiter_android_vh_setscheduler_uclamp
__traceiter_android_vh_snd_compr_use_pause_in_drain
__traceiter_android_vh_sound_usb_support_cpu_suspend
__traceiter_android_vh_thermal_pm_notify_suspend
__traceiter_android_vh_timerfd_create
__traceiter_android_vh_typec_store_partner_src_caps
@@ -1891,6 +1983,7 @@
__tracepoint_android_rvh_post_init_entity_util_avg
__tracepoint_android_rvh_preempt_disable
__tracepoint_android_rvh_preempt_enable
__tracepoint_android_rvh_sched_fork
__tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_rvh_set_iowait
__tracepoint_android_rvh_typec_tcpci_chk_contaminant
@@ -1911,6 +2004,8 @@
__tracepoint_android_vh_pagecache_get_page
__tracepoint_android_vh_rmqueue
__tracepoint_android_vh_setscheduler_uclamp
__tracepoint_android_vh_snd_compr_use_pause_in_drain
__tracepoint_android_vh_sound_usb_support_cpu_suspend
__tracepoint_android_vh_thermal_pm_notify_suspend
__tracepoint_android_vh_timerfd_create
__tracepoint_android_vh_typec_store_partner_src_caps
@@ -2004,8 +2099,10 @@
unregister_inet6addr_notifier
unregister_inetaddr_notifier
unregister_netdev
unregister_netdevice_many
unregister_netdevice_notifier
unregister_netdevice_queue
unregister_pernet_device
unregister_pernet_subsys
unregister_pm_notifier
unregister_reboot_notifier
@@ -2113,6 +2210,7 @@
vb2_streamon
vb2_vmalloc_memops
vb2_wait_for_all_buffers
verify_pkcs7_signature
vfree
video_devdata
video_device_alloc
@@ -2164,6 +2262,7 @@
watchdog_register_device
watchdog_set_restart_priority
watchdog_unregister_device
wireless_nlevent_flush
woken_wake_function
work_busy
__xfrm_state_destroy
@@ -2178,6 +2277,7 @@
xhci_handle_event
xhci_init_driver
xhci_resume
xhci_ring_alloc
xhci_ring_free
xhci_run
xhci_suspend


@@ -1052,6 +1052,20 @@
i2c_register_driver
i2c_transfer
i2c_transfer_buffer_flags
i3c_generic_ibi_alloc_pool
i3c_generic_ibi_free_pool
i3c_generic_ibi_get_free_slot
i3c_generic_ibi_recycle_slot
i3c_master_add_i3c_dev_locked
i3c_master_disec_locked
i3c_master_do_daa
i3c_master_enec_locked
i3c_master_entdaa_locked
i3c_master_get_free_addr
i3c_master_queue_ibi
i3c_master_register
i3c_master_set_info
i3c_master_unregister
icc_disable
icc_enable
icc_get
@@ -1069,6 +1083,7 @@
ida_free
idr_alloc
idr_alloc_cyclic
idr_alloc_u32
idr_destroy
idr_find
idr_for_each
@@ -2923,6 +2938,7 @@
vmalloc_to_pfn
vmap
vmemdup_user
vm_event_states
vmf_insert_mixed
vmf_insert_pfn
vm_get_page_prot


@@ -15,6 +15,7 @@ CONFIG_IKHEADERS=y
CONFIG_UCLAMP_TASK=y
CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_UCLAMP_TASK_GROUP=y
@@ -257,6 +258,7 @@ CONFIG_NET_ACT_GACT=y
CONFIG_NET_ACT_MIRRED=y
CONFIG_NET_ACT_SKBEDIT=y
CONFIG_VSOCKETS=y
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_CAN=y
CONFIG_BT=y


@@ -13,30 +13,12 @@
* so use the base value of ldp as thread.keys_user and offset as
* thread.keys_user.ap*.
*/
.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
.macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
mov \tmp1, #THREAD_KEYS_USER
add \tmp1, \tsk, \tmp1
alternative_if_not ARM64_HAS_ADDRESS_AUTH
b .Laddr_auth_skip_\@
alternative_else_nop_endif
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
msr_s SYS_APIAKEYLO_EL1, \tmp2
msr_s SYS_APIAKEYHI_EL1, \tmp3
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIB]
msr_s SYS_APIBKEYLO_EL1, \tmp2
msr_s SYS_APIBKEYHI_EL1, \tmp3
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDA]
msr_s SYS_APDAKEYLO_EL1, \tmp2
msr_s SYS_APDAKEYHI_EL1, \tmp3
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDB]
msr_s SYS_APDBKEYLO_EL1, \tmp2
msr_s SYS_APDBKEYHI_EL1, \tmp3
.Laddr_auth_skip_\@:
alternative_if ARM64_HAS_GENERIC_AUTH
ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APGA]
msr_s SYS_APGAKEYLO_EL1, \tmp2
msr_s SYS_APGAKEYHI_EL1, \tmp3
alternative_else_nop_endif
.endm
.macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3


@@ -4,7 +4,7 @@
#include <asm/atomic_ll_sc.h>
#ifdef CONFIG_ARM64_LSE_ATOMICS
#if defined(CONFIG_ARM64_LSE_ATOMICS) && !defined(BUILD_FIPS140_KO)
#define __LSE_PREAMBLE ".arch_extension lse\n"


@@ -40,7 +40,7 @@ void mte_free_tag_storage(char *storage);
void mte_zero_clear_page_tags(void *addr);
void mte_sync_tags(pte_t *ptep, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
void flush_mte_state(void);
void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
void mte_suspend_enter(void);
void mte_suspend_exit(void);
@@ -63,7 +63,7 @@ static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
static inline void mte_copy_page_tags(void *kto, const void *kfrom)
{
}
static inline void flush_mte_state(void)
static inline void mte_thread_init_user(void)
{
}
static inline void mte_thread_switch(struct task_struct *next)


@@ -3,12 +3,16 @@
#define __ASM_POINTER_AUTH_H
#include <linux/bitops.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <asm/cpufeature.h>
#include <asm/memory.h>
#include <asm/sysreg.h>
#define PR_PAC_ENABLED_KEYS_MASK \
(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
#ifdef CONFIG_ARM64_PTR_AUTH
/*
* Each key is a 128-bit quantity which is split across a pair of 64-bit
@@ -34,6 +38,25 @@ struct ptrauth_keys_kernel {
struct ptrauth_key apia;
};
#define __ptrauth_key_install_nosync(k, v) \
do { \
struct ptrauth_key __pki_v = (v); \
write_sysreg_s(__pki_v.lo, SYS_ ## k ## KEYLO_EL1); \
write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \
} while (0)
static inline void ptrauth_keys_install_user(struct ptrauth_keys_user *keys)
{
if (system_supports_address_auth()) {
__ptrauth_key_install_nosync(APIB, keys->apib);
__ptrauth_key_install_nosync(APDA, keys->apda);
__ptrauth_key_install_nosync(APDB, keys->apdb);
}
if (system_supports_generic_auth())
__ptrauth_key_install_nosync(APGA, keys->apga);
}
static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
{
if (system_supports_address_auth()) {
@@ -45,14 +68,9 @@ static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
if (system_supports_generic_auth())
get_random_bytes(&keys->apga, sizeof(keys->apga));
}
#define __ptrauth_key_install_nosync(k, v) \
do { \
struct ptrauth_key __pki_v = (v); \
write_sysreg_s(__pki_v.lo, SYS_ ## k ## KEYLO_EL1); \
write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \
} while (0)
ptrauth_keys_install_user(keys);
}
static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
{
@@ -71,6 +89,10 @@ static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kerne
extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
unsigned long enabled);
extern int ptrauth_get_enabled_keys(struct task_struct *tsk);
static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
{
return ptrauth_clear_pac(ptr);
@@ -85,8 +107,23 @@ static __always_inline void ptrauth_enable(void)
isb();
}
#define ptrauth_thread_init_user(tsk) \
ptrauth_keys_init_user(&(tsk)->thread.keys_user)
#define ptrauth_suspend_exit() \
ptrauth_keys_install_user(&current->thread.keys_user)
#define ptrauth_thread_init_user() \
do { \
ptrauth_keys_init_user(&current->thread.keys_user); \
\
/* enable all keys */ \
if (system_supports_address_auth()) \
ptrauth_set_enabled_keys(current, \
PR_PAC_ENABLED_KEYS_MASK, \
PR_PAC_ENABLED_KEYS_MASK); \
} while (0)
#define ptrauth_thread_switch_user(tsk) \
ptrauth_keys_install_user(&(tsk)->thread.keys_user)
#define ptrauth_thread_init_kernel(tsk) \
ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
#define ptrauth_thread_switch_kernel(tsk) \
@@ -95,9 +132,13 @@ static __always_inline void ptrauth_enable(void)
#else /* CONFIG_ARM64_PTR_AUTH */
#define ptrauth_enable()
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
#define ptrauth_set_enabled_keys(tsk, keys, enabled) (-EINVAL)
#define ptrauth_get_enabled_keys(tsk) (-EINVAL)
#define ptrauth_strip_insn_pac(lr) (lr)
#define ptrauth_thread_init_user(tsk)
#define ptrauth_suspend_exit()
#define ptrauth_thread_init_user()
#define ptrauth_thread_init_kernel(tsk)
#define ptrauth_thread_switch_user(tsk)
#define ptrauth_thread_switch_kernel(tsk)
#endif /* CONFIG_ARM64_PTR_AUTH */


@@ -19,6 +19,12 @@
*/
#define NET_IP_ALIGN 0
#define MTE_CTRL_GCR_USER_EXCL_SHIFT 0
#define MTE_CTRL_GCR_USER_EXCL_MASK 0xffff
#define MTE_CTRL_TCF_SYNC (1UL << 16)
#define MTE_CTRL_TCF_ASYNC (1UL << 17)
#ifndef __ASSEMBLY__
#include <linux/build_bug.h>
@@ -156,12 +162,14 @@ struct thread_struct {
struct ptrauth_keys_kernel keys_kernel;
#endif
#ifdef CONFIG_ARM64_MTE
u64 gcr_user_excl;
u64 mte_ctrl;
#endif
u64 sctlr_user;
};
#define SCTLR_USER_MASK SCTLR_EL1_TCF0_MASK
#define SCTLR_USER_MASK \
(SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB | \
SCTLR_EL1_TCF0_MASK)
static inline void arch_thread_struct_whitelist(unsigned long *offset,
unsigned long *size)
@@ -254,7 +262,7 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
void set_task_sctlr_el1(u64 sctlr);
void update_sctlr_el1(u64 sctlr);
/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
@@ -310,6 +318,11 @@ extern void __init minsigstksz_setup(void);
/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg)
/* PR_PAC_{SET,GET}_ENABLED_KEYS prctl */
#define PAC_SET_ENABLED_KEYS(tsk, keys, enabled) \
ptrauth_set_enabled_keys(tsk, keys, enabled)
#define PAC_GET_ENABLED_KEYS(tsk) ptrauth_get_enabled_keys(tsk)
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);


@@ -610,8 +610,10 @@
#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_ENIA_SHIFT 31
#define SCTLR_ELx_ITFSB (BIT(37))
#define SCTLR_ELx_ENIA (BIT(31))
#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT))
#define SCTLR_ELx_ENIB (BIT(30))
#define SCTLR_ELx_ENDA (BIT(27))
#define SCTLR_ELx_EE (BIT(25))


@@ -44,12 +44,13 @@ int main(void)
#endif
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
DEFINE(THREAD_SCTLR_USER, offsetof(struct task_struct, thread.sctlr_user));
#ifdef CONFIG_ARM64_PTR_AUTH
DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
#endif
#ifdef CONFIG_ARM64_MTE
DEFINE(THREAD_GCR_EL1_USER, offsetof(struct task_struct, thread.gcr_user_excl));
DEFINE(THREAD_MTE_CTRL, offsetof(struct task_struct, thread.mte_ctrl));
#endif
BLANK();
DEFINE(S_X0, offsetof(struct pt_regs, regs[0]));
@@ -153,10 +154,6 @@ int main(void)
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia));
DEFINE(PTRAUTH_USER_KEY_APIB, offsetof(struct ptrauth_keys_user, apib));
DEFINE(PTRAUTH_USER_KEY_APDA, offsetof(struct ptrauth_keys_user, apda));
DEFINE(PTRAUTH_USER_KEY_APDB, offsetof(struct ptrauth_keys_user, apdb));
DEFINE(PTRAUTH_USER_KEY_APGA, offsetof(struct ptrauth_keys_user, apga));
DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia));
BLANK();
#endif


@@ -148,42 +148,46 @@ alternative_cb_end
.endm
/* Check for MTE asynchronous tag check faults */
.macro check_mte_async_tcf, tmp, ti_flags
.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
.arch_extension lse
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
/*
* Asynchronous tag check faults are only possible in ASYNC (2) or
* ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
* set, so skip the check if it is unset.
*/
tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
mrs_s \tmp, SYS_TFSRE0_EL1
tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
mov \tmp, #_TIF_MTE_ASYNC_FAULT
add \ti_flags, tsk, #TSK_TI_FLAGS
stset \tmp, [\ti_flags]
msr_s SYS_TFSRE0_EL1, xzr
1:
#endif
.endm
/* Clear the MTE asynchronous tag check faults */
.macro clear_mte_async_tcf
.macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
/* See comment in check_mte_async_tcf above. */
tbz \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
dsb ish
msr_s SYS_TFSRE0_EL1, xzr
1:
alternative_else_nop_endif
#endif
.endm
.macro mte_set_gcr, tmp, tmp2
.macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
/*
* Calculate and set the exclude mask preserving
* the RRND (bit[16]) setting.
*/
mrs_s \tmp2, SYS_GCR_EL1
bfi \tmp2, \tmp, #0, #16
msr_s SYS_GCR_EL1, \tmp2
ubfx \tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
orr \tmp, \tmp, #SYS_GCR_EL1_RRND
msr_s SYS_GCR_EL1, \tmp
#endif
.endm
@@ -195,7 +199,6 @@ alternative_else_nop_endif
ldr_l \tmp, gcr_kernel_excl
mte_set_gcr \tmp, \tmp2
isb
1:
#endif
.endm
@@ -205,7 +208,7 @@ alternative_else_nop_endif
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
ldr \tmp, [\tsk, #THREAD_GCR_EL1_USER]
ldr \tmp, [\tsk, #THREAD_MTE_CTRL]
mte_set_gcr \tmp, \tmp2
1:
@@ -246,13 +249,48 @@ alternative_else_nop_endif
disable_step_tsk x19, x20
/* Check for asynchronous tag check faults in user space */
check_mte_async_tcf x22, x23
ldr x0, [tsk, THREAD_SCTLR_USER]
check_mte_async_tcf x22, x23, x0
#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
/*
* Enable IA for in-kernel PAC if the task had it disabled. Although
* this could be implemented with an unconditional MRS which would avoid
* a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
*
* Install the kernel IA key only if IA was enabled in the task. If IA
* was disabled on kernel exit then we would have left the kernel IA
* installed so there is no need to install it again.
*/
tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
b 2f
1:
mrs x0, sctlr_el1
orr x0, x0, SCTLR_ELx_ENIA
msr sctlr_el1, x0
2:
alternative_else_nop_endif
#endif
apply_ssbd 1, x22, x23
ptrauth_keys_install_kernel tsk, x20, x22, x23
mte_set_kernel_gcr x22, x23
/*
* Any non-self-synchronizing system register updates required for
* kernel entry should be placed before this point.
*/
alternative_if ARM64_MTE
isb
b 1f
alternative_else_nop_endif
alternative_if ARM64_HAS_ADDRESS_AUTH
isb
alternative_else_nop_endif
1:
scs_load tsk, x20
.else
add x21, sp, #S_FRAME_SIZE
@@ -367,8 +405,29 @@ alternative_else_nop_endif
3:
scs_save tsk, x0
/* No kernel C function calls after this as user keys are set. */
ptrauth_keys_install_user tsk, x0, x1, x2
/* Ignore asynchronous tag check faults in the uaccess routines */
ldr x0, [tsk, THREAD_SCTLR_USER]
clear_mte_async_tcf x0
#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
/*
* IA was enabled for in-kernel PAC. Disable it now if needed, or
* alternatively install the user's IA. All other per-task keys and
* SCTLR bits were updated on task switch.
*
* No kernel C function calls after this.
*/
tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
__ptrauth_keys_install_user tsk, x0, x1, x2
b 2f
1:
mrs x0, sctlr_el1
bic x0, x0, SCTLR_ELx_ENIA
msr sctlr_el1, x0
2:
alternative_else_nop_endif
#endif
mte_set_user_gcr tsk, x0, x1
@@ -784,8 +843,6 @@ SYM_CODE_START_LOCAL(ret_to_user)
cbnz x2, work_pending
finish_ret_to_user:
user_enter_irqoff
/* Ignore asynchronous tag check faults in the uaccess routines */
clear_mte_async_tcf
enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
bl stackleak_erase


@@ -4,6 +4,7 @@
*/
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/prctl.h>
@@ -26,6 +27,8 @@ u64 gcr_kernel_excl __ro_after_init;
static bool report_fault_once = true;
static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);
#ifdef CONFIG_KASAN_HW_TAGS
/* Whether the MTE asynchronous mode is enabled. */
DEFINE_STATIC_KEY_FALSE(mte_async_mode);
@@ -197,17 +200,29 @@ static void update_gcr_el1_excl(u64 excl)
sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
}
static void set_gcr_el1_excl(u64 excl)
static void mte_update_sctlr_user(struct task_struct *task)
{
current->thread.gcr_user_excl = excl;
/*
* SYS_GCR_EL1 will be set to current->thread.gcr_user_excl value
* by mte_set_user_gcr() in kernel_exit,
* This must be called with preemption disabled and can only be called
* on the current or next task since the CPU must match where the thread
* is going to run. The caller is responsible for calling
* update_sctlr_el1() later in the same preemption disabled block.
*/
unsigned long sctlr = task->thread.sctlr_user;
unsigned long mte_ctrl = task->thread.mte_ctrl;
unsigned long pref, resolved_mte_tcf;
pref = __this_cpu_read(mte_tcf_preferred);
resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
sctlr &= ~SCTLR_EL1_TCF0_MASK;
if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
sctlr |= SCTLR_EL1_TCF0_ASYNC;
else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
sctlr |= SCTLR_EL1_TCF0_SYNC;
task->thread.sctlr_user = sctlr;
}
void flush_mte_state(void)
void mte_thread_init_user(void)
{
if (!system_supports_mte())
return;
@@ -216,15 +231,14 @@ void flush_mte_state(void)
dsb(ish);
write_sysreg_s(0, SYS_TFSRE0_EL1);
clear_thread_flag(TIF_MTE_ASYNC_FAULT);
/* disable tag checking */
set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) |
SCTLR_EL1_TCF0_NONE);
/* reset tag generation mask */
set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
/* disable tag checking and reset tag generation mask */
set_mte_ctrl(current, 0);
}
void mte_thread_switch(struct task_struct *next)
{
mte_update_sctlr_user(next);
/*
* Check if an async tag exception occurred at EL1.
*
@@ -262,33 +276,23 @@ void mte_suspend_exit(void)
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
SYS_GCR_EL1_EXCL_MASK;
u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
SYS_GCR_EL1_EXCL_MASK) << MTE_CTRL_GCR_USER_EXCL_SHIFT;
if (!system_supports_mte())
return 0;
switch (arg & PR_MTE_TCF_MASK) {
case PR_MTE_TCF_NONE:
sctlr |= SCTLR_EL1_TCF0_NONE;
break;
case PR_MTE_TCF_SYNC:
sctlr |= SCTLR_EL1_TCF0_SYNC;
break;
case PR_MTE_TCF_ASYNC:
sctlr |= SCTLR_EL1_TCF0_ASYNC;
break;
default:
return -EINVAL;
}
if (arg & PR_MTE_TCF_ASYNC)
mte_ctrl |= MTE_CTRL_TCF_ASYNC;
if (arg & PR_MTE_TCF_SYNC)
mte_ctrl |= MTE_CTRL_TCF_SYNC;
if (task != current) {
task->thread.sctlr_user = sctlr;
task->thread.gcr_user_excl = gcr_excl;
} else {
set_task_sctlr_el1(sctlr);
set_gcr_el1_excl(gcr_excl);
task->thread.mte_ctrl = mte_ctrl;
if (task == current) {
preempt_disable();
mte_update_sctlr_user(task);
update_sctlr_el1(task->thread.sctlr_user);
preempt_enable();
}
return 0;
@@ -297,24 +301,18 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
long get_mte_ctrl(struct task_struct *task)
{
unsigned long ret;
u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;
u64 mte_ctrl = task->thread.mte_ctrl;
u64 incl = (~mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
SYS_GCR_EL1_EXCL_MASK;
if (!system_supports_mte())
return 0;
ret = incl << PR_MTE_TAG_SHIFT;
switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) {
case SCTLR_EL1_TCF0_NONE:
ret |= PR_MTE_TCF_NONE;
break;
case SCTLR_EL1_TCF0_SYNC:
ret |= PR_MTE_TCF_SYNC;
break;
case SCTLR_EL1_TCF0_ASYNC:
if (mte_ctrl & MTE_CTRL_TCF_ASYNC)
ret |= PR_MTE_TCF_ASYNC;
break;
}
if (mte_ctrl & MTE_CTRL_TCF_SYNC)
ret |= PR_MTE_TCF_SYNC;
return ret;
}
@@ -453,3 +451,54 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request,
return ret;
}
static ssize_t mte_tcf_preferred_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
switch (per_cpu(mte_tcf_preferred, dev->id)) {
case MTE_CTRL_TCF_ASYNC:
return sysfs_emit(buf, "async\n");
case MTE_CTRL_TCF_SYNC:
return sysfs_emit(buf, "sync\n");
default:
return sysfs_emit(buf, "???\n");
}
}
static ssize_t mte_tcf_preferred_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u64 tcf;
if (sysfs_streq(buf, "async"))
tcf = MTE_CTRL_TCF_ASYNC;
else if (sysfs_streq(buf, "sync"))
tcf = MTE_CTRL_TCF_SYNC;
else
return -EINVAL;
device_lock(dev);
per_cpu(mte_tcf_preferred, dev->id) = tcf;
device_unlock(dev);
return count;
}
static DEVICE_ATTR_RW(mte_tcf_preferred);
static int register_mte_tcf_preferred_sysctl(void)
{
unsigned int cpu;
if (!system_supports_mte())
return 0;
for_each_possible_cpu(cpu) {
per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC;
device_create_file(get_cpu_device(cpu),
&dev_attr_mte_tcf_preferred);
}
return 0;
}
subsys_initcall(register_mte_tcf_preferred_sysctl);


@@ -43,6 +43,71 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
get_random_bytes(&keys->apdb, sizeof(keys->apdb));
if (arg & PR_PAC_APGAKEY)
get_random_bytes(&keys->apga, sizeof(keys->apga));
ptrauth_keys_install_user(keys);
return 0;
}
static u64 arg_to_enxx_mask(unsigned long arg)
{
u64 sctlr_enxx_mask = 0;
WARN_ON(arg & ~PR_PAC_ENABLED_KEYS_MASK);
if (arg & PR_PAC_APIAKEY)
sctlr_enxx_mask |= SCTLR_ELx_ENIA;
if (arg & PR_PAC_APIBKEY)
sctlr_enxx_mask |= SCTLR_ELx_ENIB;
if (arg & PR_PAC_APDAKEY)
sctlr_enxx_mask |= SCTLR_ELx_ENDA;
if (arg & PR_PAC_APDBKEY)
sctlr_enxx_mask |= SCTLR_ELx_ENDB;
return sctlr_enxx_mask;
}
int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
unsigned long enabled)
{
u64 sctlr;
if (!system_supports_address_auth())
return -EINVAL;
if (is_compat_thread(task_thread_info(tsk)))
return -EINVAL;
if ((keys & ~PR_PAC_ENABLED_KEYS_MASK) || (enabled & ~keys))
return -EINVAL;
preempt_disable();
sctlr = tsk->thread.sctlr_user;
sctlr &= ~arg_to_enxx_mask(keys);
sctlr |= arg_to_enxx_mask(enabled);
tsk->thread.sctlr_user = sctlr;
if (tsk == current)
update_sctlr_el1(sctlr);
preempt_enable();
return 0;
}
int ptrauth_get_enabled_keys(struct task_struct *tsk)
{
int retval = 0;
if (!system_supports_address_auth())
return -EINVAL;
if (is_compat_thread(task_thread_info(tsk)))
return -EINVAL;
if (tsk->thread.sctlr_user & SCTLR_ELx_ENIA)
retval |= PR_PAC_APIAKEY;
if (tsk->thread.sctlr_user & SCTLR_ELx_ENIB)
retval |= PR_PAC_APIBKEY;
if (tsk->thread.sctlr_user & SCTLR_ELx_ENDA)
retval |= PR_PAC_APDAKEY;
if (tsk->thread.sctlr_user & SCTLR_ELx_ENDB)
retval |= PR_PAC_APDBKEY;
return retval;
}


@@ -337,7 +337,6 @@ void flush_thread(void)
tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
flush_tagged_addr_state();
flush_mte_state();
}
void release_thread(struct task_struct *dead_task)
@@ -538,27 +537,24 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
write_sysreg(val, cntkctl_el1);
}
static void update_sctlr_el1(u64 sctlr)
/*
* __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore
* this function must be called with preemption disabled and the update to
* sctlr_user must be made in the same preemption disabled block so that
* __switch_to() does not see the variable update before the SCTLR_EL1 one.
*/
void update_sctlr_el1(u64 sctlr)
{
sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK, sctlr);
/*
* EnIA must not be cleared while in the kernel as this is necessary for
* in-kernel PAC. It will be cleared on kernel exit if needed.
*/
sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);
/* ISB required for the kernel uaccess routines when setting TCF0. */
isb();
}
void set_task_sctlr_el1(u64 sctlr)
{
/*
* __switch_to() checks current->thread.sctlr as an
* optimisation. Disable preemption so that it does not see
* the variable update before the SCTLR_EL1 one.
*/
preempt_disable();
current->thread.sctlr_user = sctlr;
update_sctlr_el1(sctlr);
preempt_enable();
}
/*
* Thread switching.
*/
@@ -575,6 +571,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
uao_thread_switch(next);
ssbs_thread_switch(next);
erratum_1418040_thread_switch(prev, next);
ptrauth_thread_switch_user(next);
/*
* vendor hook is needed before the dsb(),
* because MPAM is related to cache maintenance.
@@ -666,7 +663,8 @@ void arch_setup_new_exec(void)
}
current->mm->context.flags = mmflags;
ptrauth_thread_init_user(current);
ptrauth_thread_init_user();
mte_thread_init_user();
if (task_spec_ssb_noexec(current)) {
arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,


@@ -908,6 +908,38 @@ static int pac_mask_get(struct task_struct *target,
return membuf_write(&to, &uregs, sizeof(uregs));
}
static int pac_enabled_keys_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
long enabled_keys = ptrauth_get_enabled_keys(target);
if (IS_ERR_VALUE(enabled_keys))
return enabled_keys;
return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
}
static int pac_enabled_keys_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
long enabled_keys = ptrauth_get_enabled_keys(target);
if (IS_ERR_VALUE(enabled_keys))
return enabled_keys;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
sizeof(long));
if (ret)
return ret;
return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
enabled_keys);
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
@@ -1073,6 +1105,7 @@ enum aarch64_regset {
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
REGSET_PAC_MASK,
REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
REGSET_PACA_KEYS,
REGSET_PACG_KEYS,
@@ -1159,6 +1192,14 @@ static const struct user_regset aarch64_regsets[] = {
.regset_get = pac_mask_get,
/* this cannot be set dynamically */
},
[REGSET_PAC_ENABLED_KEYS] = {
.core_note_type = NT_ARM_PAC_ENABLED_KEYS,
.n = 1,
.size = sizeof(long),
.align = sizeof(long),
.regset_get = pac_enabled_keys_get,
.set = pac_enabled_keys_set,
},
#ifdef CONFIG_CHECKPOINT_RESTORE
[REGSET_PACA_KEYS] = {
.core_note_type = NT_ARM_PACA_KEYS,


@@ -75,8 +75,9 @@ void notrace __cpu_suspend_exit(void)
*/
spectre_v4_enable_mitigation(NULL);
/* Restore additional MTE-specific configuration */
/* Restore additional feature-specific configuration */
mte_suspend_exit();
ptrauth_suspend_exit();
}
/*


@@ -17,6 +17,7 @@ CONFIG_IKHEADERS=y
CONFIG_UCLAMP_TASK=y
CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_UCLAMP_TASK_GROUP=y
@@ -233,6 +234,7 @@ CONFIG_NET_ACT_GACT=y
CONFIG_NET_ACT_MIRRED=y
CONFIG_NET_ACT_SKBEDIT=y
CONFIG_VSOCKETS=y
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_CAN=y
CONFIG_BT=y


@@ -18,7 +18,6 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
@@ -441,8 +440,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
* responsible for ensuring that that counter is
* globally visible before the queue is unfrozen.
*/
if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
!blk_queue_pm_only(q)) {
if (pm || !blk_queue_pm_only(q)) {
success = true;
} else {
percpu_ref_put(&q->q_usage_counter);
@@ -467,7 +465,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
wait_event(q->mq_freeze_wq,
(!q->mq_freeze_depth &&
blk_pm_resume_queue(pm, q)) ||
(pm || (blk_pm_request_resume(q),
!blk_queue_pm_only(q)))) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;


@@ -41,6 +41,8 @@
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include <trace/hooks/block.h>
static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
static void blk_mq_poll_stats_start(struct request_queue *q);
@@ -341,6 +343,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
}
data->hctx->queued++;
trace_android_vh_blk_rq_ctx_init(rq, tags, data, alloc_time_ns);
return rq;
}
@@ -2480,6 +2483,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
*/
rq_size = round_up(sizeof(struct request) + set->cmd_size,
cache_line_size());
trace_android_vh_blk_alloc_rqs(&rq_size, set, tags);
left = rq_size * depth;
for (i = 0; i < depth; ) {


@@ -6,14 +6,11 @@
#include <linux/pm_runtime.h>
#ifdef CONFIG_PM
static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
static inline void blk_pm_request_resume(struct request_queue *q)
{
if (!q->dev || !blk_queue_pm_only(q))
return 1; /* Nothing to do */
if (pm && q->rpm_status != RPM_SUSPENDED)
return 1; /* Request allowed */
pm_request_resume(q->dev);
return 0;
if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
q->rpm_status == RPM_SUSPENDING))
pm_request_resume(q->dev);
}
static inline void blk_pm_mark_last_busy(struct request *rq)
@@ -47,9 +44,8 @@ static inline void blk_pm_put_request(struct request *rq)
--rq->q->nr_pending;
}
#else
static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
static inline void blk_pm_request_resume(struct request_queue *q)
{
return 1;
}
static inline void blk_pm_mark_last_busy(struct request *rq)


@@ -1,5 +1,5 @@
BRANCH=android12-5.10
KMI_GENERATION=8
KMI_GENERATION=9
LLVM=1
DEPMOD=depmod


@@ -32,13 +32,35 @@ config CRYPTO_FIPS
certification. You should say no unless you know what
this is.
# CRYPTO_FIPS140 just enables the support in the kernel for loading fips140.ko.
# The module still needs to be built and loaded if you need FIPS 140 compliance.
config CRYPTO_FIPS140
def_bool y
depends on MODULES && ARM64 && ARM64_MODULE_PLTS
config CRYPTO_FIPS140_MOD
bool "Enable FIPS140 integrity self-checked loadable module"
bool "Enable FIPS 140 cryptographic module"
depends on LTO_CLANG && CRYPTO_FIPS140
help
This option enables building a loadable module fips140.ko, which
contains various crypto algorithms that are also built into vmlinux.
At load time, this module overrides the built-in implementations of
these algorithms with its implementations. It also runs self-tests on
these algorithms and verifies the integrity of its code and data. If
either of these steps fails, the kernel will panic.
This module is intended to be loaded at early boot time in order to
meet FIPS 140 and NIAP FPT_TST_EXT.1 requirements. It shouldn't be
used if you don't need to meet these requirements.
config CRYPTO_FIPS140_MOD_ERROR_INJECTION
bool "Support injecting failures into the FIPS 140 self-tests"
depends on CRYPTO_FIPS140_MOD
help
This option adds a module parameter "broken_alg" to the fips140 module
which can be used to fail the self-tests for a particular algorithm,
causing a kernel panic. This option is for FIPS lab testing only, and
it shouldn't be enabled on production systems.
config CRYPTO_ALGAPI
tristate


@@ -228,7 +228,7 @@ $(obj)/lib-crypto-%-fips.o: $(srctree)/lib/crypto/%.c FORCE
$(obj)/crypto-fips.a: $(addprefix $(obj)/,$(crypto-fips-objs)) FORCE
$(call if_changed,ar_and_symver)
-fips140-objs := fips140-module.o crypto-fips.a
+fips140-objs := fips140-module.o fips140-selftests.o crypto-fips.a
obj-m += fips140.o
CFLAGS_fips140-module.o += $(FIPS140_CFLAGS)


@@ -0,0 +1,65 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2021 Google LLC */
/*
* This header was automatically generated by gen_fips140_testvecs.py.
* Don't edit it directly.
*/
static const u8 fips_message[32] __initconst =
"This is a 32-byte test message.";
static const u8 fips_aes_key[16] __initconst = "128-bit AES key";
static const u8 fips_aes_iv[16] __initconst = "ABCDEFGHIJKL";
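/*
 * Note on the string-literal vectors above: the NUL terminator counts, so
 * "This is a 32-byte test message." is 31 characters + NUL = 32 bytes and
 * "128-bit AES key" is 15 + NUL = 16. Where the literal is shorter than
 * the array, as with the 12-character IV, C zero-fills the remaining bytes.
 */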
static const u8 fips_aes_cbc_ciphertext[32] __initconst =
"\xc4\x6d\xad\xa4\x04\x52\x11\x5a\x7a\xb3\x7c\x68\x85\x8d\x90\xf0"
"\x55\xc3\xd3\x35\xc1\x75\x31\x90\xdf\x90\x4b\x5a\x56\xfd\xa7\x89";
static const u8 fips_aes_ecb_ciphertext[32] __initconst =
"\xc1\x9d\xe6\xb8\xb2\x90\xff\xfe\xf2\x77\x18\xb0\x55\xd3\xee\xa9"
"\xe2\x6f\x4a\x32\x67\xfd\xb7\xa5\x2f\x4b\x6e\x1a\x86\x2b\x6e\x3a";
static const u8 fips_aes_ctr_ciphertext[32] __initconst =
"\x92\xbe\x23\xa1\x80\x88\x5d\x31\x27\xb3\x9c\x40\x58\x57\x1d\xde"
"\xc1\x8d\x5b\xe7\x42\x93\x09\xf8\xd4\xf7\x49\x42\xcf\x40\x62\x7e";
static const u8 fips_aes_gcm_assoc[22] __initconst = "associated data string";
static const u8 fips_aes_gcm_ciphertext[48] __initconst =
"\x37\x88\x3e\x1d\x58\x50\xda\x10\x07\xeb\x52\xdf\xea\x0a\x54\xd4"
"\x44\xbf\x88\x2a\xf3\x03\x03\x84\xaf\x8b\x96\xbd\xea\x65\x60\x6f"
"\x82\xfa\x51\xf4\x28\xad\x0c\xf1\xce\x0f\x91\xdd\x1a\x4c\x77\x5f";
static const u8 fips_aes_xts_key[32] __initconst =
"This is an AES-128-XTS key.";
static const u8 fips_aes_xts_ciphertext[32] __initconst =
"\x5e\xb9\x98\xd6\x26\xb3\x55\xbf\x44\xab\x3e\xae\x73\xc0\x81\xc9"
"\xf4\x29\x0e\x17\x1e\xc5\xc8\x90\x79\x99\xf1\x43\x3a\x23\x08\x5a";
static const u8 fips_hmac_key[16] __initconst = "128-bit HMAC key";
static const u8 fips_sha1_digest[20] __initconst =
"\x1b\x78\xc7\x4b\xd5\xd4\x83\xb1\x58\xc5\x96\x83\x4f\x16\x8d\x15"
"\xb4\xaa\x22\x8c";
static const u8 fips_sha256_digest[32] __initconst =
"\x4e\x11\x83\x0c\x53\x80\x1e\x5f\x9b\x38\x33\x38\xe8\x74\x43\xb0"
"\xc1\x3a\xbe\xbf\x75\xf0\x12\x0f\x21\x33\xf5\x16\x33\xf1\xb0\x81";
static const u8 fips_hmac_sha256_digest[32] __initconst =
"\x63\x0e\xb5\x73\x79\xfc\xaf\x5f\x86\xe3\xaf\xf0\xc8\x36\xef\xd5"
"\x35\x8d\x40\x25\x38\xb3\x65\x72\x98\xf3\x59\xd8\x1e\x54\x4c\xa1";
static const u8 fips_sha512_digest[64] __initconst =
"\x32\xe0\x44\x23\xbd\xe3\xec\x28\xbf\xf1\x34\x11\xd5\xae\xbf\xd5"
"\xc0\x8e\xb5\xa1\x04\xef\x2f\x07\x84\xf1\xd9\x83\x0f\x6c\x31\xab"
"\xf7\xe7\x57\xfa\xf7\xae\xf0\x6f\xb2\x16\x08\x32\xcf\xc7\xef\x35"
"\xb3\x3b\x51\xb9\xfd\xe7\xff\x5e\xb2\x8b\xc6\x79\xe6\x14\x04\xb4";
/*
* This header was automatically generated by gen_fips140_testvecs.py.
* Don't edit it directly.
*/


@@ -3,16 +3,17 @@
* Copyright 2021 Google LLC
* Author: Ard Biesheuvel <ardb@google.com>
*
- * This file is the core of the fips140.ko, which carries a number of crypto
- * algorithms and chaining mode templates that are also built into vmlinux.
- * This modules performs a load time integrity check, as mandated by FIPS 140,
- * and replaces registered crypto algorithms that appear on the FIPS 140 list
- * with ones provided by this module. This meets the FIPS 140 requirements for
- * a cryptographic software module.
+ * This file is the core of fips140.ko, which contains various crypto algorithms
+ * that are also built into vmlinux. At load time, this module overrides the
+ * built-in implementations of these algorithms with its implementations. It
+ * also runs self-tests on these algorithms and verifies the integrity of its
+ * code and data. If either of these steps fails, the kernel will panic.
+ *
+ * This module is intended to be loaded at early boot time in order to meet
+ * FIPS 140 and NIAP FPT_TST_EXT.1 requirements. It shouldn't be used if you
+ * don't need to meet these requirements.
*/
#define pr_fmt(fmt) "fips140: " fmt
#include <linux/ctype.h>
#include <linux/module.h>
#include <crypto/aead.h>
@@ -23,8 +24,18 @@
#include <crypto/rng.h>
#include <trace/hooks/fips140.h>
#include "fips140-module.h"
#include "internal.h"
/*
* This option allows deliberately failing the self-tests for a particular
* algorithm. This is for FIPS lab testing only.
*/
#ifdef CONFIG_CRYPTO_FIPS140_MOD_ERROR_INJECTION
char *fips140_broken_alg;
module_param_named(broken_alg, fips140_broken_alg, charp, 0);
#endif
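/*
 * Hypothetical usage sketch (not part of the patch itself): a FIPS lab
 * could force a failure for one algorithm with e.g.
 *
 *	insmod fips140.ko broken_alg="cbc(aes)"
 *
 * fips_check_result() in fips140-selftests.c then corrupts that
 * algorithm's test output, the self-test fails, and fips140_init()
 * panics the kernel.
 */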
/*
* FIPS 140-2 prefers the use of HMAC with a public key over a plain hash.
*/
@@ -52,6 +63,12 @@ const u32 *__initcall_start = &__initcall_start_marker;
const u8 *__text_start = &__fips140_text_start;
const u8 *__rodata_start = &__fips140_rodata_start;
/*
* The list of the crypto API algorithms (by cra_name) that will be unregistered
* by this module, in preparation for the module registering its own
* implementation(s) of them. When adding a new algorithm here, make sure to
* consider whether it needs a self-test added to fips140_selftests[] as well.
*/
static const char * const fips140_algorithms[] __initconst = {
"aes",
@@ -566,13 +583,16 @@ fips140_init(void)
*/
synchronize_rcu_tasks();
-	/* insert self tests here */
+	if (!fips140_run_selftests())
+		goto panic;
/*
* It may seem backward to perform the integrity check last, but this
* is intentional: the check itself uses hmac(sha256) which is one of
* the algorithms that are replaced with versions from this module, and
-	 * the integrity check must use the replacement version.
+	 * the integrity check must use the replacement version. Also, to be
+	 * ready for FIPS 140-3, the integrity check algorithm must have already
+	 * been self-tested.
*/
if (!check_fips140_module_hmac()) {

crypto/fips140-module.h (new file, 20 lines)

@@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2021 Google LLC
*/
#ifndef _CRYPTO_FIPS140_MODULE_H
#define _CRYPTO_FIPS140_MODULE_H
#include <linux/module.h>
#undef pr_fmt
#define pr_fmt(fmt) "fips140: " fmt
#ifdef CONFIG_CRYPTO_FIPS140_MOD_ERROR_INJECTION
extern char *fips140_broken_alg;
#endif
bool __init __must_check fips140_run_selftests(void);
#endif /* _CRYPTO_FIPS140_MODULE_H */

crypto/fips140-selftests.c (new file, 867 lines)

@@ -0,0 +1,867 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2021 Google LLC
*
* Authors: Elena Petrova <lenaptr@google.com>,
* Eric Biggers <ebiggers@google.com>
*
* Self-tests of fips140.ko cryptographic functionality. These are run at
* module load time to fulfill FIPS 140 and NIAP FPT_TST_EXT.1 requirements.
*
* The actual requirements for these self-tests are somewhat vague, but
* section 9 ("Self-Tests") of the FIPS 140-2 Implementation Guidance document
* (https://csrc.nist.gov/csrc/media/projects/cryptographic-module-validation-program/documents/fips140-2/fips1402ig.pdf)
* is somewhat helpful. Basically, all implementations of all FIPS approved
* algorithms (including modes of operation) must be tested. However:
*
* - If an implementation won't be used, it doesn't have to be tested. So
* when multiple implementations of the same algorithm are registered with
* the crypto API, we only have to test the default (highest-priority) one.
*
* - There are provisions for skipping tests that are already sufficiently
* covered by other tests. E.g., HMAC-SHA256 may cover SHA-256.
*
* - Only one test vector is required per algorithm, and it can be generated
* by any known-good implementation or taken from any official document.
*
* - For ciphers, both encryption and decryption must be tested.
*
* - Only one key size per algorithm needs to be tested.
*
* See fips140_selftests[] for the list of tests we've selected. Currently, all
* our test vectors except the DRBG ones were generated by the script
* tools/crypto/gen_fips140_testvecs.py, using the known-good implementations in
* the Python packages hashlib, pycryptodome, and cryptography. The DRBG test
* vectors were manually extracted from
* https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Algorithm-Validation-Program/documents/drbg/drbgtestvectors.zip.
*
* Note that we don't reuse the upstream crypto API's self-tests
* (crypto/testmgr.{c,h}), for several reasons:
*
* - To meet FIPS requirements, the self-tests must be located within the FIPS
* module boundary (fips140.ko). But testmgr is integrated into the crypto
* API framework and can't be extracted into the module.
*
* - testmgr is much more heavyweight than required for FIPS and NIAP; it
* tests more algorithms and does more tests per algorithm, as it's meant to
* do proper testing and not just meet certification requirements. We need
* tests that can run with minimal overhead on every boot-up.
*
* - Despite being more heavyweight in general, testmgr doesn't test the
* SHA-256 and AES library APIs, even though those APIs are needed here.
*/
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/drbg.h>
#include <crypto/hash.h>
#include <crypto/internal/cipher.h>
#include <crypto/rng.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include "fips140-module.h"
/* Test vector for a block cipher algorithm */
struct blockcipher_testvec {
const u8 *key;
size_t key_size;
const u8 *plaintext;
const u8 *ciphertext;
size_t block_size;
};
/* Test vector for an AEAD algorithm */
struct aead_testvec {
const u8 *key;
size_t key_size;
const u8 *iv;
size_t iv_size;
const u8 *assoc;
size_t assoc_size;
const u8 *plaintext;
size_t plaintext_size;
const u8 *ciphertext;
size_t ciphertext_size;
};
/* Test vector for a length-preserving encryption algorithm */
struct skcipher_testvec {
const u8 *key;
size_t key_size;
const u8 *iv;
size_t iv_size;
const u8 *plaintext;
const u8 *ciphertext;
size_t message_size;
};
/* Test vector for a hash algorithm */
struct hash_testvec {
const u8 *key;
size_t key_size;
const u8 *message;
size_t message_size;
const u8 *digest;
size_t digest_size;
};
/* Test vector for a DRBG algorithm */
struct drbg_testvec {
const u8 *entropy;
size_t entropy_size;
const u8 *pers;
size_t pers_size;
const u8 *entpr_a;
const u8 *entpr_b;
size_t entpr_size;
const u8 *add_a;
const u8 *add_b;
size_t add_size;
const u8 *output;
size_t out_size;
};
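/*
 * Mapping sketch (an assumption based on the NIST CAVS files cited above):
 * entropy is the EntropyInput, pers the personalization string,
 * entpr_a/entpr_b the per-request entropy for the prediction-resistant
 * variants, add_a/add_b the two AdditionalInput strings, and output the
 * expected ReturnedBits.
 */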
/*
* A struct which specifies an algorithm name (using crypto API syntax), a test
* function for that algorithm, and a test vector used by that test function.
*/
struct fips_test {
const char *alg;
int __must_check (*func)(const struct fips_test *test);
union {
struct blockcipher_testvec blockcipher;
struct aead_testvec aead;
struct skcipher_testvec skcipher;
struct hash_testvec hash;
struct drbg_testvec drbg;
};
};
/* Maximum IV size (in bytes) among any algorithm tested here */
#define MAX_IV_SIZE 16
static int __init __must_check
fips_check_result(const struct fips_test *test, u8 *result,
const u8 *expected_result, size_t result_size,
const char *operation)
{
#ifdef CONFIG_CRYPTO_FIPS140_MOD_ERROR_INJECTION
/* Inject a failure (via corrupting the result) if requested. */
if (fips140_broken_alg && strcmp(test->alg, fips140_broken_alg) == 0)
result[0] ^= 0xff;
#endif
if (memcmp(result, expected_result, result_size) != 0) {
pr_err("wrong result from %s %s\n", test->alg, operation);
return -EBADMSG;
}
return 0;
}
/*
* None of the algorithms should be ASYNC, as the FIPS module doesn't register
* any ASYNC algorithms. (The ASYNC flag is only declared by hardware
* algorithms, which would need their own FIPS certification.)
*
* Ideally we would verify alg->cra_module == THIS_MODULE here as well, but that
* doesn't work because the files are compiled as built-in code.
*/
static int __init __must_check
fips_validate_alg(const struct crypto_alg *alg)
{
if (alg->cra_flags & CRYPTO_ALG_ASYNC) {
pr_err("unexpectedly got async implementation of %s (%s)\n",
alg->cra_name, alg->cra_driver_name);
return -EINVAL;
}
return 0;
}
/* Test a block cipher using the crypto_cipher API. */
static int __init __must_check
fips_test_blockcipher(const struct fips_test *test)
{
const struct blockcipher_testvec *vec = &test->blockcipher;
struct crypto_cipher *tfm;
u8 block[MAX_CIPHER_BLOCKSIZE];
int err;
if (WARN_ON(vec->block_size > MAX_CIPHER_BLOCKSIZE))
return -EINVAL;
tfm = crypto_alloc_cipher(test->alg, 0, 0);
if (IS_ERR(tfm)) {
err = PTR_ERR(tfm);
pr_err("failed to allocate %s tfm: %d\n", test->alg, err);
return err;
}
err = fips_validate_alg(tfm->base.__crt_alg);
if (err)
goto out;
if (crypto_cipher_blocksize(tfm) != vec->block_size) {
pr_err("%s has wrong block size\n", test->alg);
err = -EINVAL;
goto out;
}
err = crypto_cipher_setkey(tfm, vec->key, vec->key_size);
if (err) {
pr_err("failed to set %s key: %d\n", test->alg, err);
goto out;
}
/* Encrypt the plaintext, then verify the resulting ciphertext. */
memcpy(block, vec->plaintext, vec->block_size);
crypto_cipher_encrypt_one(tfm, block, block);
err = fips_check_result(test, block, vec->ciphertext, vec->block_size,
"encryption");
if (err)
goto out;
/* Decrypt the ciphertext, then verify the resulting plaintext. */
crypto_cipher_decrypt_one(tfm, block, block);
err = fips_check_result(test, block, vec->plaintext, vec->block_size,
"decryption");
out:
crypto_free_cipher(tfm);
return err;
}
/*
* Test for plain AES (no mode of operation). We test this separately from the
* AES modes because the implementation of AES which is used by the "aes"
* crypto_cipher isn't necessarily the same as that used by the AES modes such
* as "ecb(aes)". Similarly, the aes_{encrypt,decrypt}() library functions may
* use a different implementation as well, so we test them separately too.
*/
static int __init __must_check
fips_test_aes(const struct fips_test *test)
{
const struct blockcipher_testvec *vec = &test->blockcipher;
struct crypto_aes_ctx ctx;
u8 block[AES_BLOCK_SIZE];
int err;
if (WARN_ON(vec->block_size != AES_BLOCK_SIZE))
return -EINVAL;
err = fips_test_blockcipher(test);
if (err)
return err;
err = aes_expandkey(&ctx, vec->key, vec->key_size);
if (err) {
pr_err("aes_expandkey() failed: %d\n", err);
return err;
}
aes_encrypt(&ctx, block, vec->plaintext);
err = fips_check_result(test, block, vec->ciphertext, AES_BLOCK_SIZE,
"encryption (library API)");
if (err)
return err;
aes_decrypt(&ctx, block, block);
return fips_check_result(test, block, vec->plaintext, AES_BLOCK_SIZE,
"decryption (library API)");
}
/* Test a length-preserving symmetric cipher using the crypto_skcipher API. */
static int __init __must_check
fips_test_skcipher(const struct fips_test *test)
{
const struct skcipher_testvec *vec = &test->skcipher;
struct crypto_skcipher *tfm;
struct skcipher_request *req = NULL;
u8 *message = NULL;
struct scatterlist sg;
u8 iv[MAX_IV_SIZE];
int err;
if (WARN_ON(vec->iv_size > MAX_IV_SIZE))
return -EINVAL;
tfm = crypto_alloc_skcipher(test->alg, 0, 0);
if (IS_ERR(tfm)) {
err = PTR_ERR(tfm);
pr_err("failed to allocate %s tfm: %d\n", test->alg, err);
return err;
}
err = fips_validate_alg(&crypto_skcipher_alg(tfm)->base);
if (err)
goto out;
if (crypto_skcipher_ivsize(tfm) != vec->iv_size) {
pr_err("%s has wrong IV size\n", test->alg);
err = -EINVAL;
goto out;
}
req = skcipher_request_alloc(tfm, GFP_KERNEL);
message = kmemdup(vec->plaintext, vec->message_size, GFP_KERNEL);
if (!req || !message) {
err = -ENOMEM;
goto out;
}
sg_init_one(&sg, message, vec->message_size);
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, vec->message_size, iv);
err = crypto_skcipher_setkey(tfm, vec->key, vec->key_size);
if (err) {
pr_err("failed to set %s key: %d\n", test->alg, err);
goto out;
}
/* Encrypt the plaintext, then verify the resulting ciphertext. */
memcpy(iv, vec->iv, vec->iv_size);
err = crypto_skcipher_encrypt(req);
if (err) {
pr_err("%s encryption failed: %d\n", test->alg, err);
goto out;
}
err = fips_check_result(test, message, vec->ciphertext,
vec->message_size, "encryption");
if (err)
goto out;
/* Decrypt the ciphertext, then verify the resulting plaintext. */
memcpy(iv, vec->iv, vec->iv_size);
err = crypto_skcipher_decrypt(req);
if (err) {
pr_err("%s decryption failed: %d\n", test->alg, err);
goto out;
}
err = fips_check_result(test, message, vec->plaintext,
vec->message_size, "decryption");
out:
kfree(message);
skcipher_request_free(req);
crypto_free_skcipher(tfm);
return err;
}
/* Test an AEAD using the crypto_aead API. */
static int __init __must_check
fips_test_aead(const struct fips_test *test)
{
const struct aead_testvec *vec = &test->aead;
const int tag_size = vec->ciphertext_size - vec->plaintext_size;
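	/* For the gcm(aes) vector in this file, tag_size = 48 - 32 = 16 bytes. */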
struct crypto_aead *tfm;
struct aead_request *req = NULL;
u8 *assoc = NULL;
u8 *message = NULL;
struct scatterlist sg[2];
int sg_idx = 0;
u8 iv[MAX_IV_SIZE];
int err;
if (WARN_ON(vec->iv_size > MAX_IV_SIZE))
return -EINVAL;
if (WARN_ON(vec->ciphertext_size <= vec->plaintext_size))
return -EINVAL;
tfm = crypto_alloc_aead(test->alg, 0, 0);
if (IS_ERR(tfm)) {
err = PTR_ERR(tfm);
pr_err("failed to allocate %s tfm: %d\n", test->alg, err);
return err;
}
err = fips_validate_alg(&crypto_aead_alg(tfm)->base);
if (err)
goto out;
if (crypto_aead_ivsize(tfm) != vec->iv_size) {
pr_err("%s has wrong IV size\n", test->alg);
err = -EINVAL;
goto out;
}
req = aead_request_alloc(tfm, GFP_KERNEL);
assoc = kmemdup(vec->assoc, vec->assoc_size, GFP_KERNEL);
message = kzalloc(vec->ciphertext_size, GFP_KERNEL);
if (!req || !assoc || !message) {
err = -ENOMEM;
goto out;
}
memcpy(message, vec->plaintext, vec->plaintext_size);
sg_init_table(sg, ARRAY_SIZE(sg));
if (vec->assoc_size)
sg_set_buf(&sg[sg_idx++], assoc, vec->assoc_size);
sg_set_buf(&sg[sg_idx++], message, vec->ciphertext_size);
aead_request_set_ad(req, vec->assoc_size);
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
err = crypto_aead_setkey(tfm, vec->key, vec->key_size);
if (err) {
pr_err("failed to set %s key: %d\n", test->alg, err);
goto out;
}
err = crypto_aead_setauthsize(tfm, tag_size);
if (err) {
pr_err("failed to set %s authentication tag size: %d\n",
test->alg, err);
goto out;
}
/*
* Encrypt the plaintext, then verify the resulting ciphertext (which
* includes the authentication tag).
*/
memcpy(iv, vec->iv, vec->iv_size);
aead_request_set_crypt(req, sg, sg, vec->plaintext_size, iv);
err = crypto_aead_encrypt(req);
if (err) {
pr_err("%s encryption failed: %d\n", test->alg, err);
goto out;
}
err = fips_check_result(test, message, vec->ciphertext,
vec->ciphertext_size, "encryption");
if (err)
goto out;
/*
* Decrypt the ciphertext (which includes the authentication tag), then
* verify the resulting plaintext.
*/
memcpy(iv, vec->iv, vec->iv_size);
aead_request_set_crypt(req, sg, sg, vec->ciphertext_size, iv);
err = crypto_aead_decrypt(req);
if (err) {
pr_err("%s decryption failed: %d\n", test->alg, err);
goto out;
}
err = fips_check_result(test, message, vec->plaintext,
vec->plaintext_size, "decryption");
out:
kfree(message);
kfree(assoc);
aead_request_free(req);
crypto_free_aead(tfm);
return err;
}
/*
* Test a hash algorithm using the crypto_shash API.
*
* Note that we don't need to test the crypto_ahash API too, since none of the
* hash algorithms in the FIPS module have the ASYNC flag, and thus there will
* be no hash algorithms that can be accessed only through crypto_ahash.
*/
static int __init __must_check
fips_test_hash(const struct fips_test *test)
{
const struct hash_testvec *vec = &test->hash;
struct crypto_shash *tfm;
u8 digest[HASH_MAX_DIGESTSIZE];
int err;
if (WARN_ON(vec->digest_size > HASH_MAX_DIGESTSIZE))
return -EINVAL;
tfm = crypto_alloc_shash(test->alg, 0, 0);
if (IS_ERR(tfm)) {
err = PTR_ERR(tfm);
pr_err("failed to allocate %s tfm: %d\n", test->alg, err);
return err;
}
err = fips_validate_alg(&crypto_shash_alg(tfm)->base);
if (err)
goto out;
if (crypto_shash_digestsize(tfm) != vec->digest_size) {
pr_err("%s has wrong digest size\n", test->alg);
err = -EINVAL;
goto out;
}
if (vec->key) {
err = crypto_shash_setkey(tfm, vec->key, vec->key_size);
if (err) {
pr_err("failed to set %s key: %d\n", test->alg, err);
goto out;
}
}
err = crypto_shash_tfm_digest(tfm, vec->message, vec->message_size,
digest);
if (err) {
pr_err("%s digest computation failed: %d\n", test->alg, err);
goto out;
}
err = fips_check_result(test, digest, vec->digest, vec->digest_size,
"digest");
out:
crypto_free_shash(tfm);
return err;
}
/*
* Test the sha256() library function, as it may not be covered by the "sha256"
* crypto_shash, and thus may not be covered by the "hmac(sha256)" test we do.
*/
static int __init __must_check
fips_test_sha256_library(const struct fips_test *test)
{
const struct hash_testvec *vec = &test->hash;
u8 digest[SHA256_DIGEST_SIZE];
if (WARN_ON(vec->digest_size != SHA256_DIGEST_SIZE))
return -EINVAL;
sha256(vec->message, vec->message_size, digest);
return fips_check_result(test, digest, vec->digest, vec->digest_size,
"digest (library API)");
}
/* Test a DRBG using the crypto_rng API. */
static int __init __must_check
fips_test_drbg(const struct fips_test *test)
{
const struct drbg_testvec *vec = &test->drbg;
struct crypto_rng *rng;
u8 *output = NULL;
struct drbg_test_data test_data;
struct drbg_string addtl, pers, testentropy;
int err;
rng = crypto_alloc_rng(test->alg, 0, 0);
if (IS_ERR(rng)) {
err = PTR_ERR(rng);
pr_err("failed to allocate %s tfm: %d\n", test->alg, err);
return err;
}
err = fips_validate_alg(&crypto_rng_alg(rng)->base);
if (err)
goto out;
output = kzalloc(vec->out_size, GFP_KERNEL);
if (!output) {
err = -ENOMEM;
goto out;
}
/*
* Initialize the DRBG with the entropy and personalization string given
* in the test vector.
*/
test_data.testentropy = &testentropy;
drbg_string_fill(&testentropy, vec->entropy, vec->entropy_size);
drbg_string_fill(&pers, vec->pers, vec->pers_size);
err = crypto_drbg_reset_test(rng, &pers, &test_data);
if (err) {
pr_err("failed to reset %s\n", test->alg);
goto out;
}
/*
* Generate some random bytes using the additional data string provided
* in the test vector. Also use the additional entropy if provided
* (relevant for the prediction-resistant DRBG variants only).
*/
drbg_string_fill(&addtl, vec->add_a, vec->add_size);
if (vec->entpr_size) {
drbg_string_fill(&testentropy, vec->entpr_a, vec->entpr_size);
err = crypto_drbg_get_bytes_addtl_test(rng, output,
vec->out_size, &addtl,
&test_data);
} else {
err = crypto_drbg_get_bytes_addtl(rng, output, vec->out_size,
&addtl);
}
if (err) {
pr_err("failed to get bytes from %s (try 1): %d\n",
test->alg, err);
goto out;
}
/*
* Do the same again, using a second additional data string, and (when
* applicable) a second additional entropy string.
*/
drbg_string_fill(&addtl, vec->add_b, vec->add_size);
if (vec->entpr_size) {
drbg_string_fill(&testentropy, vec->entpr_b, vec->entpr_size);
err = crypto_drbg_get_bytes_addtl_test(rng, output,
vec->out_size, &addtl,
&test_data);
} else {
err = crypto_drbg_get_bytes_addtl(rng, output, vec->out_size,
&addtl);
}
if (err) {
pr_err("failed to get bytes from %s (try 2): %d\n",
test->alg, err);
goto out;
}
/* Check that the DRBG generated the expected output. */
err = fips_check_result(test, output, vec->output, vec->out_size,
"get_bytes");
out:
kfree(output);
crypto_free_rng(rng);
return err;
}
/* Include the test vectors generated by the Python script. */
#include "fips140-generated-testvecs.h"
/* List of all self-tests. Keep this in sync with fips140_algorithms[]. */
static const struct fips_test fips140_selftests[] __initconst = {
/*
* Tests for AES and AES modes.
*
* The full list of AES algorithms we potentially need to test are AES
* by itself, AES-CBC, AES-CTR, AES-ECB, AES-GCM, and AES-XTS. We can
* follow the FIPS 140-2 Implementation Guidance (IG) document to try to
* reduce this list, but we run into the issue that the architecture-
* specific implementations of these algorithms in Linux often don't
* share the "same" underlying AES implementation. E.g., the ARMv8 CE
* optimized implementations issue ARMv8 CE instructions directly rather
* than going through a separate AES implementation. In this case,
* separate tests are needed according to section 9.2 of the IG.
*/
{
.alg = "aes",
.func = fips_test_aes,
.blockcipher = {
.key = fips_aes_key,
.key_size = sizeof(fips_aes_key),
.plaintext = fips_message,
.ciphertext = fips_aes_ecb_ciphertext,
.block_size = 16,
}
}, {
.alg = "cbc(aes)",
.func = fips_test_skcipher,
.skcipher = {
.key = fips_aes_key,
.key_size = sizeof(fips_aes_key),
.iv = fips_aes_iv,
.iv_size = sizeof(fips_aes_iv),
.plaintext = fips_message,
.ciphertext = fips_aes_cbc_ciphertext,
.message_size = sizeof(fips_message),
}
}, {
.alg = "ctr(aes)",
.func = fips_test_skcipher,
.skcipher = {
.key = fips_aes_key,
.key_size = sizeof(fips_aes_key),
.iv = fips_aes_iv,
.iv_size = sizeof(fips_aes_iv),
.plaintext = fips_message,
.ciphertext = fips_aes_ctr_ciphertext,
.message_size = sizeof(fips_message),
}
}, {
.alg = "ecb(aes)",
.func = fips_test_skcipher,
.skcipher = {
.key = fips_aes_key,
.key_size = sizeof(fips_aes_key),
.plaintext = fips_message,
.ciphertext = fips_aes_ecb_ciphertext,
.message_size = sizeof(fips_message)
}
}, {
.alg = "gcm(aes)",
.func = fips_test_aead,
.aead = {
.key = fips_aes_key,
.key_size = sizeof(fips_aes_key),
.iv = fips_aes_iv,
/* The GCM implementation assumes an IV size of 12. */
.iv_size = 12,
.assoc = fips_aes_gcm_assoc,
.assoc_size = sizeof(fips_aes_gcm_assoc),
.plaintext = fips_message,
.plaintext_size = sizeof(fips_message),
.ciphertext = fips_aes_gcm_ciphertext,
.ciphertext_size = sizeof(fips_aes_gcm_ciphertext),
}
}, {
.alg = "xts(aes)",
.func = fips_test_skcipher,
.skcipher = {
.key = fips_aes_xts_key,
.key_size = sizeof(fips_aes_xts_key),
.iv = fips_aes_iv,
.iv_size = sizeof(fips_aes_iv),
.plaintext = fips_message,
.ciphertext = fips_aes_xts_ciphertext,
.message_size = sizeof(fips_message),
}
/*
* Tests for SHA-1, SHA-256, HMAC-SHA256, and SHA-512.
*
* The selection of these specific tests follows the guidance from
* section 9 of the FIPS 140-2 Implementation Guidance (IG) document to
* achieve a minimal list of tests, rather than testing all of
* SHA-{1,224,256,384,512} and HMAC-SHA{1,224,256,384,512}. As per the
* IG, testing SHA-224 is only required if SHA-256 isn't implemented,
* and testing SHA-384 is only required if SHA-512 isn't implemented.
* Also, HMAC only has to be tested with one underlying SHA, and the
* HMAC test also fulfills the test for its underlying SHA. That would
* result in a test list of e.g. SHA-1, HMAC-SHA256, and SHA-512.
*
* However we also need to take into account cases where implementations
* aren't shared in the "natural" way assumed by the IG. Currently the
* only known exception w.r.t. SHA-* and HMAC-* is the sha256() library
* function which may not be covered by the test of the "hmac(sha256)"
* crypto_shash. So, we test sha256() separately.
*/
}, {
.alg = "sha1",
.func = fips_test_hash,
.hash = {
.message = fips_message,
.message_size = sizeof(fips_message),
.digest = fips_sha1_digest,
.digest_size = sizeof(fips_sha1_digest)
}
}, {
.alg = "sha256",
.func = fips_test_sha256_library,
.hash = {
.message = fips_message,
.message_size = sizeof(fips_message),
.digest = fips_sha256_digest,
.digest_size = sizeof(fips_sha256_digest)
}
}, {
.alg = "hmac(sha256)",
.func = fips_test_hash,
.hash = {
.key = fips_hmac_key,
.key_size = sizeof(fips_hmac_key),
.message = fips_message,
.message_size = sizeof(fips_message),
.digest = fips_hmac_sha256_digest,
.digest_size = sizeof(fips_hmac_sha256_digest)
}
}, {
.alg = "sha512",
.func = fips_test_hash,
.hash = {
.message = fips_message,
.message_size = sizeof(fips_message),
.digest = fips_sha512_digest,
.digest_size = sizeof(fips_sha512_digest)
}
/*
* Tests for DRBG algorithms.
*
* Only the default variant (the one that users get when they request
* "stdrng") is required to be tested, as we don't consider the other
* variants to be used / usable in the FIPS security policy. This is
* similar to how e.g. we don't test both "xts(aes-generic)" and
* "xts-aes-ce" but rather just "xts(aes)".
*
* Currently the default one is "drbg_nopr_hmac_sha256"; however, just
* in case, we also test the prediction-resistant variant.
*/
}, {
.alg = "drbg_nopr_hmac_sha256",
.func = fips_test_drbg,
.drbg = {
.entropy =
"\xf9\x7a\x3c\xfd\x91\xfa\xa0\x46\xb9\xe6\x1b\x94"
"\x93\xd4\x36\xc4\x93\x1f\x60\x4b\x22\xf1\x08\x15"
"\x21\xb3\x41\x91\x51\xe8\xff\x06\x11\xf3\xa7\xd4"
"\x35\x95\x35\x7d\x58\x12\x0b\xd1\xe2\xdd\x8a\xed",
.entropy_size = 48,
.output =
"\xc6\x87\x1c\xff\x08\x24\xfe\x55\xea\x76\x89\xa5"
"\x22\x29\x88\x67\x30\x45\x0e\x5d\x36\x2d\xa5\xbf"
"\x59\x0d\xcf\x9a\xcd\x67\xfe\xd4\xcb\x32\x10\x7d"
"\xf5\xd0\x39\x69\xa6\x6b\x1f\x64\x94\xfd\xf5\xd6"
"\x3d\x5b\x4d\x0d\x34\xea\x73\x99\xa0\x7d\x01\x16"
"\x12\x6d\x0d\x51\x8c\x7c\x55\xba\x46\xe1\x2f\x62"
"\xef\xc8\xfe\x28\xa5\x1c\x9d\x42\x8e\x6d\x37\x1d"
"\x73\x97\xab\x31\x9f\xc7\x3d\xed\x47\x22\xe5\xb4"
"\xf3\x00\x04\x03\x2a\x61\x28\xdf\x5e\x74\x97\xec"
"\xf8\x2c\xa7\xb0\xa5\x0e\x86\x7e\xf6\x72\x8a\x4f"
"\x50\x9a\x8c\x85\x90\x87\x03\x9c",
.out_size = 128,
.add_a =
"\x51\x72\x89\xaf\xe4\x44\xa0\xfe\x5e\xd1\xa4\x1d"
"\xbb\xb5\xeb\x17\x15\x00\x79\xbd\xd3\x1e\x29\xcf"
"\x2f\xf3\x00\x34\xd8\x26\x8e\x3b",
.add_b =
"\x88\x02\x8d\x29\xef\x80\xb4\xe6\xf0\xfe\x12\xf9"
"\x1d\x74\x49\xfe\x75\x06\x26\x82\xe8\x9c\x57\x14"
"\x40\xc0\xc9\xb5\x2c\x42\xa6\xe0",
.add_size = 32,
}
}, {
.alg = "drbg_pr_hmac_sha256",
.func = fips_test_drbg,
.drbg = {
.entropy =
"\xc7\xcc\xbc\x67\x7e\x21\x66\x1e\x27\x2b\x63\xdd"
"\x3a\x78\xdc\xdf\x66\x6d\x3f\x24\xae\xcf\x37\x01"
"\xa9\x0d\x89\x8a\xa7\xdc\x81\x58\xae\xb2\x10\x15"
"\x7e\x18\x44\x6d\x13\xea\xdf\x37\x85\xfe\x81\xfb",
.entropy_size = 48,
.entpr_a =
"\x7b\xa1\x91\x5b\x3c\x04\xc4\x1b\x1d\x19\x2f\x1a"
"\x18\x81\x60\x3c\x6c\x62\x91\xb7\xe9\xf5\xcb\x96"
"\xbb\x81\x6a\xcc\xb5\xae\x55\xb6",
.entpr_b =
"\x99\x2c\xc7\x78\x7e\x3b\x88\x12\xef\xbe\xd3\xd2"
"\x7d\x2a\xa5\x86\xda\x8d\x58\x73\x4a\x0a\xb2\x2e"
"\xbb\x4c\x7e\xe3\x9a\xb6\x81\xc1",
.entpr_size = 32,
.output =
"\x95\x6f\x95\xfc\x3b\xb7\xfe\x3e\xd0\x4e\x1a\x14"
"\x6c\x34\x7f\x7b\x1d\x0d\x63\x5e\x48\x9c\x69\xe6"
"\x46\x07\xd2\x87\xf3\x86\x52\x3d\x98\x27\x5e\xd7"
"\x54\xe7\x75\x50\x4f\xfb\x4d\xfd\xac\x2f\x4b\x77"
"\xcf\x9e\x8e\xcc\x16\xa2\x24\xcd\x53\xde\x3e\xc5"
"\x55\x5d\xd5\x26\x3f\x89\xdf\xca\x8b\x4e\x1e\xb6"
"\x88\x78\x63\x5c\xa2\x63\x98\x4e\x6f\x25\x59\xb1"
"\x5f\x2b\x23\xb0\x4b\xa5\x18\x5d\xc2\x15\x74\x40"
"\x59\x4c\xb4\x1e\xcf\x9a\x36\xfd\x43\xe2\x03\xb8"
"\x59\x91\x30\x89\x2a\xc8\x5a\x43\x23\x7c\x73\x72"
"\xda\x3f\xad\x2b\xba\x00\x6b\xd1",
.out_size = 128,
.add_a =
"\x18\xe8\x17\xff\xef\x39\xc7\x41\x5c\x73\x03\x03"
"\xf6\x3d\xe8\x5f\xc8\xab\xe4\xab\x0f\xad\xe8\xd6"
"\x86\x88\x55\x28\xc1\x69\xdd\x76",
.add_b =
"\xac\x07\xfc\xbe\x87\x0e\xd3\xea\x1f\x7e\xb8\xe7"
"\x9d\xec\xe8\xe7\xbc\xf3\x18\x25\x77\x35\x4a\xaa"
"\x00\x99\x2a\xdd\x0a\x00\x50\x82",
.add_size = 32,
.pers =
"\xbc\x55\xab\x3c\xf6\x52\xb0\x11\x3d\x7b\x90\xb8"
"\x24\xc9\x26\x4e\x5a\x1e\x77\x0d\x3d\x58\x4a\xda"
"\xd1\x81\xe9\xf8\xeb\x30\x8f\x6f",
.pers_size = 32,
}
}
};
bool __init fips140_run_selftests(void)
{
int i;
pr_info("running self-tests\n");
for (i = 0; i < ARRAY_SIZE(fips140_selftests); i++) {
const struct fips_test *test = &fips140_selftests[i];
int err;
err = test->func(test);
if (err) {
pr_emerg("self-tests failed for algorithm %s: %d\n",
test->alg, err);
/* The caller is responsible for calling panic(). */
return false;
}
}
pr_info("all self-tests passed\n");
return true;
}


@@ -16,6 +16,8 @@
#include <linux/memblock.h>
#include <linux/page_owner.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <linux/security.h>
struct ads_entry {
char *name;
@@ -59,6 +61,13 @@ static const struct ads_entry ads_entries[ADS_END] = {
#ifdef CONFIG_SWAP
ADS_ENTRY(ADS_NR_SWAP_PAGES, &nr_swap_pages),
#endif
#ifdef CONFIG_MMU
ADS_ENTRY(ADS_MMAP_MIN_ADDR, &mmap_min_addr),
#endif
ADS_ENTRY(ADS_STACK_GUARD_GAP, &stack_guard_gap),
#ifdef CONFIG_SYSCTL
ADS_ENTRY(ADS_SYSCTL_LEGACY_VA_LAYOUT, &sysctl_legacy_va_layout),
#endif
};
/*


@@ -40,6 +40,7 @@
#include <trace/hooks/iommu.h>
#include <trace/hooks/thermal.h>
#include <trace/hooks/ufshcd.h>
#include <trace/hooks/block.h>
#include <trace/hooks/cgroup.h>
#include <trace/hooks/workqueue.h>
#include <trace/hooks/sys.h>
@@ -68,6 +69,9 @@
#include <trace/hooks/net.h>
#include <trace/hooks/syscall_check.h>
#include <trace/hooks/usb.h>
#include <trace/hooks/ipv6.h>
#include <trace/hooks/sound.h>
#include <trace/hooks/snd_compr.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
@@ -87,6 +91,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_prepare_prio_fork);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_finish_prio_fork);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_user_nice);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_setscheduler);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sk_alloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sk_free);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_nf_conn_alloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_nf_conn_free);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_arch_set_freq_scale);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_is_fpsimd_save);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_init);
@@ -237,6 +245,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_selinux_avc_insert);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_selinux_avc_node_delete);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_selinux_avc_node_replace);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_selinux_avc_lookup);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_alloc_rqs);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_rq_ctx_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_commit_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_override_creds);
@@ -353,3 +363,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_file_open);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_bpf_syscall);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_usb_dev_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_usb_dev_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ipv6_gen_linklocal_addr);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sound_usb_support_cpu_suspend);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_snd_compr_use_pause_in_drain);


@@ -84,120 +84,6 @@ static struct kobj_type dma_buf_ktype = {
.default_groups = dma_buf_stats_default_groups,
};
#define to_dma_buf_attach_entry_from_kobj(x) container_of(x, struct dma_buf_attach_sysfs_entry, kobj)
struct dma_buf_attach_stats_attribute {
struct attribute attr;
ssize_t (*show)(struct dma_buf_attach_sysfs_entry *sysfs_entry,
struct dma_buf_attach_stats_attribute *attr, char *buf);
};
#define to_dma_buf_attach_stats_attr(x) container_of(x, struct dma_buf_attach_stats_attribute, attr)
static ssize_t dma_buf_attach_stats_attribute_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
struct dma_buf_attach_stats_attribute *attribute;
struct dma_buf_attach_sysfs_entry *sysfs_entry;
attribute = to_dma_buf_attach_stats_attr(attr);
sysfs_entry = to_dma_buf_attach_entry_from_kobj(kobj);
if (!attribute->show)
return -EIO;
return attribute->show(sysfs_entry, attribute, buf);
}
static const struct sysfs_ops dma_buf_attach_stats_sysfs_ops = {
.show = dma_buf_attach_stats_attribute_show,
};
static ssize_t map_counter_show(struct dma_buf_attach_sysfs_entry *sysfs_entry,
struct dma_buf_attach_stats_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%u\n", sysfs_entry->map_counter);
}
static struct dma_buf_attach_stats_attribute map_counter_attribute =
__ATTR_RO(map_counter);
static struct attribute *dma_buf_attach_stats_default_attrs[] = {
&map_counter_attribute.attr,
NULL,
};
ATTRIBUTE_GROUPS(dma_buf_attach_stats_default);
static void dma_buf_attach_sysfs_release(struct kobject *kobj)
{
struct dma_buf_attach_sysfs_entry *sysfs_entry;
sysfs_entry = to_dma_buf_attach_entry_from_kobj(kobj);
kfree(sysfs_entry);
}
static struct kobj_type dma_buf_attach_ktype = {
.sysfs_ops = &dma_buf_attach_stats_sysfs_ops,
.release = dma_buf_attach_sysfs_release,
.default_groups = dma_buf_attach_stats_default_groups,
};
void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach)
{
struct dma_buf_attach_sysfs_entry *sysfs_entry;
sysfs_entry = attach->sysfs_entry;
if (!sysfs_entry)
return;
sysfs_delete_link(&sysfs_entry->kobj, &attach->dev->kobj, "device");
kobject_del(&sysfs_entry->kobj);
kobject_put(&sysfs_entry->kobj);
}
int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach,
unsigned int uid)
{
struct dma_buf_attach_sysfs_entry *sysfs_entry;
int ret;
struct dma_buf *dmabuf;
if (!attach)
return -EINVAL;
dmabuf = attach->dmabuf;
sysfs_entry = kzalloc(sizeof(struct dma_buf_attach_sysfs_entry),
GFP_KERNEL);
if (!sysfs_entry)
return -ENOMEM;
sysfs_entry->kobj.kset = dmabuf->sysfs_entry->attach_stats_kset;
attach->sysfs_entry = sysfs_entry;
ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_attach_ktype,
NULL, "%u", uid);
if (ret)
goto kobj_err;
ret = sysfs_create_link(&sysfs_entry->kobj, &attach->dev->kobj,
"device");
if (ret)
goto link_err;
return 0;
link_err:
kobject_del(&sysfs_entry->kobj);
kobj_err:
kobject_put(&sysfs_entry->kobj);
attach->sysfs_entry = NULL;
return ret;
}
void dma_buf_stats_teardown(struct dma_buf *dmabuf)
{
struct dma_buf_sysfs_entry *sysfs_entry;
@@ -206,7 +92,6 @@ void dma_buf_stats_teardown(struct dma_buf *dmabuf)
if (!sysfs_entry)
return;
kset_unregister(sysfs_entry->attach_stats_kset);
kobject_del(&sysfs_entry->kobj);
kobject_put(&sysfs_entry->kobj);
}
@@ -254,7 +139,6 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf)
{
struct dma_buf_sysfs_entry *sysfs_entry;
int ret;
struct kset *attach_stats_kset;
if (!dmabuf || !dmabuf->file)
return -EINVAL;
@@ -279,21 +163,8 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf)
if (ret)
goto err_sysfs_dmabuf;
/* create the directory for attachment stats */
attach_stats_kset = kset_create_and_add("attachments",
&dmabuf_sysfs_no_uevent_ops,
&sysfs_entry->kobj);
if (!attach_stats_kset) {
ret = -ENOMEM;
goto err_sysfs_attach;
}
sysfs_entry->attach_stats_kset = attach_stats_kset;
return 0;
err_sysfs_attach:
kobject_del(&sysfs_entry->kobj);
err_sysfs_dmabuf:
kobject_put(&sysfs_entry->kobj);
dmabuf->sysfs_entry = NULL;


@@ -14,23 +14,8 @@ int dma_buf_init_sysfs_statistics(void);
void dma_buf_uninit_sysfs_statistics(void);
int dma_buf_stats_setup(struct dma_buf *dmabuf);
int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach,
unsigned int uid);
static inline void dma_buf_update_attachment_map_count(struct dma_buf_attachment *attach,
int delta)
{
struct dma_buf_attach_sysfs_entry *entry = attach->sysfs_entry;
entry->map_counter += delta;
}
void dma_buf_stats_teardown(struct dma_buf *dmabuf);
void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach);
static inline unsigned int dma_buf_update_attach_uid(struct dma_buf *dmabuf)
{
struct dma_buf_sysfs_entry *entry = dmabuf->sysfs_entry;
return entry->attachment_uid++;
}
#else
static inline int dma_buf_init_sysfs_statistics(void)
@@ -44,19 +29,7 @@ static inline int dma_buf_stats_setup(struct dma_buf *dmabuf)
{
return 0;
}
static inline int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach,
unsigned int uid)
{
return 0;
}
static inline void dma_buf_stats_teardown(struct dma_buf *dmabuf) {}
static inline void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach) {}
static inline void dma_buf_update_attachment_map_count(struct dma_buf_attachment *attach,
int delta) {}
static inline unsigned int dma_buf_update_attach_uid(struct dma_buf *dmabuf)
{
return 0;
}
#endif
#endif // _DMA_BUF_SYSFS_STATS_H


@@ -730,7 +730,6 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
{
struct dma_buf_attachment *attach;
int ret;
unsigned int attach_uid;
if (WARN_ON(!dmabuf || !dev))
return ERR_PTR(-EINVAL);
@@ -756,13 +755,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
}
dma_resv_lock(dmabuf->resv, NULL);
list_add(&attach->node, &dmabuf->attachments);
attach_uid = dma_buf_update_attach_uid(dmabuf);
dma_resv_unlock(dmabuf->resv);
ret = dma_buf_attach_stats_setup(attach, attach_uid);
if (ret)
goto err_sysfs;
/* When either the importer or the exporter can't handle dynamic
* mappings we cache the mapping here to avoid issues with the
* reservation object lock.
@@ -789,7 +783,6 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
dma_resv_unlock(attach->dmabuf->resv);
attach->sgt = sgt;
attach->dir = DMA_BIDIRECTIONAL;
dma_buf_update_attachment_map_count(attach, 1 /* delta */);
}
return attach;
@@ -806,7 +799,6 @@ err_unlock:
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
err_sysfs:
dma_buf_detach(dmabuf, attach);
return ERR_PTR(ret);
}
@@ -845,7 +837,6 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
dma_resv_lock(attach->dmabuf->resv, NULL);
dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
dma_buf_update_attachment_map_count(attach, -1 /* delta */);
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_buf_unpin(attach);
@@ -859,7 +850,6 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (dmabuf->ops->detach)
dmabuf->ops->detach(dmabuf, attach);
dma_buf_attach_stats_teardown(attach);
kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
@@ -965,9 +955,6 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
attach->dir = direction;
}
if (!IS_ERR(sg_table))
dma_buf_update_attachment_map_count(attach, 1 /* delta */);
return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
@@ -1005,8 +992,6 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (dma_buf_is_dynamic(attach->dmabuf) &&
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
dma_buf_unpin(attach);
dma_buf_update_attachment_map_count(attach, -1 /* delta */);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);


@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/leds.h>
#include <linux/interrupt.h>
#include <linux/android_kabi.h>
#include <linux/mmc/host.h>
@@ -605,6 +606,8 @@ struct sdhci_host {
u64 data_timeout;
ANDROID_KABI_RESERVE(1);
unsigned long private[] ____cacheline_aligned;
};
@@ -652,6 +655,8 @@ struct sdhci_ops {
void (*request_done)(struct sdhci_host *host,
struct mmc_request *mrq);
void (*dump_vendor_regs)(struct sdhci_host *host);
ANDROID_KABI_RESERVE(1);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS


@@ -13,6 +13,7 @@
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/android_kabi.h>
#include <uapi/scsi/scsi_bsg_ufs.h>
#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
@@ -594,6 +595,7 @@ struct ufs_dev_info {
u8 b_presrv_uspc_en;
/* UFS HPB related flag */
bool hpb_enabled;
ANDROID_KABI_RESERVE(1);
};
/**


@@ -43,6 +43,7 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include <linux/android_kabi.h>
#include "ufs.h"
#include "ufs_quirks.h"
@@ -217,6 +218,8 @@ struct ufshcd_lrb {
#endif
bool req_abort_skip;
ANDROID_KABI_RESERVE(1);
};
/**
@@ -353,6 +356,11 @@ struct ufs_hba_variant_ops {
const union ufs_crypto_cfg_entry *cfg, int slot);
void (*event_notify)(struct ufs_hba *hba,
enum ufs_event_type evt, void *data);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
/* clock gating state */
@@ -392,6 +400,8 @@ struct ufs_clk_gating {
bool is_initialized;
int active_reqs;
struct workqueue_struct *clk_gating_workq;
ANDROID_KABI_RESERVE(1);
};
struct ufs_saved_pwr_info {
@@ -438,6 +448,8 @@ struct ufs_clk_scaling {
bool is_initialized;
bool is_busy_started;
bool is_suspended;
ANDROID_KABI_RESERVE(1);
};
#define UFS_EVENT_HIST_LENGTH 8
@@ -915,6 +927,11 @@ struct ufs_hba {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_root;
#endif
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
/* Returns true if clocks can be gated. Otherwise false */


@@ -915,6 +915,15 @@ static const struct file_operations ashmem_fops = {
#endif
};
/*
* is_ashmem_file - Check if struct file* is associated with ashmem
*/
int is_ashmem_file(struct file *file)
{
return file->f_op == &ashmem_fops;
}
EXPORT_SYMBOL_GPL(is_ashmem_file);
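/*
 * Hypothetical usage sketch: other in-kernel code can now detect
 * ashmem-backed files, e.g.
 *
 *	if (is_ashmem_file(file))
 *		handle_ashmem_file(file);
 *
 * where handle_ashmem_file() is an assumed caller, not part of this patch.
 */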
static struct miscdevice ashmem_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "ashmem",


@@ -21,4 +21,6 @@
#define COMPAT_ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned int)
#endif
int is_ashmem_file(struct file *file);
#endif /* _LINUX_ASHMEM_H */


@@ -1267,6 +1267,7 @@ static void dwc3_get_properties(struct dwc3 *dwc)
u8 rx_max_burst_prd;
u8 tx_thr_num_pkt_prd;
u8 tx_max_burst_prd;
u8 tx_fifo_resize_max_num;
const char *usb_psy_name;
int ret;
@@ -1282,6 +1283,13 @@ static void dwc3_get_properties(struct dwc3 *dwc)
*/
hird_threshold = 12;
/*
* default to a TXFIFO size large enough to fit 6 max packets. This
* allows for systems with larger bus latencies to have some headroom
* for endpoints that have a large bMaxBurst value.
*/
tx_fifo_resize_max_num = 6;
dwc->maximum_speed = usb_get_maximum_speed(dev);
dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
dwc->dr_mode = usb_get_dr_mode(dev);
@@ -1325,6 +1333,11 @@ static void dwc3_get_properties(struct dwc3 *dwc)
&tx_thr_num_pkt_prd);
device_property_read_u8(dev, "snps,tx-max-burst-prd",
&tx_max_burst_prd);
dwc->do_fifo_resize = device_property_read_bool(dev,
"tx-fifo-resize");
if (dwc->do_fifo_resize)
device_property_read_u8(dev, "tx-fifo-max-num",
&tx_fifo_resize_max_num);
dwc->disable_scramble_quirk = device_property_read_bool(dev,
"snps,disable_scramble_quirk");
@@ -1390,6 +1403,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
dwc->tx_max_burst_prd = tx_max_burst_prd;
dwc->imod_interval = 0;
dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
}
/* check whether the core supports IMOD */


@@ -1038,6 +1038,7 @@ struct dwc3_scratchpad_array {
* @rx_max_burst_prd: max periodic ESS receive burst size
* @tx_thr_num_pkt_prd: periodic ESS transmit packet count
* @tx_max_burst_prd: max periodic ESS transmit burst size
* @tx_fifo_resize_max_num: max number of fifos allocated during txfifo resize
* @hsphy_interface: "utmi" or "ulpi"
* @connected: true when we're connected to a host, false otherwise
* @delayed_status: true when gadget driver asks for delayed status
@@ -1052,6 +1053,7 @@ struct dwc3_scratchpad_array {
* 1 - utmi_l1_suspend_n
* @is_fpga: true when we are using the FPGA board
* @pending_events: true when we have pending IRQs to be handled
* @do_fifo_resize: true when txfifo resizing is enabled for dwc3 endpoints
* @pullups_connected: true when Run/Stop bit is set
* @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
* @three_stage_setup: set if we perform a three phase setup
@@ -1094,6 +1096,11 @@ struct dwc3_scratchpad_array {
* @dis_split_quirk: set to disable split boundary.
* @imod_interval: set the interrupt moderation interval in 250ns
* increments or 0 to disable.
* @max_cfg_eps: current max number of IN eps used across all USB configs.
* @last_fifo_depth: last fifo depth used to determine next fifo ram start
* address.
* @num_ep_resized: carries the current number of endpoints which have had
* their tx fifo resized.
*/
struct dwc3 {
struct work_struct drd_work;
@@ -1249,6 +1256,7 @@ struct dwc3 {
u8 rx_max_burst_prd;
u8 tx_thr_num_pkt_prd;
u8 tx_max_burst_prd;
u8 tx_fifo_resize_max_num;
const char *hsphy_interface;
@@ -1262,6 +1270,7 @@ struct dwc3 {
unsigned is_utmi_l1_suspend:1;
unsigned is_fpga:1;
unsigned pending_events:1;
unsigned do_fifo_resize:1;
unsigned pullups_connected:1;
unsigned setup_packet_pending:1;
unsigned three_stage_setup:1;
@@ -1295,9 +1304,14 @@ struct dwc3 {
unsigned dis_metastability_quirk:1;
unsigned dis_split_quirk:1;
unsigned async_callbacks:1;
u16 imod_interval;
int max_cfg_eps;
int last_fifo_depth;
int num_ep_resized;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
@@ -1534,6 +1548,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
u32 param);
void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt);
void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc);
#else
static inline int dwc3_gadget_init(struct dwc3 *dwc)
{ return 0; }
@@ -1556,6 +1571,8 @@ static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
static inline void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
bool interrupt)
{ }
static inline void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
{ }
#endif
#if IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)


@@ -640,6 +640,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
struct device_node *np = pdev->dev.of_node, *dwc3_np;
struct device *dev = &pdev->dev;
struct property *prop;
int ret;
dwc3_np = of_get_child_by_name(np, "dwc3");
@@ -648,6 +649,20 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
return -ENODEV;
}
prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
if (!prop) {
ret = -ENOMEM;
dev_err(dev, "unable to allocate memory for property\n");
goto node_put;
}
prop->name = "tx-fifo-resize";
ret = of_add_property(dwc3_np, prop);
if (ret) {
dev_err(dev, "unable to add property\n");
goto node_put;
}
ret = of_platform_populate(np, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to register dwc3 core - %d\n", ret);


@@ -597,11 +597,13 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
-	int ret;
+	int ret = -EINVAL;
-	spin_unlock(&dwc->lock);
-	ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
-	spin_lock(&dwc->lock);
+	if (dwc->async_callbacks) {
+		spin_unlock(&dwc->lock);
+		ret = dwc->gadget_driver->setup(dwc->gadget, ctrl);
+		spin_lock(&dwc->lock);
+	}
return ret;
}
@@ -619,6 +621,8 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
return -EINVAL;
case USB_STATE_ADDRESS:
dwc3_gadget_clear_tx_fifos(dwc);
ret = dwc3_ep0_delegate_req(dwc, ctrl);
/* if the cfg matches and the cfg is non zero */
if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {


@@ -629,6 +629,187 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
}
/**
* dwc3_gadget_calc_tx_fifo_size - calculates the txfifo size value
* @dwc: pointer to the DWC3 context
* @mult: number of max packets the fifo should hold
*
* Calculates the size value based on the equation below:
*
* DWC3 revision 280A and prior:
* fifo_size = mult * (max_packet / mdwidth) + 1;
*
* DWC3 revision 290A and onwards:
* fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
*
* The max packet size is set to 1024, as the txfifo requirements mainly apply
* to super speed USB use cases. However, it is safe to overestimate the fifo
* allocations for other scenarios, i.e. high speed USB.
*/
static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
{
int max_packet = 1024;
int fifo_size;
int mdwidth;
mdwidth = dwc3_mdwidth(dwc);
/* MDWIDTH is represented in bits, we need it in bytes */
mdwidth >>= 3;
if (DWC3_VER_IS_PRIOR(DWC3, 290A))
fifo_size = mult * (max_packet / mdwidth) + 1;
else
fifo_size = mult * ((max_packet + mdwidth) / mdwidth) + 1;
return fifo_size;
}
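/*
 * Worked example (hypothetical values): on a core with a 64-bit master
 * bus, mdwidth is 64 bits = 8 bytes. For a DWC3 revision >= 290A and
 * mult = 3, the code above gives
 *
 *	fifo_size = 3 * ((1024 + 8) / 8) + 1 = 3 * 129 + 1 = 388
 *
 * i.e. 388 mdwidth-sized RAM words for a three-packet TXFIFO.
 */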
/**
* dwc3_gadget_clear_tx_fifos - Clears txfifo allocation
* @dwc: pointer to the DWC3 context
*
* Iterates through all the endpoint registers and clears the previous txfifo
* allocations.
*/
void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
{
struct dwc3_ep *dep;
int fifo_depth;
int size;
int num;
if (!dwc->do_fifo_resize)
return;
/* Read ep0IN related TXFIFO size */
dep = dwc->eps[1];
size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
if (DWC3_IP_IS(DWC3))
fifo_depth = DWC3_GTXFIFOSIZ_TXFDEP(size);
else
fifo_depth = DWC31_GTXFIFOSIZ_TXFDEP(size);
dwc->last_fifo_depth = fifo_depth;
/* Clear existing TXFIFO for all IN eps except ep0 */
for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM);
num += 2) {
dep = dwc->eps[num];
/* Don't change TXFRAMNUM on usb31 version */
size = DWC3_IP_IS(DWC3) ? 0 :
dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) &
DWC31_GTXFIFOSIZ_TXFRAMNUM;
dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1), size);
}
dwc->num_ep_resized = 0;
}
/*
* dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
* @dep: pointer to the endpoint whose fifo is being resized
*
* This function makes a best-effort FIFO allocation in order
* to improve FIFO usage and throughput, while still allowing
* us to enable as many endpoints as possible.
*
* Keep in mind that this operation is highly dependent on the
* configured size of RAM1 (which contains the TxFIFOs), the number
* of endpoints enabled in the coreConsultant tool, and the width
* of the master bus.
*
* In general, FIFO depths are represented with the following equation:
*
* fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
*
* In conjunction with dwc3_gadget_check_config(), this resizing logic will
* ensure that all endpoints will have enough internal memory for one max
* packet per endpoint.
*/
static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
{
struct dwc3 *dwc = dep->dwc;
int fifo_0_start;
int ram1_depth;
int fifo_size;
int min_depth;
int num_in_ep;
int remaining;
int num_fifos = 1;
int fifo;
int tmp;
if (!dwc->do_fifo_resize)
return 0;
/* resize IN endpoints except ep0 */
if (!usb_endpoint_dir_in(dep->endpoint.desc) || dep->number <= 1)
return 0;
ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
if ((dep->endpoint.maxburst > 1 &&
usb_endpoint_xfer_bulk(dep->endpoint.desc)) ||
usb_endpoint_xfer_isoc(dep->endpoint.desc))
num_fifos = 3;
if (dep->endpoint.maxburst > 6 &&
usb_endpoint_xfer_bulk(dep->endpoint.desc) && DWC3_IP_IS(DWC31))
num_fifos = dwc->tx_fifo_resize_max_num;
/* FIFO size for a single buffer */
fifo = dwc3_gadget_calc_tx_fifo_size(dwc, 1);
/* Calculate the number of remaining EPs w/o any FIFO */
num_in_ep = dwc->max_cfg_eps;
num_in_ep -= dwc->num_ep_resized;
/* Reserve at least one FIFO for the number of IN EPs */
min_depth = num_in_ep * (fifo + 1);
remaining = ram1_depth - min_depth - dwc->last_fifo_depth;
remaining = max_t(int, 0, remaining);
/*
* We've already reserved 1 FIFO per EP, so check what we can fit in
* addition to it. If there is not enough remaining space, allocate
* all the remaining space to the EP.
*/
fifo_size = (num_fifos - 1) * fifo;
if (remaining < fifo_size)
fifo_size = remaining;
fifo_size += fifo;
/* Last increment according to the TX FIFO size equation */
fifo_size++;
/* Check if TXFIFOs start at non-zero addr */
tmp = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
fifo_0_start = DWC3_GTXFIFOSIZ_TXFSTADDR(tmp);
fifo_size |= (fifo_0_start + (dwc->last_fifo_depth << 16));
if (DWC3_IP_IS(DWC3))
dwc->last_fifo_depth += DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
else
dwc->last_fifo_depth += DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
/* Check fifo size allocation doesn't exceed available RAM size. */
if (dwc->last_fifo_depth >= ram1_depth) {
dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
dwc->last_fifo_depth, ram1_depth,
dep->endpoint.name, fifo_size);
if (DWC3_IP_IS(DWC3))
fifo_size = DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
else
fifo_size = DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
dwc->last_fifo_depth -= fifo_size;
return -ENOMEM;
}
dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1), fifo_size);
dwc->num_ep_resized++;
return 0;
}
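/*
 * Worked example (hypothetical values): with mdwidth = 8 bytes, a single
 * max packet costs fifo = 129 + 1 = 130 words. For a bulk endpoint with
 * maxburst > 1 on a DWC3 core, num_fifos = 3, so if enough of RAM1
 * remains this endpoint claims (3 - 1) * 130 + 130 + 1 = 391 words; the
 * FIFO start address bits are then OR'ed in and last_fifo_depth advances
 * accordingly.
 */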
/**
* __dwc3_gadget_ep_enable - initializes a hw endpoint
* @dep: endpoint to be initialized
@@ -646,6 +827,10 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
int ret;
if (!(dep->flags & DWC3_EP_ENABLED)) {
ret = dwc3_gadget_resize_tx_fifos(dep);
if (ret)
return ret;
ret = dwc3_gadget_start_config(dep);
if (ret)
return ret;
@@ -2510,6 +2695,7 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
spin_lock_irqsave(&dwc->lock, flags);
dwc->gadget_driver = NULL;
dwc->max_cfg_eps = 0;
spin_unlock_irqrestore(&dwc->lock, flags);
free_irq(dwc->irq_gadget, dwc->ev_buf);
@@ -2597,6 +2783,61 @@ static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
return ret;
}
/**
* dwc3_gadget_check_config - ensure dwc3 can support the USB configuration
* @g: pointer to the USB gadget
*
* Used to record the maximum number of endpoints used in a USB composite
* device, across all configurations. This count is used in the calculation
* of the TXFIFO sizes when resizing internal memory for individual endpoints.
* It helps ensure that the resizing logic reserves enough space for at
* least one max packet per endpoint.
*/
static int dwc3_gadget_check_config(struct usb_gadget *g)
{
struct dwc3 *dwc = gadget_to_dwc(g);
struct usb_ep *ep;
int fifo_size = 0;
int ram1_depth;
int ep_num = 0;
if (!dwc->do_fifo_resize)
return 0;
list_for_each_entry(ep, &g->ep_list, ep_list) {
/* Only interested in the IN endpoints */
if (ep->claimed && (ep->address & USB_DIR_IN))
ep_num++;
}
if (ep_num <= dwc->max_cfg_eps)
return 0;
/* Update the max number of eps in the composition */
dwc->max_cfg_eps = ep_num;
fifo_size = dwc3_gadget_calc_tx_fifo_size(dwc, dwc->max_cfg_eps);
/* Based on the equation, increment by one for every ep */
fifo_size += dwc->max_cfg_eps;
/* Check if we can fit a single fifo per endpoint */
ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
if (fifo_size > ram1_depth)
return -ENOMEM;
return 0;
}
static void dwc3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
dwc->async_callbacks = enable;
spin_unlock_irqrestore(&dwc->lock, flags);
}
static const struct usb_gadget_ops dwc3_gadget_ops = {
.get_frame = dwc3_gadget_get_frame,
.wakeup = dwc3_gadget_wakeup,
@@ -2608,6 +2849,8 @@ static const struct usb_gadget_ops dwc3_gadget_ops = {
.udc_set_ssp_rate = dwc3_gadget_set_ssp_rate,
.get_config_params = dwc3_gadget_config_params,
.vbus_draw = dwc3_gadget_vbus_draw,
.check_config = dwc3_gadget_check_config,
.udc_async_callbacks = dwc3_gadget_async_callbacks,
};
/* -------------------------------------------------------------------------- */
@@ -3241,7 +3484,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
static void dwc3_disconnect_gadget(struct dwc3 *dwc)
{
-	if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
+	if (dwc->async_callbacks && dwc->gadget_driver->disconnect) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->disconnect(dwc->gadget);
spin_lock(&dwc->lock);
@@ -3250,7 +3493,7 @@ static void dwc3_disconnect_gadget(struct dwc3 *dwc)
static void dwc3_suspend_gadget(struct dwc3 *dwc)
{
-	if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
+	if (dwc->async_callbacks && dwc->gadget_driver->suspend) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->suspend(dwc->gadget);
spin_lock(&dwc->lock);
@@ -3259,7 +3502,7 @@ static void dwc3_suspend_gadget(struct dwc3 *dwc)
static void dwc3_resume_gadget(struct dwc3 *dwc)
{
-	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+	if (dwc->async_callbacks && dwc->gadget_driver->resume) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->resume(dwc->gadget);
spin_lock(&dwc->lock);
@@ -3271,7 +3514,7 @@ static void dwc3_reset_gadget(struct dwc3 *dwc)
if (!dwc->gadget_driver)
return;
-	if (dwc->gadget->speed != USB_SPEED_UNKNOWN) {
+	if (dwc->async_callbacks && dwc->gadget->speed != USB_SPEED_UNKNOWN) {
spin_unlock(&dwc->lock);
usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
spin_lock(&dwc->lock);
@@ -3601,7 +3844,7 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
* implemented.
*/
-	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
+	if (dwc->async_callbacks && dwc->gadget_driver->resume) {
spin_unlock(&dwc->lock);
dwc->gadget_driver->resume(dwc->gadget);
spin_lock(&dwc->lock);


@@ -1436,6 +1436,10 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
goto err_purge_funcs;
}
}
ret = usb_gadget_check_config(cdev->gadget);
if (ret)
goto err_purge_funcs;
usb_ep_autoconfig_reset(cdev->gadget);
}
if (cdev->use_os_string) {


@@ -1006,6 +1006,25 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
}
EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc);
/**
* usb_gadget_check_config - checks if the UDC can support the bound
* configuration
* @gadget: controller to check the USB configuration
*
* Ensure that a UDC is able to support the resources requested by a
* configuration, and that there are no resource limitations, such as the
* internal memory allocated to all requested endpoints.
*
* Returns zero on success, else a negative errno.
*/
int usb_gadget_check_config(struct usb_gadget *gadget)
{
if (gadget->ops->check_config)
return gadget->ops->check_config(gadget);
return 0;
}
EXPORT_SYMBOL_GPL(usb_gadget_check_config);
/* ------------------------------------------------------------------------- */
static void usb_gadget_state_work(struct work_struct *work)
@@ -1150,6 +1169,53 @@ static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
gadget->ops->udc_set_speed(gadget, s);
}
/**
* usb_gadget_enable_async_callbacks - tell usb device controller to enable asynchronous callbacks
* @udc: The UDC which should enable async callbacks
*
* This routine is used when binding gadget drivers. It undoes the effect
* of usb_gadget_disable_async_callbacks(); the UDC driver should enable IRQs
* (if necessary) and resume issuing callbacks.
*
* This routine will always be called in process context.
*/
static inline void usb_gadget_enable_async_callbacks(struct usb_udc *udc)
{
struct usb_gadget *gadget = udc->gadget;
if (gadget->ops->udc_async_callbacks)
gadget->ops->udc_async_callbacks(gadget, true);
}
/**
* usb_gadget_disable_async_callbacks - tell usb device controller to disable asynchronous callbacks
* @udc: The UDC which should disable async callbacks
*
* This routine is used when unbinding gadget drivers. It prevents a race:
* The UDC driver doesn't know when the gadget driver's ->unbind callback
* runs, so unless it is told to disable asynchronous callbacks, it might
* issue a callback (such as ->disconnect) after the unbind has completed.
*
* After this function runs, the UDC driver must suppress all ->suspend,
* ->resume, ->disconnect, ->reset, and ->setup callbacks to the gadget driver
* until async callbacks are again enabled. A simple-minded but effective
* way to accomplish this is to tell the UDC hardware not to generate any
* more IRQs.
*
* Request completion callbacks must still be issued. However, it's okay
* to defer them until the request is cancelled, since the pull-up will be
* turned off during the time period when async callbacks are disabled.
*
* This routine will always be called in process context.
*/
static inline void usb_gadget_disable_async_callbacks(struct usb_udc *udc)
{
struct usb_gadget *gadget = udc->gadget;
if (gadget->ops->udc_async_callbacks)
gadget->ops->udc_async_callbacks(gadget, false);
}
/**
* usb_udc_release - release the usb_udc struct
* @dev: the dev member within usb_udc
@@ -1364,6 +1430,7 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
usb_gadget_disconnect(udc->gadget);
usb_gadget_disable_async_callbacks(udc);
if (udc->gadget->irq)
synchronize_irq(udc->gadget->irq);
udc->driver->unbind(udc->gadget);
@@ -1445,6 +1512,7 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
driver->unbind(udc->gadget);
goto err1;
}
usb_gadget_enable_async_callbacks(udc);
usb_udc_connect_control(udc);
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);


@@ -12,6 +12,7 @@
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/usb/pd_vdo.h>
#include <linux/android_kabi.h>
#include "bus.h"
@@ -20,6 +21,7 @@ struct typec_plug {
enum typec_plug_index index;
struct ida mode_ids;
int num_altmodes;
ANDROID_KABI_RESERVE(1);
};
struct typec_cable {
@@ -28,6 +30,7 @@ struct typec_cable {
struct usb_pd_identity *identity;
unsigned int active:1;
u16 pd_revision; /* 0300H = "3.0" */
ANDROID_KABI_RESERVE(1);
};
struct typec_partner {
@@ -39,6 +42,7 @@ struct typec_partner {
int num_altmodes;
u16 pd_revision; /* 0300H = "3.0" */
enum usb_pd_svdm_ver svdm_version;
ANDROID_KABI_RESERVE(1);
};
struct typec_port {
@@ -60,6 +64,7 @@ struct typec_port {
const struct typec_capability *cap;
const struct typec_operations *ops;
ANDROID_KABI_RESERVE(1);
};
#define to_typec_port(_dev_) container_of(_dev_, struct typec_port, dev)
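
The ANDROID_KABI_RESERVE()/ANDROID_OEM_DATA()/ANDROID_VENDOR_DATA() fields added throughout this diff are padding placeholders. A simplified sketch of the convention these macros follow (reconstructed from the GKI android_kabi.h/android_vendor.h headers; exact definitions in the tree may differ):

/* Each reserve is a fixed u64 slot named by number. */
#define _ANDROID_KABI_RESERVE(n)	u64 android_kabi_reserved##n

#ifdef CONFIG_ANDROID_KABI_RESERVE
#define ANDROID_KABI_RESERVE(n)		_ANDROID_KABI_RESERVE(n)
#else
#define ANDROID_KABI_RESERVE(n)
#endif

/*
 * A reserve can later be repurposed without changing the struct layout,
 * which is the point of pre-padding GKI structures:
 */
#define ANDROID_KABI_USE(n, _new)		\
	union {					\
		_new;				\
		_ANDROID_KABI_RESERVE(n);	\
	}

/* Vendor/OEM slots follow the same idea, as u64s or u64 arrays. */
#define ANDROID_VENDOR_DATA(n)		u64 android_vendor_data##n
#define ANDROID_OEM_DATA(n)		u64 android_oem_data##n
#define ANDROID_OEM_DATA_ARRAY(n, s)	u64 android_oem_data##n[s]
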


@@ -1307,11 +1307,12 @@ static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm,
const unsigned long nr_pages = PFN_DOWN(vm->subblock_size) * count;
unsigned long start_pfn;
int rc;
struct acr_info dummy;
start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
sb_id * vm->subblock_size);
rc = alloc_contig_range(start_pfn, start_pfn + nr_pages,
-				MIGRATE_MOVABLE, GFP_KERNEL);
+				MIGRATE_MOVABLE, GFP_KERNEL, &dummy);
if (rc == -ENOMEM)
/* whoops, out of memory */
return rc;


@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/resource.h>
#include <linux/regulator/consumer.h>
#include <linux/android_kabi.h>
#define AMBA_NR_IRQS 9
#define AMBA_CID 0xb105f00d
@@ -71,6 +72,8 @@ struct amba_device {
struct amba_cs_uci_id uci;
unsigned int irq[AMBA_NR_IRQS];
char *driver_override;
ANDROID_KABI_RESERVE(1);
};
struct amba_driver {
@@ -79,6 +82,8 @@ struct amba_driver {
void (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
const struct amba_id *id_table;
ANDROID_KABI_RESERVE(1);
};
/*


@@ -29,6 +29,13 @@ enum android_debug_symbol {
#endif
#ifdef CONFIG_SWAP
ADS_NR_SWAP_PAGES,
#endif
#ifdef CONFIG_MMU
ADS_MMAP_MIN_ADDR,
#endif
ADS_STACK_GUARD_GAP,
#ifdef CONFIG_SYSCTL
ADS_SYSCTL_LEGACY_VA_LAYOUT,
#endif
ADS_END
};
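
These new entries extend the symbol table exposed by the android debug_symbols driver. A hypothetical lookup through the android_debug_symbol() accessor from include/linux/android_debug_symbols.h (the caller below is illustrative only, not part of this diff):

#include <linux/android_debug_symbols.h>

/* Hypothetical vendor-module use of one of the new entries. */
static void vendor_log_stack_guard_gap(void)
{
	unsigned long *gap = android_debug_symbol(ADS_STACK_GUARD_GAP);

	if (!IS_ERR_OR_NULL(gap))
		pr_info("stack_guard_gap = %lu\n", *gap);
}
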


@@ -13,6 +13,7 @@
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/refcount.h>
#include <linux/android_kabi.h>
struct page;
struct device;
@@ -160,6 +161,9 @@ struct bdi_writeback {
struct rcu_head rcu;
};
#endif
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
struct backing_dev_info {
@@ -198,6 +202,9 @@ struct backing_dev_info {
#ifdef CONFIG_DEBUG_FS
struct dentry *debug_dir;
#endif
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
enum {


@@ -10,6 +10,7 @@
#include <linux/ioprio.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/android_kabi.h>
#define BIO_DEBUG
@@ -321,6 +322,10 @@ struct bio_integrity_payload {
struct work_struct bip_work; /* I/O completion */
struct bio_vec *bip_vec;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
struct bio_vec bip_inline_vecs[];/* embedded bvec array */
};
@@ -694,6 +699,11 @@ struct bio_set {
struct bio_list rescue_list;
struct work_struct rescue_work;
struct workqueue_struct *rescue_workqueue;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
struct biovec_slab {


@@ -48,6 +48,11 @@ struct block_device {
/* Mutex for freeze */
struct mutex bd_fsfreeze_mutex;
struct super_block *bd_fsfreeze_sb;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
} __randomize_layout;
/*


@@ -26,6 +26,8 @@
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
#include <linux/pm.h>
#include <linux/android_kabi.h>
#include <linux/android_vendor.h>
struct module;
struct scsi_ioctl_command;
@@ -242,6 +244,8 @@ struct request {
*/
rq_end_io_fn *end_io;
void *end_io_data;
ANDROID_KABI_RESERVE(1);
};
static inline bool blk_op_is_scsi(unsigned int op)
@@ -346,6 +350,8 @@ struct queue_limits {
unsigned char discard_misaligned;
unsigned char raid_partial_stripes_expensive;
enum blk_zoned_model zoned;
ANDROID_KABI_RESERVE(1);
};
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
@@ -589,6 +595,10 @@ struct request_queue {
#define BLK_MAX_WRITE_HINTS 5
u64 write_hints[BLK_MAX_WRITE_HINTS];
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
ANDROID_OEM_DATA(1);
};
@@ -694,18 +704,6 @@ static inline bool queue_is_mq(struct request_queue *q)
return q->mq_ops;
}
-#ifdef CONFIG_PM
-static inline enum rpm_status queue_rpm_status(struct request_queue *q)
-{
-	return q->rpm_status;
-}
-#else
-static inline enum rpm_status queue_rpm_status(struct request_queue *q)
-{
-	return RPM_ACTIVE;
-}
-#endif
static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
@@ -1870,6 +1868,10 @@ struct block_device_operations {
char *(*devnode)(struct gendisk *disk, umode_t *mode);
struct module *owner;
const struct pr_ops *pr_ops;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_OEM_DATA(1);
};
#ifdef CONFIG_COMPAT


@@ -21,6 +21,7 @@
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/percpu-refcount.h>
#include <linux/android_kabi.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -132,6 +133,9 @@ struct bpf_map_ops {
/* bpf_iter info used to open a seq_file */
const struct bpf_iter_seq_info *iter_seq_info;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
struct bpf_map_memory {
@@ -218,6 +222,8 @@ struct bpf_map_dev_ops {
int (*map_update_elem)(struct bpf_offloaded_map *map,
void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
ANDROID_KABI_RESERVE(1);
};
struct bpf_offloaded_map {
@@ -459,6 +465,7 @@ struct bpf_verifier_ops {
const struct btf_type *t, int off, int size,
enum bpf_access_type atype,
u32 *next_btf_id);
ANDROID_KABI_RESERVE(1);
};
struct bpf_prog_offload_ops {
@@ -474,6 +481,7 @@ struct bpf_prog_offload_ops {
int (*prepare)(struct bpf_prog *prog);
int (*translate)(struct bpf_prog *prog);
void (*destroy)(struct bpf_prog *prog);
ANDROID_KABI_RESERVE(1);
};
struct bpf_prog_offload {
@@ -851,6 +859,7 @@ struct bpf_prog_aux {
struct work_struct work;
struct rcu_head rcu;
};
ANDROID_KABI_RESERVE(1);
};
struct bpf_array_aux {


@@ -7,6 +7,7 @@
#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>
#include <linux/android_kabi.h>
/* Maximum variable offset umax_value permitted when resolving memory accesses.
* In practice this is far bigger than any realistic pointer offset; this limit
@@ -370,6 +371,8 @@ struct bpf_subprog_info {
bool has_tail_call;
bool tail_call_reachable;
bool has_ld_abs;
ANDROID_KABI_RESERVE(1);
};
/* single container for all structs
@@ -425,6 +428,9 @@ struct bpf_verifier_env {
u32 peak_states;
/* longest register parentage chain walked for liveness marking */
u32 longest_mark_read_walk;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,


@@ -24,6 +24,7 @@
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>
#include <linux/android_kabi.h>
#include <linux/cgroup-defs.h>
@@ -66,6 +67,8 @@ struct css_task_iter {
struct css_set *cur_dcset;
struct task_struct *cur_task;
struct list_head iters_node; /* css_set->task_iters */
ANDROID_KABI_RESERVE(1);
};
extern struct cgroup_root cgrp_dfl_root;


@@ -5,6 +5,7 @@
#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>
#include <linux/android_vendor.h>
#define CLEANCACHE_NO_POOL -1
#define CLEANCACHE_NO_BACKEND -2
@@ -36,6 +37,7 @@ struct cleancache_ops {
void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
void (*invalidate_inode)(int, struct cleancache_filekey);
void (*invalidate_fs)(int);
ANDROID_OEM_DATA(1);
};
extern int cleancache_register_ops(const struct cleancache_ops *ops);


@@ -22,6 +22,15 @@
struct cma;
struct cma_alloc_info {
unsigned long nr_migrated;
unsigned long nr_reclaimed;
unsigned long nr_mapped;
unsigned int nr_isolate_fail;
unsigned int nr_migrate_fail;
unsigned int nr_test_fail;
};
extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);


@@ -195,6 +195,10 @@ enum cpuhp_state {
CPUHP_AP_X86_KVM_CLK_ONLINE,
CPUHP_AP_DTPM_CPU_ONLINE,
CPUHP_AP_ACTIVE,
CPUHP_ANDROID_RESERVED_1,
CPUHP_ANDROID_RESERVED_2,
CPUHP_ANDROID_RESERVED_3,
CPUHP_ANDROID_RESERVED_4,
CPUHP_ONLINE,
};
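
The reserved entries give vendor modules fixed hotplug slots just below CPUHP_ONLINE, so the enum (and thus the ABI) stays stable. A hypothetical module claiming one through the standard cpuhp API (callback and init names are illustrative):

#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int vendor_cpu_online(unsigned int cpu)
{
	pr_debug("cpu %u came online\n", cpu);
	return 0;
}

static int vendor_cpu_offline(unsigned int cpu)
{
	pr_debug("cpu %u going offline\n", cpu);
	return 0;
}

static int __init vendor_hotplug_init(void)
{
	/* Claim a reserved state instead of CPUHP_AP_ONLINE_DYN. */
	return cpuhp_setup_state(CPUHP_ANDROID_RESERVED_1, "vendor:online",
				 vendor_cpu_online, vendor_cpu_offline);
}
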


@@ -14,6 +14,7 @@
#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/android_kabi.h>
#define CPUIDLE_STATE_MAX 10
#define CPUIDLE_NAME_LEN 16
@@ -110,6 +111,8 @@ struct cpuidle_device {
cpumask_t coupled_cpus;
struct cpuidle_coupled *coupled;
#endif
ANDROID_KABI_RESERVE(1);
};
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -135,6 +138,8 @@ struct cpuidle_driver {
/* preferred governor to switch at register time */
const char *governor;
ANDROID_KABI_RESERVE(1);
};
#ifdef CONFIG_CPU_IDLE


@@ -13,6 +13,7 @@
#include <linux/lockref.h>
#include <linux/stringhash.h>
#include <linux/wait.h>
#include <linux/android_kabi.h>
struct path;
struct vfsmount;
@@ -118,6 +119,9 @@ struct dentry {
struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */
struct rcu_head d_rcu;
} d_u;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
} __randomize_layout;
/*
@@ -148,6 +152,10 @@ struct dentry_operations {
int (*d_manage)(const struct path *, bool);
struct dentry *(*d_real)(struct dentry *, const struct inode *);
void (*d_canonical_path)(const struct path *, struct path *);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
} ____cacheline_aligned;
/*


@@ -13,6 +13,7 @@
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include <linux/android_kabi.h>
struct dm_dev;
struct dm_target;
@@ -198,6 +199,9 @@ struct target_type {
dm_dax_copy_iter_fn dax_copy_to_iter;
dm_dax_zero_page_range_fn dax_zero_page_range;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
/* For internal device-mapper use. */
struct list_head list;
};
@@ -349,6 +353,9 @@ struct dm_target {
* Set if we need to limit the number of in-flight bios when swapping.
*/
bool limit_swap_bios:1;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
void *dm_per_bio_data(struct bio *bio, size_t data_size);


@@ -30,6 +30,7 @@
#include <linux/device/bus.h>
#include <linux/device/class.h>
#include <linux/device/driver.h>
#include <linux/android_kabi.h>
#include <asm/device.h>
struct device;
@@ -553,6 +554,14 @@ struct device {
#ifdef CONFIG_DMA_OPS_BYPASS
bool dma_ops_bypass : 1;
#endif
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
ANDROID_KABI_RESERVE(5);
ANDROID_KABI_RESERVE(6);
ANDROID_KABI_RESERVE(7);
ANDROID_KABI_RESERVE(8);
};
/**


@@ -112,6 +112,11 @@ struct bus_type {
struct lock_class_key lock_key;
bool need_parent_lock;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
extern int __must_check bus_register(struct bus_type *bus);


@@ -76,7 +76,11 @@ struct class {
const struct dev_pm_ops *pm;
struct subsys_private *p;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
struct class_dev_iter {


@@ -118,6 +118,11 @@ struct device_driver {
void (*coredump) (struct device *dev);
struct driver_private *p;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};


@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/dma-fence.h>
#include <linux/wait.h>
#include <linux/android_kabi.h>
struct device;
struct dma_buf;
@@ -353,6 +354,9 @@ struct dma_buf_ops {
* will be populated with the buffer's flags.
*/
int (*get_flags)(struct dma_buf *dmabuf, unsigned long *flags);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
/**
@@ -379,8 +383,6 @@ struct dma_buf_ops {
* @cb_excl: for userspace poll support
* @cb_shared: for userspace poll support
* @sysfs_entry: for exposing information about this buffer in sysfs.
- * The attachment_uid member of @sysfs_entry is protected by dma_resv lock
- * and is incremented on each attach.
*
* This represents a shared buffer, created by calling dma_buf_export(). The
* userspace representation is a normal file descriptor, which can be created by
@@ -421,10 +423,11 @@ struct dma_buf {
struct dma_buf_sysfs_entry {
struct kobject kobj;
struct dma_buf *dmabuf;
-		unsigned int attachment_uid;
-		struct kset *attach_stats_kset;
} *sysfs_entry;
#endif
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
/**
@@ -476,7 +479,6 @@ struct dma_buf_attach_ops {
* @importer_priv: importer specific attachment data.
* @dma_map_attrs: DMA attributes to be used when the exporter maps the buffer
* through dma_buf_map_attachment.
- * @sysfs_entry: For exposing information about this attachment in sysfs.
*
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
@@ -498,13 +500,9 @@ struct dma_buf_attachment {
void *importer_priv;
void *priv;
unsigned long dma_map_attrs;
-#ifdef CONFIG_DMABUF_SYSFS_STATS
-	/* for sysfs stats */
-	struct dma_buf_attach_sysfs_entry {
-		struct kobject kobj;
-		unsigned int map_counter;
-	} *sysfs_entry;
-#endif
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
/**
@@ -528,6 +526,9 @@ struct dma_buf_export_info {
int flags;
struct dma_resv *resv;
void *priv;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
/**


@@ -12,6 +12,7 @@
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/android_kabi.h>
#include <asm/page.h>
/**
@@ -941,6 +942,11 @@ struct dma_device {
void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev);
struct dentry *dbg_dev_root;
#endif
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
static inline int dmaengine_slave_config(struct dma_chan *chan,


@@ -4,6 +4,7 @@
#include <linux/percpu.h>
#include <linux/hashtable.h>
#include <linux/android_kabi.h>
#ifdef CONFIG_BLOCK
@@ -50,6 +51,11 @@ struct elevator_mq_ops {
struct request *(*next_request)(struct request_queue *, struct request *);
void (*init_icq)(struct io_cq *);
void (*exit_icq)(struct io_cq *);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
#define ELV_NAME_MAX (16)
@@ -86,6 +92,9 @@ struct elevator_type
/* managed by elevator core */
char icq_cache_name[ELV_NAME_MAX + 6]; /* elvname + "_io_cq" */
struct list_head list;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
#define ELV_HASH_BITS 6


@@ -39,9 +39,11 @@
#include <linux/fs_types.h>
#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/android_kabi.h>
#include <asm/byteorder.h>
#include <uapi/linux/fs.h>
#include <linux/android_vendor.h>
struct backing_dev_info;
struct bdi_writeback;
@@ -413,6 +415,11 @@ struct address_space_operations {
int (*swap_activate)(struct swap_info_struct *sis, struct file *file,
sector_t *span);
void (*swap_deactivate)(struct file *file);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
extern const struct address_space_operations empty_aops;
@@ -468,6 +475,11 @@ struct address_space {
spinlock_t private_lock;
struct list_head private_list;
void *private_data;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
} __attribute__((aligned(sizeof(long)))) __randomize_layout;
/*
* On most architectures that alignment is already the case; but
@@ -718,6 +730,9 @@ struct inode {
#endif
void *i_private; /* fs or device private pointer */
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
} __randomize_layout;
struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);
@@ -952,6 +967,10 @@ struct file {
struct address_space *f_mapping;
errseq_t f_wb_err;
errseq_t f_sb_err; /* for syncfs */
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_OEM_DATA(1);
} __randomize_layout
__attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
@@ -1011,6 +1030,9 @@ struct file_lock;
struct file_lock_operations {
void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
void (*fl_release_private)(struct file_lock *);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
struct lock_manager_operations {
@@ -1022,6 +1044,9 @@ struct lock_manager_operations {
int (*lm_change)(struct file_lock *, int, struct list_head *);
void (*lm_setup)(struct file_lock *, void **);
bool (*lm_breaker_owns_lease)(struct file_lock *);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
struct lock_manager {
@@ -1095,6 +1120,10 @@ struct file_lock {
unsigned int debug_id;
} afs;
} fl_u;
struct list_head android_reserved1; /* not a macro as we might just need it as-is */
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
} __randomize_layout;
struct file_lock_context {
@@ -1547,6 +1576,11 @@ struct super_block {
spinlock_t s_inode_wblist_lock;
struct list_head s_inodes_wb; /* writeback inodes */
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
} __randomize_layout;
/* Helper functions so that in most cases filesystems will
@@ -1859,6 +1893,11 @@ struct file_operations {
struct file *file_out, loff_t pos_out,
loff_t len, unsigned int remap_flags);
int (*fadvise)(struct file *, loff_t, loff_t, int);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
} __randomize_layout;
struct inode_operations {
@@ -1889,6 +1928,11 @@ struct inode_operations {
umode_t create_mode);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
int (*set_acl)(struct inode *, struct posix_acl *, int);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
} ____cacheline_aligned;
static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio,
@@ -1965,6 +2009,11 @@ struct super_operations {
struct shrink_control *);
long (*free_cached_objects)(struct super_block *,
struct shrink_control *);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
/*
@@ -2252,6 +2301,11 @@ struct file_system_type {
struct lock_class_key i_lock_key;
struct lock_class_key i_mutex_key;
struct lock_class_key i_mutex_dir_key;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)


@@ -70,6 +70,13 @@ struct fscrypt_operations {
int (*get_num_devices)(struct super_block *sb);
void (*get_devices)(struct super_block *sb,
struct request_queue **devs);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
ANDROID_OEM_DATA_ARRAY(1, 4);
};
static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)


@@ -642,9 +642,24 @@ static inline bool pm_suspended_storage(void)
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_CONTIG_ALLOC
extern unsigned long pfn_max_align_up(unsigned long pfn);
#define ACR_ERR_ISOLATE (1 << 0)
#define ACR_ERR_MIGRATE (1 << 1)
#define ACR_ERR_TEST (1 << 2)
struct acr_info {
unsigned long nr_mapped;
unsigned long nr_migrated;
unsigned long nr_reclaimed;
unsigned int err;
unsigned long failed_pfn;
};
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
-			      unsigned migratetype, gfp_t gfp_mask);
+			      unsigned migratetype, gfp_t gfp_mask,
+			      struct acr_info *info);
extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
int nid, nodemask_t *nodemask);
#endif
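
The virtio-mem hunk earlier passes a throwaway struct because it does not consume the stats. A sketch of a caller that does use the new out-parameter for failure diagnostics (hypothetical caller; the ACR_ERR_* bits and counters are the ones defined above):

/* Hypothetical caller: report why a contiguous allocation failed. */
static int try_alloc_range(unsigned long start_pfn, unsigned long end_pfn)
{
	struct acr_info info = {0};
	int ret;

	ret = alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				 GFP_KERNEL, &info);
	if (ret && (info.err & ACR_ERR_ISOLATE))
		pr_debug("isolation failed around pfn %lu\n",
			 info.failed_pfn);
	if (ret && (info.err & ACR_ERR_MIGRATE))
		pr_debug("migrated %lu, reclaimed %lu before failing\n",
			 info.nr_migrated, info.nr_reclaimed);
	return ret;
}
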


@@ -10,6 +10,7 @@
#include <linux/lockdep.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/android_kabi.h>
struct gpio_desc;
struct of_phandle_args;
@@ -266,6 +267,9 @@ struct gpio_irq_chip {
* Store old irq_chip irq_mask callback
*/
void (*irq_mask)(struct irq_data *data);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
/**
@@ -469,6 +473,9 @@ struct gpio_chip {
int (*of_xlate)(struct gpio_chip *gc,
const struct of_phandle_args *gpiospec, u32 *flags);
#endif /* CONFIG_OF_GPIO */
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
extern const char *gpiochip_is_requested(struct gpio_chip *gc,


@@ -7,6 +7,7 @@
#include <linux/time.h>
#include <linux/list.h>
#include <linux/android_kabi.h>
#include <uapi/linux/input.h>
/* Implementation details, userspace should not care about these */
#define ABS_MT_FIRST ABS_MT_TOUCH_MAJOR
@@ -201,6 +202,11 @@ struct input_dev {
bool devres_managed;
ktime_t timestamp[INPUT_CLK_MAX];
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
#define to_input_dev(d) container_of(d, struct input_dev, dev)
@@ -320,6 +326,8 @@ struct input_handler {
struct list_head h_list;
struct list_head node;
ANDROID_KABI_RESERVE(1);
};
/**
@@ -346,6 +354,8 @@ struct input_handle {
struct list_head d_node;
struct list_head h_node;
ANDROID_KABI_RESERVE(1);
};
struct input_dev __must_check *input_allocate_device(void);
@@ -550,6 +560,9 @@ struct ff_device {
int max_effects;
struct ff_effect *effects;
ANDROID_KABI_RESERVE(1);
struct file *effect_owners[];
};


@@ -89,6 +89,8 @@ struct iomap {
void *inline_data;
void *private; /* filesystem private */
const struct iomap_page_ops *page_ops;
ANDROID_KABI_RESERVE(1);
};
static inline sector_t


@@ -113,7 +113,7 @@ struct static_key {
#endif /* CONFIG_JUMP_LABEL */
#endif /* __ASSEMBLY__ */
-#ifdef CONFIG_JUMP_LABEL
+#if defined(CONFIG_JUMP_LABEL) && !defined(BUILD_FIPS140_KO)
#include <asm/jump_label.h>
#ifndef __ASSEMBLY__
@@ -188,7 +188,28 @@ enum jump_label_type {
struct module;
-#ifdef CONFIG_JUMP_LABEL
+#ifdef BUILD_FIPS140_KO
static inline int static_key_count(struct static_key *key)
{
return atomic_read(&key->enabled);
}
static __always_inline bool static_key_false(struct static_key *key)
{
if (unlikely(static_key_count(key) > 0))
return true;
return false;
}
static __always_inline bool static_key_true(struct static_key *key)
{
if (likely(static_key_count(key) > 0))
return true;
return false;
}
#elif defined(CONFIG_JUMP_LABEL)
#define JUMP_TYPE_FALSE 0UL
#define JUMP_TYPE_TRUE 1UL
@@ -393,7 +414,7 @@ extern bool ____wrong_branch_error(void);
static_key_count((struct static_key *)x) > 0; \
})
-#ifdef CONFIG_JUMP_LABEL
+#if defined(CONFIG_JUMP_LABEL) && !defined(BUILD_FIPS140_KO)
/*
* Combine the right initial value (type) with the right branch order


@@ -157,6 +157,8 @@ struct kernfs_node {
unsigned short flags;
umode_t mode;
struct kernfs_iattrs *iattr;
ANDROID_KABI_RESERVE(1);
};
/*
@@ -198,6 +200,8 @@ struct kernfs_root {
struct list_head supers;
wait_queue_head_t deactivate_waitq;
ANDROID_KABI_RESERVE(1);
};
struct kernfs_open_file {
@@ -218,6 +222,8 @@ struct kernfs_open_file {
bool mmapped:1;
bool released:1;
const struct vm_operations_struct *vm_ops;
ANDROID_KABI_RESERVE(1);
};
struct kernfs_ops {


@@ -10,6 +10,7 @@
#include <linux/key.h>
#include <linux/errno.h>
#include <linux/android_kabi.h>
#ifdef CONFIG_KEYS
@@ -155,6 +156,9 @@ struct key_type {
int (*asym_verify_signature)(struct kernel_pkey_params *params,
const void *in, const void *in2);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
/* internal fields */
struct list_head link; /* link in types list */
struct lock_class_key lock_class; /* key->sem lock class */


@@ -331,6 +331,7 @@ struct mem_cgroup {
struct deferred_split deferred_split_queue;
#endif
ANDROID_OEM_DATA(1);
struct mem_cgroup_per_node *nodeinfo[0];
/* WARNING: nodeinfo must be the last member here */
};


@@ -598,6 +598,8 @@ struct mm_struct {
#ifdef CONFIG_IOMMU_SUPPORT
u32 pasid;
#endif
ANDROID_KABI_RESERVE(1);
} __randomize_layout;
/*


@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/android_kabi.h>
struct mmc_cid {
unsigned int manfid;
@@ -236,6 +237,8 @@ struct mmc_part {
#define MMC_BLK_DATA_AREA_BOOT (1<<1)
#define MMC_BLK_DATA_AREA_GP (1<<2)
#define MMC_BLK_DATA_AREA_RPMB (1<<3)
ANDROID_KABI_RESERVE(1);
};
/*
@@ -314,6 +317,8 @@ struct mmc_card {
unsigned int bouncesz; /* Bounce buffer size */
struct workqueue_struct *complete_wq; /* Private workqueue */
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_VENDOR_DATA(1);
};


@@ -16,6 +16,7 @@
#include <linux/mmc/pm.h>
#include <linux/dma-direction.h>
#include <linux/keyslot-manager.h>
#include <linux/android_kabi.h>
#include <linux/android_vendor.h>
@@ -176,6 +177,9 @@ struct mmc_host_ops {
*/
int (*multi_io_quirk)(struct mmc_card *card,
unsigned int direction, int blk_size);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
struct mmc_cqe_ops {
@@ -220,6 +224,9 @@ struct mmc_cqe_ops {
* will have zero data bytes transferred.
*/
void (*cqe_recovery_finish)(struct mmc_host *host);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
struct mmc_async_req {
@@ -485,6 +492,8 @@ struct mmc_host {
/* Host Software Queue support */
bool hsq_enabled;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_VENDOR_DATA(1);
ANDROID_OEM_DATA(1);


@@ -778,6 +778,7 @@ typedef struct pglist_data {
int kswapd_failures; /* Number of 'reclaimed == 0' runs */
ANDROID_OEM_DATA(1);
#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
enum zone_type kcompactd_highest_zoneidx;


@@ -14,6 +14,7 @@
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <linux/sockptr.h>
#include <linux/android_kabi.h>
#include <net/net_namespace.h>
static inline int NF_DROP_GETERR(int verdict)
@@ -171,6 +172,8 @@ struct nf_sockopt_ops {
int (*get)(struct sock *sk, int optval, void __user *user, int *len);
/* Use the module struct to lock set/get code in place */
struct module *owner;
ANDROID_KABI_RESERVE(1);
};
/* Function to register/unregister hook points. */
@@ -373,6 +376,8 @@ struct nf_nat_hook {
unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct,
enum nf_nat_manip_type mtype,
enum ip_conntrack_dir dir);
ANDROID_KABI_RESERVE(1);
};
extern struct nf_nat_hook __rcu *nf_nat_hook;
@@ -457,6 +462,8 @@ struct nf_ct_hook {
void (*destroy)(struct nf_conntrack *);
bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
const struct sk_buff *);
ANDROID_KABI_RESERVE(1);
};
extern struct nf_ct_hook __rcu *nf_ct_hook;
@@ -474,6 +481,8 @@ struct nfnl_ct_hook {
u32 portid, u32 report);
void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, s32 off);
ANDROID_KABI_RESERVE(1);
};
extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;


@@ -14,6 +14,7 @@
#include <linux/netfilter/x_tables.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include <linux/android_kabi.h>
#include <net/netlink.h>
#include <uapi/linux/netfilter/ipset/ip_set.h>
@@ -190,6 +191,8 @@ struct ip_set_type_variant {
bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
/* Region-locking is used */
bool region_lock;
ANDROID_KABI_RESERVE(1);
};
struct ip_set_region {
@@ -228,6 +231,8 @@ struct ip_set_type {
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
ANDROID_KABI_RESERVE(1);
};
/* register and unregister set type */
@@ -270,6 +275,8 @@ struct ip_set {
size_t offset[IPSET_EXT_ID_MAX];
/* The type specific data */
void *data;
ANDROID_KABI_RESERVE(1);
};
static inline void


@@ -4,6 +4,7 @@
#include <linux/netlink.h>
#include <linux/capability.h>
#include <linux/android_kabi.h>
#include <net/netlink.h>
#include <uapi/linux/netfilter/nfnetlink.h>
@@ -22,6 +23,8 @@ struct nfnl_callback {
struct netlink_ext_ack *extack);
const struct nla_policy *policy; /* netlink attribute policy */
const u_int16_t attr_count; /* number of nlattr's */
ANDROID_KABI_RESERVE(1);
};
enum nfnl_abort_action {
@@ -41,6 +44,8 @@ struct nfnetlink_subsystem {
enum nfnl_abort_action action);
void (*cleanup)(struct net *net);
bool (*valid_genid)(struct net *net, u32 genid);
ANDROID_KABI_RESERVE(1);
};
int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);


@@ -7,6 +7,7 @@
#ifndef __LINUX_IP6_NETFILTER_H
#define __LINUX_IP6_NETFILTER_H
#include <linux/android_kabi.h>
#include <uapi/linux/netfilter_ipv6.h>
#include <net/tcp.h>
@@ -65,6 +66,8 @@ struct nf_ipv6_ops {
const struct nf_bridge_frag_data *data,
struct sk_buff *));
#endif
ANDROID_KABI_RESERVE(1);
};
#ifdef CONFIG_NETFILTER


@@ -44,7 +44,8 @@ int move_freepages_block(struct zone *zone, struct page *page,
*/
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
-			 unsigned migratetype, int flags);
+			 unsigned migratetype, int flags,
+			 unsigned long *failed_pfn);
/*
* Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
@@ -58,7 +59,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
* Test all pages in [start_pfn, end_pfn) are isolated or not.
*/
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
-			int isol_flags);
+			int isol_flags, unsigned long *failed_pfn);
struct page *alloc_migrate_target(struct page *page, unsigned long private);
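
The extra out-parameter on the isolation helpers likewise lets callers see which pfn blocked them. A minimal hypothetical use:

/* Hypothetical: log the first pfn that failed the isolation test. */
static bool range_is_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long failed_pfn = 0;

	if (test_pages_isolated(start_pfn, end_pfn, 0, &failed_pfn)) {
		pr_debug("pfn %lu is not isolated\n", failed_pfn);
		return false;
	}
	return true;
}
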


@@ -558,6 +558,10 @@ struct pci_host_bridge {
resource_size_t start,
resource_size_t size,
resource_size_t align);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
unsigned long private[] ____cacheline_aligned;
};
@@ -745,6 +749,8 @@ struct pci_ops {
void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
ANDROID_KABI_RESERVE(1);
};
/*
@@ -820,6 +826,8 @@ struct pci_error_handlers {
/* Device driver may resume normal operations */
void (*resume)(struct pci_dev *dev);
ANDROID_KABI_RESERVE(1);
};


@@ -27,6 +27,7 @@
#include <linux/irqreturn.h>
#include <linux/iopoll.h>
#include <linux/refcount.h>
#include <linux/android_kabi.h>
#include <linux/atomic.h>
@@ -645,6 +646,11 @@ struct phy_device {
/* MACsec management functions */
const struct macsec_ops *macsec_ops;
#endif
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
};
#define to_phy_device(d) container_of(to_mdio_device(d), \
struct phy_device, mdio)
@@ -875,6 +881,9 @@ struct phy_driver {
int (*get_sqi)(struct phy_device *dev);
/** @get_sqi_max: Get the maximum signal quality indication */
int (*get_sqi_max)(struct phy_device *dev);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
struct phy_driver, mdiodrv)


@@ -11,6 +11,7 @@
#define _PLATFORM_DEVICE_H_
#include <linux/device.h>
#include <linux/android_kabi.h>
#define PLATFORM_DEVID_NONE (-1)
#define PLATFORM_DEVID_AUTO (-2)
@@ -37,6 +38,9 @@ struct platform_device {
/* arch specific additions */
struct pdev_archdata archdata;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
#define platform_get_device_id(pdev) ((pdev)->id_entry)
@@ -94,6 +98,8 @@ struct platform_device_info {
u64 dma_mask;
const struct property_entry *properties;
ANDROID_KABI_RESERVE(1);
};
extern struct platform_device *platform_device_register_full(
const struct platform_device_info *pdevinfo);
@@ -206,6 +212,8 @@ struct platform_driver {
struct device_driver driver;
const struct platform_device_id *id_table;
bool prevent_deferred_probe;
ANDROID_KABI_RESERVE(1);
};
#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \


@@ -15,6 +15,7 @@
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/completion.h>
#include <linux/android_kabi.h>
/*
* Callbacks for platform drivers to implement.
@@ -299,6 +300,8 @@ struct dev_pm_ops {
int (*runtime_suspend)(struct device *dev);
int (*runtime_resume)(struct device *dev);
int (*runtime_idle)(struct device *dev);
ANDROID_KABI_RESERVE(1);
};
#ifdef CONFIG_PM_SLEEP
@@ -621,6 +624,9 @@ struct dev_pm_info {
struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
void (*set_latency_tolerance)(struct device *, s32);
struct dev_pm_qos *qos;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
};
extern int dev_pm_get_subsys_data(struct device *dev);
@@ -647,6 +653,8 @@ struct dev_pm_domain {
int (*activate)(struct device *dev);
void (*sync)(struct device *dev);
void (*dismiss)(struct device *dev);
ANDROID_KABI_RESERVE(1);
};
/*


@@ -17,6 +17,7 @@
#include <linux/leds.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/android_kabi.h>
/*
* All voltages, currents, charges, energies, time and temperatures in uV,
@@ -233,6 +234,8 @@ struct power_supply_config {
char **supplied_to;
size_t num_supplicants;
ANDROID_KABI_RESERVE(1);
};
/* Description of power supply */
@@ -274,6 +277,8 @@ struct power_supply_desc {
bool no_thermal;
/* For APM emulation, think legacy userspace. */
int use_for_apm;
ANDROID_KABI_RESERVE(1);
};
struct power_supply {
@@ -315,6 +320,8 @@ struct power_supply {
struct led_trigger *charging_blink_full_solid_trig;
char *charging_blink_full_solid_trig_name;
#endif
ANDROID_KABI_RESERVE(1);
};
/*
@@ -382,6 +389,8 @@ struct power_supply_battery_info {
int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX];
struct power_supply_resistance_temp_table *resist_table;
int resist_table_size;
ANDROID_KABI_RESERVE(1);
};
extern struct atomic_notifier_head power_supply_notifier;


@@ -157,6 +157,7 @@ struct psi_group {
struct timer_list poll_timer;
wait_queue_head_t poll_wait;
atomic_t poll_wakeup;
atomic_t poll_scheduled;
/* Protects data used by the monitor */
struct mutex trigger_lock;
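
Context for the new field: per the FROMLIST commit, poll-work rescheduling stops relying on timer_pending() and uses this explicit flag instead. A simplified sketch of the rule, assuming the poll_task member that 5.10's psi_group already carries (control flow abridged from the posted patch, so treat the details as approximate):

/*
 * Sketch: poll_scheduled makes "is poll work queued?" explicit instead
 * of inferring it from timer_pending(), which races with the timer
 * function and can drop a needed reschedule.
 */
static void psi_schedule_poll_work(struct psi_group *group,
				   unsigned long delay)
{
	struct task_struct *task;

	/* atomic_xchg: only the first caller after an idle period wins. */
	if (atomic_xchg(&group->poll_scheduled, 1))
		return;

	rcu_read_lock();
	task = rcu_dereference(group->poll_task);
	if (likely(task))
		mod_timer(&group->poll_timer, jiffies + delay);
	else
		/* Poll task is gone; undo the claim so others may retry. */
		atomic_set(&group->poll_scheduled, 0);
	rcu_read_unlock();
}
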
