Merge "Merge keystone/android12-5.10-keystone-qcom-release.66+ (698fa19
) into msm-5.10"
This commit is contained in:

committed by
Gerrit - the friendly Code Review server

commit
20fbd1abd0
@@ -1 +1 @@
-672d51b2a7fa01a15c9d55f33884808a115db18d
+a7ab784f601a93a78c1c22cd0aacc2af64d8e3c8
File diff suppressed because it is too large
@@ -63,6 +63,7 @@
 bitmap_to_arr32
 __bitmap_weight
 blk_alloc_queue
+blk_check_plugged
 blk_cleanup_queue
 blkdev_get_by_dev
 blkdev_put
@@ -686,6 +687,7 @@
 drm_property_create_range
 drm_property_lookup_blob
 drm_property_replace_blob
+drm_puts
 __drm_puts_seq_file
 drm_read
 drm_rect_clip_scaled
@@ -1012,6 +1014,7 @@
 __kfifo_in
 __kfifo_init
 __kfifo_out
+__kfifo_out_peek
 kfree
 kfree_sensitive
 kfree_skb
@@ -1032,6 +1035,7 @@
 kobject_add
 kobject_create_and_add
 kobject_del
+kobject_get
 kobject_init
 kobject_init_and_add
 kobject_put
@@ -1079,6 +1083,7 @@
 ktime_get_real_seconds
 ktime_get_real_ts64
 ktime_get_seconds
+ktime_get_snapshot
 ktime_get_ts64
 ktime_get_with_offset
 kvfree
@@ -1590,6 +1595,7 @@
 rtc_valid_tm
 __rt_mutex_init
 rt_mutex_lock
+rt_mutex_trylock
 rt_mutex_unlock
 rtnl_is_locked
 rtnl_lock
@@ -20,6 +20,9 @@
 bcmp
 bdput
 __bitmap_and
+blocking_notifier_call_chain
+blocking_notifier_chain_register
+blocking_notifier_chain_unregister
 bpf_trace_run1
 bpf_trace_run2
 bpf_trace_run3
@@ -103,6 +106,7 @@
 debugfs_create_dir
 debugfs_create_file
 debugfs_remove
+default_llseek
 deferred_free
 delayed_work_timer_fn
 del_timer
@@ -161,6 +165,7 @@
 devm_phy_create
 devm_phy_get
 devm_pinctrl_get
+devm_pinctrl_put
 devm_platform_ioremap_resource
 __devm_regmap_init
 __devm_regmap_init_i2c
@@ -202,6 +207,12 @@
 dma_buf_map_attachment
 dma_buf_put
 dma_buf_unmap_attachment
+dma_fence_context_alloc
+dma_fence_default_wait
+dma_fence_init
+dma_fence_release
+dma_fence_signal_locked
+dma_fence_wait_timeout
 dma_free_attrs
 dma_heap_add
 dma_heap_get_dev
@@ -369,6 +380,7 @@
 event_triggers_call
 extcon_set_state_sync
 failure_tracking
+fd_install
 find_next_bit
 finish_wait
 flush_work
@@ -399,6 +411,7 @@
 get_cpu_device
 get_device
 get_random_bytes
+get_unused_fd_flags
 gic_nonsecure_priorities
 gpiochip_generic_free
 gpiochip_generic_request
@@ -431,6 +444,7 @@
 i2c_recover_bus
 i2c_register_driver
 i2c_smbus_read_byte_data
+i2c_smbus_write_byte_data
 i2c_transfer
 i2c_transfer_buffer_flags
 i2c_unregister_device
@@ -511,6 +525,7 @@
 __list_add_valid
 __list_del_entry_valid
 __local_bh_enable_ip
+__lock_page
 __log_post_read_mmio
 __log_read_mmio
 __log_write_mmio
@@ -532,6 +547,7 @@
 __memcpy_fromio
 memdup_user
 memmove
+memparse
 memset
 __memset_io
 memstart_addr
@@ -616,6 +632,7 @@
 of_get_next_child
 of_get_parent
 of_get_property
+of_get_regulator_init_data
 of_graph_get_next_endpoint
 of_graph_get_port_by_id
 of_graph_get_remote_node
@@ -883,6 +900,7 @@
 __stack_chk_fail
 __stack_chk_guard
 strcasecmp
+strcat
 strcmp
 strcpy
 strlcpy
@@ -891,11 +909,14 @@
 strncpy
 strrchr
 strscpy
+strsep
 strstr
 __sw_hweight16
 __sw_hweight32
 __sw_hweight64
 __sw_hweight8
+sync_file_create
+sync_file_get_fence
 synchronize_irq
 synchronize_net
 synchronize_rcu
@@ -948,6 +969,7 @@
 uart_update_timeout
 uart_write_wakeup
 __udelay
+unlock_page
 __unregister_chrdev
 unregister_chrdev_region
 unregister_inet6addr_notifier
@@ -1061,6 +1083,7 @@
 vmap
 vsnprintf
 vunmap
+wait_for_completion
 wait_for_completion_interruptible
 wait_for_completion_interruptible_timeout
 wait_for_completion_timeout
@@ -1252,9 +1275,6 @@
 mmc_cqe_request_done
 
 # required by device_cooling.ko
-blocking_notifier_call_chain
-blocking_notifier_chain_register
-blocking_notifier_chain_unregister
 thermal_cooling_device_unregister
 thermal_of_cooling_device_register
 
@@ -1286,6 +1306,29 @@
 devm_phy_optional_get
 drm_of_encoder_active_endpoint
 
+# required by fb.ko
+__arch_copy_in_user
+compat_alloc_user_space
+console_lock
+console_unlock
+fb_mode_option
+file_update_time
+file_write_and_wait_range
+ignore_console_lock_warning
+int_sqrt
+is_console_locked
+__memcpy_toio
+of_get_videomode
+page_mkclean
+proc_create_seq_private
+simple_strtol
+vm_get_page_prot
+vm_iomap_memory
+
+# required by fb_fence.ko
+put_unused_fd
+system_unbound_wq
+
 # required by fec.ko
 ethtool_op_get_ts_info
 mdiobus_alloc_size
@@ -1332,6 +1375,12 @@
 tso_count_descs
 tso_start
 
+# required by fp9931-core.ko
+devm_mfd_add_devices
+
+# required by fp9931-regulator.ko
+gpiod_get_raw_value
+
 # required by fsl-edma-v3.ko
 dma_get_slave_channel
 of_dma_controller_free
@@ -1358,13 +1407,7 @@
 dev_pm_opp_add
 dev_pm_opp_remove
 dma_fence_array_ops
-dma_fence_context_alloc
-dma_fence_default_wait
-dma_fence_init
-dma_fence_release
 dma_fence_signal
-dma_fence_signal_locked
-dma_fence_wait_timeout
 down
 driver_create_file
 driver_remove_file
@@ -1372,10 +1415,8 @@
 drm_gem_object_lookup
 drm_gem_object_release
 drm_gem_private_object_init
-fd_install
 find_vma
 find_vpid
-get_unused_fd_flags
 get_user_pages
 hrtimer_resolution
 iommu_attach_device
@@ -1392,24 +1433,18 @@
 platform_bus_type
 reset_control_reset
 schedule_hrtimeout
-sync_file_create
-sync_file_get_fence
 __task_pid_nr_ns
 _totalram_pages
 vm_mmap
 vm_munmap
 vm_zone_stat
 
-# required by gmsl-max9286.ko
-i2c_smbus_write_byte_data
-
 # required by goodix.ko
 gpiod_direction_input
 input_alloc_absinfo
 input_mt_sync_frame
 touchscreen_parse_properties
 touchscreen_report_pos
-wait_for_completion
 
 # required by gpio-imx-rpmsg.ko
 __irq_alloc_descs
@@ -1444,7 +1479,6 @@
 # required by gpio-regulator.ko
 devm_kstrdup
 gpiod_count
-of_get_regulator_init_data
 
 # required by gpio-reset.ko
 reset_controller_register
@@ -1762,11 +1796,9 @@
 sdio_writeb
 sdio_writesb
 skb_realloc_headroom
-strcat
 strchr
 strim
 strncasecmp
-strsep
 vprintk
 wakeup_source_add
 wakeup_source_remove
@@ -1831,6 +1863,9 @@
 v4l2_m2m_dqbuf
 v4l2_m2m_qbuf
 
+# required by mxc_epdc_v2_fb.ko
+fb_get_options
+
 # required by mxs-dma.ko
 dmaenginem_async_device_register
 tasklet_setup
@@ -1868,9 +1903,6 @@
 v4l2_event_subdev_unsubscribe
 __v4l2_find_nearest_size
 
-# required by ov5640_camera_mipi_v2.ko
-devm_pinctrl_put
-
 # required by panel-raydium-rm67191.ko
 devm_backlight_device_register
 mipi_dsi_dcs_get_display_brightness
@@ -2125,7 +2157,6 @@
 snd_interval_refine
 
 # required by snd-soc-imx-audmux.ko
-default_llseek
 simple_open
 simple_read_from_buffer
 
@@ -2339,7 +2370,6 @@
 fsync_bdev
 __get_free_pages
 __init_rwsem
-memparse
 memset64
 __num_online_cpus
 page_endio
@@ -2362,7 +2392,6 @@
 kern_mount
 kern_unmount
 kill_anon_super
-__lock_page
 page_mapping
 _raw_read_lock
 _raw_read_unlock
@@ -2370,5 +2399,4 @@
 _raw_write_unlock
 register_shrinker
 __SetPageMovable
-unlock_page
 unregister_shrinker
@@ -3327,6 +3327,7 @@
 woken_wake_function
 work_busy
 work_on_cpu
+wq_worker_comm
 ww_mutex_lock
 ww_mutex_unlock
 __xa_alloc
@@ -899,6 +899,7 @@
 finish_wait
 firmware_request_nowarn
 flush_dcache_page
+flush_delayed_fput
 flush_delayed_work
 __flush_icache_range
 flush_work
@@ -1104,6 +1105,7 @@
 idr_replace
 iio_channel_get_all
 iio_read_channel_processed
+iio_write_channel_raw
 import_iovec
 in4_pton
 in6_pton
@@ -2613,6 +2615,7 @@
 __tracepoint_android_rvh_cpu_cgroup_online
 __tracepoint_android_rvh_cpufreq_transition
 __tracepoint_android_rvh_dequeue_task
+__tracepoint_android_rvh_do_ptrauth_fault
 __tracepoint_android_rvh_do_sched_yield
 __tracepoint_android_rvh_enqueue_task
 __tracepoint_android_rvh_find_busiest_queue
@@ -2696,6 +2699,7 @@
 __tracepoint_android_vh_show_max_freq
 __tracepoint_android_vh_show_resume_epoch_val
 __tracepoint_android_vh_show_suspend_epoch_val
+__tracepoint_android_vh_subpage_dma_contig_alloc
 __tracepoint_android_vh_timer_calc_index
 __tracepoint_android_vh_ufs_check_int_errors
 __tracepoint_android_vh_ufs_clock_scaling
@@ -4587,23 +4587,20 @@ static int binder_thread_release(struct binder_proc *proc,
     __release(&t->lock);
 
     /*
-     * If this thread used poll, make sure we remove the waitqueue
-     * from any epoll data structures holding it with POLLFREE.
-     * waitqueue_active() is safe to use here because we're holding
-     * the inner lock.
+     * If this thread used poll, make sure we remove the waitqueue from any
+     * poll data structures holding it.
      */
-    if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
-        waitqueue_active(&thread->wait)) {
-        wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
-    }
+    if (thread->looper & BINDER_LOOPER_STATE_POLL)
+        wake_up_pollfree(&thread->wait);
 
     binder_inner_proc_unlock(thread->proc);
 
     /*
-     * This is needed to avoid races between wake_up_poll() above and
-     * and ep_remove_waitqueue() called for other reasons (eg the epoll file
-     * descriptor being closed); ep_remove_waitqueue() holds an RCU read
-     * lock, so we can be sure it's done after calling synchronize_rcu().
+     * This is needed to avoid races between wake_up_pollfree() above and
+     * someone else removing the last entry from the queue for other reasons
+     * (e.g. ep_remove_wait_queue() being called due to an epoll file
+     * descriptor being closed). Such other users hold an RCU read lock, so
+     * we can be sure they're done after we call synchronize_rcu().
      */
     if (thread->looper & BINDER_LOOPER_STATE_POLL)
         synchronize_rcu();
@@ -110,6 +110,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_wake);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_write_finished);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_rwsem_list_add);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_futex_plist_add);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_sleep_start);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_start);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_finish);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rtmutex_wait_start);
@@ -400,3 +401,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_css_offline);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_css_online);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_free);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_alloc);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_subpage_dma_contig_alloc);
@@ -3469,6 +3469,14 @@ static int __clk_core_init(struct clk_core *core)
 
     clk_prepare_lock();
 
+    /*
+     * Set hw->core after grabbing the prepare_lock to synchronize with
+     * callers of clk_core_fill_parent_index() where we treat hw->core
+     * being NULL as the clk not being registered yet. This is crucial so
+     * that clks aren't parented until their parent is fully registered.
+     */
+    core->hw->core = core;
+
     ret = clk_pm_runtime_get(core);
     if (ret)
         goto unlock;
@@ -3640,8 +3648,10 @@ static int __clk_core_init(struct clk_core *core)
 out:
     clk_pm_runtime_put(core);
 unlock:
-    if (ret)
+    if (ret) {
         hlist_del_init(&core->child_node);
+        core->hw->core = NULL;
+    }
 
     clk_prepare_unlock();
 
@@ -3905,7 +3915,6 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
     core->num_parents = init->num_parents;
     core->min_rate = 0;
     core->max_rate = ULONG_MAX;
-    hw->core = core;
 
     ret = clk_core_populate_parent_map(core, init);
     if (ret)
@@ -3923,7 +3932,7 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
         goto fail_create_clk;
     }
 
-    clk_core_link_consumer(hw->core, hw->clk);
+    clk_core_link_consumer(core, hw->clk);
 
     ret = __clk_core_init(core);
     if (!ret)
@@ -350,29 +350,18 @@ out:
 
 static long _dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
 {
-    long ret = 0;
-
-    dma_resv_lock(dmabuf->resv, NULL);
-    if (!list_empty(&dmabuf->attachments)) {
-        ret = -EBUSY;
-        goto out_unlock;
-    }
     spin_lock(&dmabuf->name_lock);
     kfree(dmabuf->name);
     dmabuf->name = name;
     spin_unlock(&dmabuf->name_lock);
 
-out_unlock:
-    dma_resv_unlock(dmabuf->resv);
-    return ret;
+    return 0;
 }
 
 /**
  * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
- * The name of the dma-buf buffer can only be set when the dma-buf is not
- * attached to any devices. It could theoritically support changing the
- * name of the dma-buf if the same piece of memory is used for multiple
- * purpose between different devices.
+ * It could support changing the name of the dma-buf if the same piece of
+ * memory is used for multiple purpose between different devices.
  *
  * @dmabuf: [in]     dmabuf buffer that will be renamed.
  * @buf:    [in]     A piece of userspace memory that contains the name of
@@ -207,14 +207,14 @@ config HID_CHERRY
 
 config HID_CHICONY
     tristate "Chicony devices"
-    depends on HID
+    depends on USB_HID
     default !EXPERT
     help
       Support for Chicony Tactical pad and special keys on Chicony keyboards.
 
 config HID_CORSAIR
     tristate "Corsair devices"
-    depends on HID && USB && LEDS_CLASS
+    depends on USB_HID && LEDS_CLASS
     help
       Support for Corsair devices that are not fully compliant with the
       HID standard.
@@ -245,7 +245,7 @@ config HID_MACALLY
 
 config HID_PRODIKEYS
     tristate "Prodikeys PC-MIDI Keyboard support"
-    depends on HID && SND
+    depends on USB_HID && SND
     select SND_RAWMIDI
     help
       Support for Prodikeys PC-MIDI Keyboard device support.
@@ -541,7 +541,7 @@ config HID_LENOVO
 
 config HID_LOGITECH
     tristate "Logitech devices"
-    depends on HID
+    depends on USB_HID
     depends on LEDS_CLASS
     default !EXPERT
     help
@@ -918,7 +918,7 @@ config HID_SAITEK
 
 config HID_SAMSUNG
     tristate "Samsung InfraRed remote control or keyboards"
-    depends on HID
+    depends on USB_HID
     help
       Support for Samsung InfraRed remote control or keyboards.
 
@@ -918,8 +918,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
     if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
         drvdata->tp = &asus_i2c_tp;
 
-    if ((drvdata->quirks & QUIRK_T100_KEYBOARD) &&
-        hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+    if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) {
         struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
 
         if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
@@ -947,8 +946,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
         drvdata->tp = &asus_t100chi_tp;
     }
 
-    if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
-        hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+    if ((drvdata->quirks & QUIRK_MEDION_E1239T) && hid_is_usb(hdev)) {
         struct usb_host_interface *alt =
             to_usb_interface(hdev->dev.parent)->altsetting;
 
|
@@ -191,7 +191,7 @@ static void bigben_worker(struct work_struct *work)
|
|||||||
struct bigben_device, worker);
|
struct bigben_device, worker);
|
||||||
struct hid_field *report_field = bigben->report->field[0];
|
struct hid_field *report_field = bigben->report->field[0];
|
||||||
|
|
||||||
if (bigben->removed)
|
if (bigben->removed || !report_field)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (bigben->work_led) {
|
if (bigben->work_led) {
|
||||||
|
@@ -58,8 +58,12 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
|
|||||||
static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
||||||
unsigned int *rsize)
|
unsigned int *rsize)
|
||||||
{
|
{
|
||||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
struct usb_interface *intf;
|
||||||
|
|
||||||
|
if (!hid_is_usb(hdev))
|
||||||
|
return rdesc;
|
||||||
|
|
||||||
|
intf = to_usb_interface(hdev->dev.parent);
|
||||||
if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
|
if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
|
||||||
/* Change usage maximum and logical maximum from 0x7fff to
|
/* Change usage maximum and logical maximum from 0x7fff to
|
||||||
* 0x2fff, so they don't exceed HID_MAX_USAGES */
|
* 0x2fff, so they don't exceed HID_MAX_USAGES */
|
||||||
|
@@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id)
|
|||||||
int ret;
|
int ret;
|
||||||
unsigned long quirks = id->driver_data;
|
unsigned long quirks = id->driver_data;
|
||||||
struct corsair_drvdata *drvdata;
|
struct corsair_drvdata *drvdata;
|
||||||
struct usb_interface *usbif = to_usb_interface(dev->dev.parent);
|
struct usb_interface *usbif;
|
||||||
|
|
||||||
|
if (!hid_is_usb(dev))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
usbif = to_usb_interface(dev->dev.parent);
|
||||||
|
|
||||||
drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata),
|
drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata),
|
||||||
GFP_KERNEL);
|
GFP_KERNEL);
|
||||||
|
@@ -50,7 +50,7 @@ struct elan_drvdata {
|
|||||||
|
|
||||||
static int is_not_elan_touchpad(struct hid_device *hdev)
|
static int is_not_elan_touchpad(struct hid_device *hdev)
|
||||||
{
|
{
|
||||||
if (hdev->bus == BUS_USB) {
|
if (hid_is_usb(hdev)) {
|
||||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||||
|
|
||||||
return (intf->altsetting->desc.bInterfaceNumber !=
|
return (intf->altsetting->desc.bInterfaceNumber !=
|
||||||
|
@@ -229,6 +229,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
|||||||
struct elo_priv *priv;
|
struct elo_priv *priv;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (!hid_is_usb(hdev))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||||
if (!priv)
|
if (!priv)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
@@ -140,12 +140,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
|
|||||||
static int holtek_kbd_probe(struct hid_device *hdev,
|
static int holtek_kbd_probe(struct hid_device *hdev,
|
||||||
const struct hid_device_id *id)
|
const struct hid_device_id *id)
|
||||||
{
|
{
|
||||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
struct usb_interface *intf;
|
||||||
int ret = hid_parse(hdev);
|
int ret;
|
||||||
|
|
||||||
|
if (!hid_is_usb(hdev))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
ret = hid_parse(hdev);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
|
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
|
||||||
|
|
||||||
|
intf = to_usb_interface(hdev->dev.parent);
|
||||||
if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
|
if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
|
||||||
struct hid_input *hidinput;
|
struct hid_input *hidinput;
|
||||||
list_for_each_entry(hidinput, &hdev->inputs, list) {
|
list_for_each_entry(hidinput, &hdev->inputs, list) {
|
||||||
|
@@ -62,6 +62,14 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
|||||||
return rdesc;
|
return rdesc;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int holtek_mouse_probe(struct hid_device *hdev,
|
||||||
|
const struct hid_device_id *id)
|
||||||
|
{
|
||||||
|
if (!hid_is_usb(hdev))
|
||||||
|
return -EINVAL;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static const struct hid_device_id holtek_mouse_devices[] = {
|
static const struct hid_device_id holtek_mouse_devices[] = {
|
||||||
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
|
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
|
||||||
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
|
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
|
||||||
@@ -83,6 +91,7 @@ static struct hid_driver holtek_mouse_driver = {
     .name = "holtek_mouse",
     .id_table = holtek_mouse_devices,
     .report_fixup = holtek_mouse_report_fixup,
+    .probe = holtek_mouse_probe,
 };
 
 module_hid_driver(holtek_mouse_driver);
|
@@ -769,12 +769,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report,
|
|||||||
|
|
||||||
static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||||
{
|
{
|
||||||
struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
|
struct usb_interface *iface;
|
||||||
__u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
|
__u8 iface_num;
|
||||||
unsigned int connect_mask = HID_CONNECT_DEFAULT;
|
unsigned int connect_mask = HID_CONNECT_DEFAULT;
|
||||||
struct lg_drv_data *drv_data;
|
struct lg_drv_data *drv_data;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (!hid_is_usb(hdev))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
iface = to_usb_interface(hdev->dev.parent);
|
||||||
|
iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
|
||||||
|
|
||||||
/* G29 only work with the 1st interface */
|
/* G29 only work with the 1st interface */
|
||||||
if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
|
if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
|
||||||
(iface_num != 0)) {
|
(iface_num != 0)) {
|
||||||
|
@@ -1693,7 +1693,7 @@ static int logi_dj_probe(struct hid_device *hdev,
|
|||||||
case recvr_type_27mhz: no_dj_interfaces = 2; break;
|
case recvr_type_27mhz: no_dj_interfaces = 2; break;
|
||||||
case recvr_type_bluetooth: no_dj_interfaces = 2; break;
|
case recvr_type_bluetooth: no_dj_interfaces = 2; break;
|
||||||
}
|
}
|
||||||
if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
|
if (hid_is_usb(hdev)) {
|
||||||
intf = to_usb_interface(hdev->dev.parent);
|
intf = to_usb_interface(hdev->dev.parent);
|
||||||
if (intf && intf->altsetting->desc.bInterfaceNumber >=
|
if (intf && intf->altsetting->desc.bInterfaceNumber >=
|
||||||
no_dj_interfaces) {
|
no_dj_interfaces) {
|
||||||
|
@@ -798,12 +798,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
|
|||||||
static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
struct usb_interface *intf;
|
||||||
unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
|
unsigned short ifnum;
|
||||||
unsigned long quirks = id->driver_data;
|
unsigned long quirks = id->driver_data;
|
||||||
struct pk_device *pk;
|
struct pk_device *pk;
|
||||||
struct pcmidi_snd *pm = NULL;
|
struct pcmidi_snd *pm = NULL;
|
||||||
|
|
||||||
|
if (!hid_is_usb(hdev))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
intf = to_usb_interface(hdev->dev.parent);
|
||||||
|
ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
|
||||||
|
|
||||||
pk = kzalloc(sizeof(*pk), GFP_KERNEL);
|
pk = kzalloc(sizeof(*pk), GFP_KERNEL);
|
||||||
if (pk == NULL) {
|
if (pk == NULL) {
|
||||||
hid_err(hdev, "can't alloc descriptor\n");
|
hid_err(hdev, "can't alloc descriptor\n");
|
||||||
|
@@ -344,6 +344,9 @@ static int arvo_probe(struct hid_device *hdev,
 {
     int retval;
 
+    if (!hid_is_usb(hdev))
+        return -EINVAL;
+
     retval = hid_parse(hdev);
     if (retval) {
         hid_err(hdev, "parse failed\n");
@@ -324,6 +324,9 @@ static int isku_probe(struct hid_device *hdev,
 {
     int retval;
 
+    if (!hid_is_usb(hdev))
+        return -EINVAL;
+
     retval = hid_parse(hdev);
     if (retval) {
         hid_err(hdev, "parse failed\n");
@@ -749,6 +749,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
     int retval;
 
+    if (!hid_is_usb(hdev))
+        return -EINVAL;
+
     retval = hid_parse(hdev);
     if (retval) {
         hid_err(hdev, "parse failed\n");
@@ -431,6 +431,9 @@ static int koneplus_probe(struct hid_device *hdev,
 {
     int retval;
 
+    if (!hid_is_usb(hdev))
+        return -EINVAL;
+
     retval = hid_parse(hdev);
     if (retval) {
         hid_err(hdev, "parse failed\n");
@@ -133,6 +133,9 @@ static int konepure_probe(struct hid_device *hdev,
 {
     int retval;
 
+    if (!hid_is_usb(hdev))
+        return -EINVAL;
+
     retval = hid_parse(hdev);
     if (retval) {
         hid_err(hdev, "parse failed\n");
@@ -501,6 +501,9 @@ static int kovaplus_probe(struct hid_device *hdev,
 {
     int retval;
 
+    if (!hid_is_usb(hdev))
+        return -EINVAL;
+
     retval = hid_parse(hdev);
     if (retval) {
         hid_err(hdev, "parse failed\n");
@@ -160,6 +160,9 @@ static int lua_probe(struct hid_device *hdev,
 {
     int retval;
 
+    if (!hid_is_usb(hdev))
+        return -EINVAL;
+
     retval = hid_parse(hdev);
     if (retval) {
         hid_err(hdev, "parse failed\n");
@@ -449,6 +449,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
 {
     int retval;
 
+    if (!hid_is_usb(hdev))
+        return -EINVAL;
+
     retval = hid_parse(hdev);
     if (retval) {
         hid_err(hdev, "parse failed\n");
@@ -141,6 +141,9 @@ static int ryos_probe(struct hid_device *hdev,
 {
     int retval;
 
+    if (!hid_is_usb(hdev))
+        return -EINVAL;
+
     retval = hid_parse(hdev);
     if (retval) {
         hid_err(hdev, "parse failed\n");
@@ -113,6 +113,9 @@ static int savu_probe(struct hid_device *hdev,
 {
     int retval;
 
+    if (!hid_is_usb(hdev))
+        return -EINVAL;
+
     retval = hid_parse(hdev);
     if (retval) {
         hid_err(hdev, "parse failed\n");
@@ -152,6 +152,9 @@ static int samsung_probe(struct hid_device *hdev,
     int ret;
     unsigned int cmask = HID_CONNECT_DEFAULT;
 
+    if (!hid_is_usb(hdev))
+        return -EINVAL;
+
     ret = hid_parse(hdev);
     if (ret) {
         hid_err(hdev, "parse failed\n");
@@ -286,7 +286,7 @@ static int u2fzero_probe(struct hid_device *hdev,
     unsigned int minor;
     int ret;
 
-    if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
+    if (!hid_is_usb(hdev))
         return -EINVAL;
 
     dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
@@ -164,6 +164,9 @@ static int uclogic_probe(struct hid_device *hdev,
     struct uclogic_drvdata *drvdata = NULL;
     bool params_initialized = false;
 
+    if (!hid_is_usb(hdev))
+        return -EINVAL;
+
     /*
      * libinput requires the pad interface to be on a different node
      * than the pen, so use QUIRK_MULTI_INPUT for all tablets.
@@ -841,8 +841,7 @@ int uclogic_params_init(struct uclogic_params *params,
     struct uclogic_params p = {0, };
 
     /* Check arguments */
-    if (params == NULL || hdev == NULL ||
-        !hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
+    if (params == NULL || hdev == NULL || !hid_is_usb(hdev)) {
         rc = -EINVAL;
         goto cleanup;
     }
@@ -726,7 +726,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
      * Skip the query for this type and modify defaults based on
      * interface number.
      */
-    if (features->type == WIRELESS) {
+    if (features->type == WIRELESS && intf) {
         if (intf->cur_altsetting->desc.bInterfaceNumber == 0)
             features->device_type = WACOM_DEVICETYPE_WL_MONITOR;
         else
@@ -2217,7 +2217,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
     if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
         char *product_name = wacom->hdev->name;
 
-        if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) {
+        if (hid_is_usb(wacom->hdev)) {
             struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
             struct usb_device *dev = interface_to_usbdev(intf);
             product_name = dev->product;
@@ -2448,6 +2448,9 @@ static void wacom_wireless_work(struct work_struct *work)
 
     wacom_destroy_battery(wacom);
 
+    if (!usbdev)
+        return;
+
     /* Stylus interface */
     hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
     wacom1 = hid_get_drvdata(hdev1);
@@ -2727,8 +2730,6 @@ static void wacom_mode_change_work(struct work_struct *work)
 static int wacom_probe(struct hid_device *hdev,
         const struct hid_device_id *id)
 {
-    struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-    struct usb_device *dev = interface_to_usbdev(intf);
     struct wacom *wacom;
     struct wacom_wac *wacom_wac;
     struct wacom_features *features;
@@ -2763,8 +2764,14 @@ static int wacom_probe(struct hid_device *hdev,
     wacom_wac->hid_data.inputmode = -1;
     wacom_wac->mode_report = -1;
 
-    wacom->usbdev = dev;
-    wacom->intf = intf;
+    if (hid_is_usb(hdev)) {
+        struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+        struct usb_device *dev = interface_to_usbdev(intf);
+
+        wacom->usbdev = dev;
+        wacom->intf = intf;
+    }
+
     mutex_init(&wacom->lock);
     INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
     INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
@@ -5180,6 +5180,19 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
     return retval;
 }
 
+/* Release the resources allocated for processing a SCSI command. */
+static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
+                                    struct ufshcd_lrb *lrbp)
+{
+    struct scsi_cmnd *cmd = lrbp->cmd;
+
+    scsi_dma_unmap(cmd);
+    ufshcd_crypto_clear_prdt(hba, lrbp);
+    lrbp->cmd = NULL;    /* Mark the command as completed. */
+    ufshcd_release(hba);
+    ufshcd_clk_scaling_update_busy(hba);
+}
+
 /**
  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
@@ -5190,9 +5203,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 {
     struct ufshcd_lrb *lrbp;
     struct scsi_cmnd *cmd;
-    int result;
     int index;
-    bool update_scaling = false;
 
     for_each_set_bit(index, &completed_reqs, hba->nutrs) {
         if (!test_and_clear_bit(index, &hba->outstanding_reqs))
@@ -5205,16 +5216,10 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
                 ufshcd_update_monitor(hba, lrbp);
             trace_android_vh_ufs_compl_command(hba, lrbp);
             ufshcd_add_command_trace(hba, index, "complete");
-            result = ufshcd_transfer_rsp_status(hba, lrbp);
-            scsi_dma_unmap(cmd);
-            cmd->result = result;
-            ufshcd_crypto_clear_prdt(hba, lrbp);
-            /* Mark completed command as NULL in LRB */
-            lrbp->cmd = NULL;
+            cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
+            ufshcd_release_scsi_cmd(hba, lrbp);
             /* Do not touch lrbp after scsi done */
             cmd->scsi_done(cmd);
-            ufshcd_release(hba);
-            update_scaling = true;
         } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
             lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
             if (hba->dev_cmd.complete) {
@@ -5222,11 +5227,9 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
                 ufshcd_add_command_trace(hba, index,
                         "dev_complete");
                 complete(hba->dev_cmd.complete);
-                update_scaling = true;
+                ufshcd_clk_scaling_update_busy(hba);
             }
         }
-        if (update_scaling)
-            ufshcd_clk_scaling_update_busy(hba);
     }
 }
@@ -6366,9 +6369,8 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
     irqreturn_t ret = IRQ_NONE;
     int tag;
 
-    pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-
     spin_lock_irqsave(hba->host->host_lock, flags);
+    pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
     issued = hba->outstanding_tasks & ~pending;
     for_each_set_bit(tag, &issued, hba->nutmrs) {
         struct request *req = tmf_rqs[tag];
@@ -6529,11 +6531,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
     err = wait_for_completion_io_timeout(&wait,
             msecs_to_jiffies(TM_CMD_TIMEOUT));
     if (!err) {
-        /*
-         * Make sure that ufshcd_compl_tm() does not trigger a
-         * use-after-free.
-         */
-        req->end_io_data = NULL;
         ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
         dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
                 __func__, tm_function);
@@ -6944,6 +6941,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
     unsigned long flags;
     unsigned int tag;
     int err = FAILED, res;
+    bool outstanding;
     struct ufshcd_lrb *lrbp;
     u32 reg;
 
@@ -7030,6 +7028,17 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
         goto release;
     }
 
+    /*
+     * Clear the corresponding bit from outstanding_reqs since the command
+     * has been aborted successfully.
+     */
+    spin_lock_irqsave(host->host_lock, flags);
+    outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
+    spin_unlock_irqrestore(host->host_lock, flags);
+
+    if (outstanding)
+        ufshcd_release_scsi_cmd(hba, lrbp);
+
     err = SUCCESS;
 
 release:
@@ -264,19 +264,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
 {
     u32 reg;
     int retries = 1000;
-    int ret;
-
-    usb_phy_init(dwc->usb2_phy);
-    usb_phy_init(dwc->usb3_phy);
-    ret = phy_init(dwc->usb2_generic_phy);
-    if (ret < 0)
-        return ret;
-
-    ret = phy_init(dwc->usb3_generic_phy);
-    if (ret < 0) {
-        phy_exit(dwc->usb2_generic_phy);
-        return ret;
-    }
 
     /*
      * We're resetting only the device side because, if we're in host mode,
@@ -310,9 +297,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
         udelay(1);
     } while (--retries);
 
-    phy_exit(dwc->usb3_generic_phy);
-    phy_exit(dwc->usb2_generic_phy);
-
     return -ETIMEDOUT;
 
 done:
@@ -982,9 +966,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
         dwc->phys_ready = true;
     }
 
+    usb_phy_init(dwc->usb2_phy);
+    usb_phy_init(dwc->usb3_phy);
+    ret = phy_init(dwc->usb2_generic_phy);
+    if (ret < 0)
+        goto err0a;
+
+    ret = phy_init(dwc->usb3_generic_phy);
+    if (ret < 0) {
+        phy_exit(dwc->usb2_generic_phy);
+        goto err0a;
+    }
+
     ret = dwc3_core_soft_reset(dwc);
     if (ret)
-        goto err0a;
+        goto err1;
 
     if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
         !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
@@ -1724,6 +1724,18 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
     struct usb_function *f = NULL;
     u8 endp;
 
+    if (w_length > USB_COMP_EP0_BUFSIZ) {
+        if (ctrl->bRequestType & USB_DIR_IN) {
+            /* Cast away the const, we are going to overwrite on purpose. */
+            __le16 *temp = (__le16 *)&ctrl->wLength;
+
+            *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
+            w_length = USB_COMP_EP0_BUFSIZ;
+        } else {
+            goto done;
+        }
+    }
+
     /* partial re-init of the response message; the function or the
      * gadget might need to intercept e.g. a control-OUT completion
      * when we delegate to it.
@@ -2254,7 +2266,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite,
     if (!cdev->req)
         return -ENOMEM;
 
-    cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
+    cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
     if (!cdev->req->buf)
         goto fail;
 
@@ -117,6 +117,7 @@ struct uvc_device {
     enum uvc_state state;
     struct usb_function func;
     struct uvc_video video;
+    bool func_connected;
 
     /* Descriptors */
     struct {
@@ -147,6 +148,7 @@ static inline struct uvc_device *to_uvc(struct usb_function *f)
 struct uvc_file_handle {
     struct v4l2_fh vfh;
     struct uvc_video *device;
+    bool is_uvc_app_handle;
 };
 
 #define to_uvc_file_handle(handle) \
@@ -227,17 +227,55 @@ static int
 uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
              const struct v4l2_event_subscription *sub)
 {
+    struct uvc_device *uvc = video_get_drvdata(fh->vdev);
+    struct uvc_file_handle *handle = to_uvc_file_handle(fh);
+    int ret;
+
     if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
         return -EINVAL;
 
-    return v4l2_event_subscribe(fh, sub, 2, NULL);
+    if (sub->type == UVC_EVENT_SETUP && uvc->func_connected)
+        return -EBUSY;
+
+    ret = v4l2_event_subscribe(fh, sub, 2, NULL);
+    if (ret < 0)
+        return ret;
+
+    if (sub->type == UVC_EVENT_SETUP) {
+        uvc->func_connected = true;
+        handle->is_uvc_app_handle = true;
+        uvc_function_connect(uvc);
+    }
+
+    return 0;
+}
+
+static void uvc_v4l2_disable(struct uvc_device *uvc)
+{
+    uvc->func_connected = false;
+    uvc_function_disconnect(uvc);
+    uvcg_video_enable(&uvc->video, 0);
+    uvcg_free_buffers(&uvc->video.queue);
 }
 
 static int
 uvc_v4l2_unsubscribe_event(struct v4l2_fh *fh,
                const struct v4l2_event_subscription *sub)
 {
-    return v4l2_event_unsubscribe(fh, sub);
+    struct uvc_device *uvc = video_get_drvdata(fh->vdev);
+    struct uvc_file_handle *handle = to_uvc_file_handle(fh);
+    int ret;
+
+    ret = v4l2_event_unsubscribe(fh, sub);
+    if (ret < 0)
+        return ret;
+
+    if (sub->type == UVC_EVENT_SETUP && handle->is_uvc_app_handle) {
+        uvc_v4l2_disable(uvc);
+        handle->is_uvc_app_handle = false;
+    }
+
+    return 0;
 }
 
 static long
@@ -292,7 +330,6 @@ uvc_v4l2_open(struct file *file)
     handle->device = &uvc->video;
     file->private_data = &handle->vfh;
 
-    uvc_function_connect(uvc);
     return 0;
 }
 
@@ -304,11 +341,9 @@ uvc_v4l2_release(struct file *file)
 	struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
 	struct uvc_video *video = handle->device;
 
-	uvc_function_disconnect(uvc);
-
 	mutex_lock(&video->mutex);
-	uvcg_video_enable(video, 0);
-	uvcg_free_buffers(&video->queue);
+	if (handle->is_uvc_app_handle)
+		uvc_v4l2_disable(uvc);
 	mutex_unlock(&video->mutex);
 
 	file->private_data = NULL;
@@ -137,7 +137,7 @@ static int dbgp_enable_ep_req(struct usb_ep *ep)
 		goto fail_1;
 	}
 
-	req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL);
+	req->buf = kzalloc(DBGP_REQ_LEN, GFP_KERNEL);
 	if (!req->buf) {
 		err = -ENOMEM;
 		stp = 2;
@@ -345,6 +345,19 @@ static int dbgp_setup(struct usb_gadget *gadget,
 	void *data = NULL;
 	u16 len = 0;
 
+	if (length > DBGP_REQ_LEN) {
+		if (ctrl->bRequestType & USB_DIR_IN) {
+			/* Cast away the const, we are going to overwrite on purpose. */
+			__le16 *temp = (__le16 *)&ctrl->wLength;
+
+			*temp = cpu_to_le16(DBGP_REQ_LEN);
+			length = DBGP_REQ_LEN;
+		} else {
+			return err;
+		}
+	}
+
+
 	if (request == USB_REQ_GET_DESCRIPTOR) {
 		switch (value>>8) {
 		case USB_DT_DEVICE:
@@ -110,6 +110,8 @@ enum ep0_state {
 /* enough for the whole queue: most events invalidate others */
 #define	N_EVENT		5
 
+#define RBUF_SIZE	256
+
 struct dev_data {
 	spinlock_t			lock;
 	refcount_t			count;
@@ -144,7 +146,7 @@ struct dev_data {
 	struct dentry			*dentry;
 
 	/* except this scratch i/o buffer for ep0 */
-	u8				rbuf [256];
+	u8				rbuf[RBUF_SIZE];
 };
 
 static inline void get_dev (struct dev_data *data)
@@ -1333,6 +1335,18 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
 	u16 w_value = le16_to_cpu(ctrl->wValue);
 	u16 w_length = le16_to_cpu(ctrl->wLength);
 
+	if (w_length > RBUF_SIZE) {
+		if (ctrl->bRequestType & USB_DIR_IN) {
+			/* Cast away the const, we are going to overwrite on purpose. */
+			__le16 *temp = (__le16 *)&ctrl->wLength;
+
+			*temp = cpu_to_le16(RBUF_SIZE);
+			w_length = RBUF_SIZE;
+		} else {
+			return value;
+		}
+	}
+
 	spin_lock (&dev->lock);
 	dev->setup_abort = 0;
 	if (dev->state == STATE_DEV_UNCONNECTED) {
@@ -1586,7 +1586,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
 			status = 1;
 	}
 	if (!status && !reset_change) {
-		xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+		xhci_dbg(xhci, "%s: stopping usb%d port polling\n",
+			 __func__, hcd->self.busnum);
 		clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
 	}
 	spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1618,7 +1619,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
 	if (bus_state->resuming_ports ||	/* USB2 */
 	    bus_state->port_remote_wakeup) {	/* USB3 */
 		spin_unlock_irqrestore(&xhci->lock, flags);
-		xhci_dbg(xhci, "suspend failed because a port is resuming\n");
+		xhci_dbg(xhci, "usb%d bus suspend to fail because a port is resuming\n",
+			 hcd->self.busnum);
 		return -EBUSY;
 	}
 }
@@ -832,9 +832,14 @@ static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
 
 		ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
 
-		if (td->cancel_status == TD_CLEARED)
+		if (td->cancel_status == TD_CLEARED) {
+			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
+				 __func__, td->urb);
 			xhci_td_cleanup(ep->xhci, td, ring, td->status);
+		} else {
+			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
+				 __func__, td->urb, td->cancel_status);
+		}
 		if (ep->xhci->xhc_state & XHCI_STATE_DYING)
 			return;
 	}
@@ -852,6 +857,10 @@ static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
 		goto done;
 	}
 
+	xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
+		 (reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
+		 ep_index, slot_id);
+
 	ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
 done:
 	if (ret)
@@ -885,7 +894,8 @@ static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
 	}
 
 	if (ep->ep_state & EP_HALTED) {
-		xhci_dbg(xhci, "Reset ep command already pending\n");
+		xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
+			 ep->ep_index);
 		return 0;
 	}
 
@@ -924,9 +934,10 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
 
 	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-			       "Removing canceled TD starting at 0x%llx (dma).",
+			       "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
 			       (unsigned long long)xhci_trb_virt_to_dma(
-				       td->start_seg, td->first_trb));
+				       td->start_seg, td->first_trb),
+			       td->urb->stream_id, td->urb);
 		list_del_init(&td->td_list);
 		ring = xhci_urb_to_transfer_ring(xhci, td->urb);
 		if (!ring) {
@@ -944,17 +955,21 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
 						       td->urb->stream_id);
 		hw_deq &= ~0xf;
 
-		if (td->cancel_status == TD_HALTED) {
-			cached_td = td;
-		} else if (trb_in_td(xhci, td->start_seg, td->first_trb,
-			   td->last_trb, hw_deq, false)) {
+		if (td->cancel_status == TD_HALTED ||
+		    trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
 			switch (td->cancel_status) {
 			case TD_CLEARED: /* TD is already no-op */
 			case TD_CLEARING_CACHE: /* set TR deq command already queued */
 				break;
 			case TD_DIRTY: /* TD is cached, clear it */
 			case TD_HALTED:
-				/* FIXME  stream case, several stopped rings */
+				td->cancel_status = TD_CLEARING_CACHE;
+				if (cached_td)
+					/* FIXME  stream case, several stopped rings */
+					xhci_dbg(xhci,
+						 "Move dq past stream %u URB %p instead of stream %u URB %p\n",
+						 td->urb->stream_id, td->urb,
+						 cached_td->urb->stream_id, cached_td->urb);
 				cached_td = td;
 				break;
 			}
@@ -963,18 +978,24 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
 				td->cancel_status = TD_CLEARED;
 		}
 	}
-	if (cached_td) {
-		cached_td->cancel_status = TD_CLEARING_CACHE;
 
-		err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
-						cached_td->urb->stream_id,
-						cached_td);
-		/* Failed to move past cached td, try just setting it noop */
-		if (err) {
-			td_to_noop(xhci, ring, cached_td, false);
-			cached_td->cancel_status = TD_CLEARED;
+	/* If there's no need to move the dequeue pointer then we're done */
+	if (!cached_td)
+		return 0;
+
+	err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
+					cached_td->urb->stream_id,
+					cached_td);
+	if (err) {
+		/* Failed to move past cached td, just set cached TDs to no-op */
+		list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
+			if (td->cancel_status != TD_CLEARING_CACHE)
+				continue;
+			xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
+				 td->urb);
+			td_to_noop(xhci, ring, td, false);
+			td->cancel_status = TD_CLEARED;
 		}
-		cached_td = NULL;
 	}
 	return 0;
 }
@@ -1071,6 +1092,8 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 			return;
 		case EP_STATE_RUNNING:
 			/* Race, HW handled stop ep cmd before ep was running */
+			xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
+
 			command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
 			if (!command)
 				xhci_stop_watchdog_timer_in_irq(xhci, ep);
@@ -1392,7 +1415,12 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
 			ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
 			if (td->cancel_status == TD_CLEARING_CACHE) {
 				td->cancel_status = TD_CLEARED;
+				xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
+					 __func__, td->urb);
 				xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+			} else {
+				xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
+					 __func__, td->urb, td->cancel_status);
 			}
 		}
 cleanup:
@@ -2005,7 +2033,8 @@ cleanup:
 		 * bits are still set.  When an event occurs, switch over to
 		 * polling to avoid losing status changes.
 		 */
-		xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+		xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
+			 __func__, hcd->self.busnum);
 		set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
 		spin_unlock(&xhci->lock);
 		/* Pass this up to the core */
@@ -993,7 +993,8 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
 	xhci_dbc_suspend(xhci);
 
 	/* Don't poll the roothubs on bus suspend. */
-	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
+	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
+		 __func__, hcd->self.busnum);
 	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
 	del_timer_sync(&hcd->rh_timer);
 	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
@@ -1257,7 +1258,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
 
 	/* Re-enable port polling. */
-	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
+		 __func__, hcd->self.busnum);
 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
 	usb_hcd_poll_rh_status(xhci->shared_hcd);
 	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -4661,19 +4663,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
 {
 	unsigned long long timeout_ns;
 
-	if (xhci->quirks & XHCI_INTEL_HOST)
-		timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
-	else
-		timeout_ns = udev->u1_params.sel;
-
 	/* Prevent U1 if service interval is shorter than U1 exit latency */
 	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
-		if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
+		if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
 			dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
 			return USB3_LPM_DISABLED;
 		}
 	}
 
+	if (xhci->quirks & XHCI_INTEL_HOST)
+		timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+	else
+		timeout_ns = udev->u1_params.sel;
+
 	/* The U1 timeout is encoded in 1us intervals.
 	 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
 	 */
@@ -4725,19 +4727,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
 {
 	unsigned long long timeout_ns;
 
-	if (xhci->quirks & XHCI_INTEL_HOST)
-		timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
-	else
-		timeout_ns = udev->u2_params.sel;
-
 	/* Prevent U2 if service interval is shorter than U2 exit latency */
 	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
-		if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
+		if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
 			dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
 			return USB3_LPM_DISABLED;
 		}
 	}
 
+	if (xhci->quirks & XHCI_INTEL_HOST)
+		timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+	else
+		timeout_ns = udev->u2_params.sel;
+
 	/* The U2 timeout is encoded in 256us intervals */
 	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
 	/* If the necessary timeout value is bigger than what we can set in the
@@ -4157,12 +4157,9 @@ static void run_state_machine(struct tcpm_port *port)
 				       0);
 			port->debouncing = false;
 		} else {
-			/* Wait for VBUS, but not forever */
-			tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
 			port->debouncing = false;
 		}
 		break;
 
 	case SRC_TRY:
 		port->try_src_count++;
 		tcpm_set_cc(port, tcpm_rp_cc(port));
@@ -408,6 +408,13 @@ int virtio_device_restore(struct virtio_device *dev)
 	struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
 	int ret;
 
+	/* Short path for stateful devices. Here we assume that if the device
+	 * does not have a freeze callback, its state was not changed when
+	 * suspended.
+	 */
+	if (drv && !drv->freeze)
+		goto on_config_enable;
+
 	/* We always start by resetting the device, in case a previous
 	 * driver messed it up. */
 	dev->config->reset(dev);
@@ -439,6 +446,7 @@ int virtio_device_restore(struct virtio_device *dev)
 	/* Finally, tell the device we're all set */
 	virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
 
+on_config_enable:
 	virtio_config_enable(dev);
 
 	return 0;
@@ -1054,6 +1054,26 @@ static void vm_unregister_cmdline_devices(void)
 
 #endif
 
+#ifdef CONFIG_PM_SLEEP
+static int virtio_mmio_freeze(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
+
+	return virtio_device_freeze(&vm_dev->vdev);
+}
+
+static int virtio_mmio_restore(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
+
+	return virtio_device_restore(&vm_dev->vdev);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(virtio_mmio_pm_ops, virtio_mmio_freeze, virtio_mmio_restore);
+
 /* Platform driver */
 
 static const struct of_device_id virtio_mmio_match[] = {
@@ -1077,6 +1097,7 @@ static struct platform_driver virtio_mmio_driver = {
 		.name	= "virtio-mmio",
 		.of_match_table	= virtio_mmio_match,
 		.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
+		.pm	= &virtio_mmio_pm_ops,
 	},
 };
 
184 fs/aio.c
@@ -182,8 +182,9 @@ struct poll_iocb {
 	struct file		*file;
 	struct wait_queue_head	*head;
 	__poll_t		events;
-	bool			done;
 	bool			cancelled;
+	bool			work_scheduled;
+	bool			work_need_resched;
 	struct wait_queue_entry	wait;
 	struct work_struct	work;
 };
@@ -1621,6 +1622,51 @@ static void aio_poll_put_work(struct work_struct *work)
 	iocb_put(iocb);
 }
 
+/*
+ * Safely lock the waitqueue which the request is on, synchronizing with the
+ * case where the ->poll() provider decides to free its waitqueue early.
+ *
+ * Returns true on success, meaning that req->head->lock was locked, req->wait
+ * is on req->head, and an RCU read lock was taken.  Returns false if the
+ * request was already removed from its waitqueue (which might no longer exist).
+ */
+static bool poll_iocb_lock_wq(struct poll_iocb *req)
+{
+	wait_queue_head_t *head;
+
+	/*
+	 * While we hold the waitqueue lock and the waitqueue is nonempty,
+	 * wake_up_pollfree() will wait for us.  However, taking the waitqueue
+	 * lock in the first place can race with the waitqueue being freed.
+	 *
+	 * We solve this as eventpoll does: by taking advantage of the fact that
+	 * all users of wake_up_pollfree() will RCU-delay the actual free.  If
+	 * we enter rcu_read_lock() and see that the pointer to the queue is
+	 * non-NULL, we can then lock it without the memory being freed out from
+	 * under us, then check whether the request is still on the queue.
+	 *
+	 * Keep holding rcu_read_lock() as long as we hold the queue lock, in
+	 * case the caller deletes the entry from the queue, leaving it empty.
+	 * In that case, only RCU prevents the queue memory from being freed.
+	 */
+	rcu_read_lock();
+	head = smp_load_acquire(&req->head);
+	if (head) {
+		spin_lock(&head->lock);
+		if (!list_empty(&req->wait.entry))
+			return true;
+		spin_unlock(&head->lock);
+	}
+	rcu_read_unlock();
+	return false;
+}
+
+static void poll_iocb_unlock_wq(struct poll_iocb *req)
+{
+	spin_unlock(&req->head->lock);
+	rcu_read_unlock();
+}
+
 static void aio_poll_complete_work(struct work_struct *work)
 {
 	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1640,14 +1686,27 @@ static void aio_poll_complete_work(struct work_struct *work)
 	 * avoid further branches in the fast path.
 	 */
 	spin_lock_irq(&ctx->ctx_lock);
-	if (!mask && !READ_ONCE(req->cancelled)) {
-		add_wait_queue(req->head, &req->wait);
-		spin_unlock_irq(&ctx->ctx_lock);
-		return;
-	}
+	if (poll_iocb_lock_wq(req)) {
+		if (!mask && !READ_ONCE(req->cancelled)) {
+			/*
+			 * The request isn't actually ready to be completed yet.
+			 * Reschedule completion if another wakeup came in.
+			 */
+			if (req->work_need_resched) {
+				schedule_work(&req->work);
+				req->work_need_resched = false;
+			} else {
+				req->work_scheduled = false;
+			}
+			poll_iocb_unlock_wq(req);
+			spin_unlock_irq(&ctx->ctx_lock);
+			return;
+		}
+		list_del_init(&req->wait.entry);
+		poll_iocb_unlock_wq(req);
+	} /* else, POLLFREE has freed the waitqueue, so we must complete */
 	list_del_init(&iocb->ki_list);
 	iocb->ki_res.res = mangle_poll(mask);
-	req->done = true;
 	spin_unlock_irq(&ctx->ctx_lock);
 
 	iocb_put(iocb);
@@ -1659,13 +1718,14 @@ static int aio_poll_cancel(struct kiocb *iocb)
 	struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
 	struct poll_iocb *req = &aiocb->poll;
 
-	spin_lock(&req->head->lock);
-	WRITE_ONCE(req->cancelled, true);
-	if (!list_empty(&req->wait.entry)) {
-		list_del_init(&req->wait.entry);
-		schedule_work(&aiocb->poll.work);
-	}
-	spin_unlock(&req->head->lock);
+	if (poll_iocb_lock_wq(req)) {
+		WRITE_ONCE(req->cancelled, true);
+		if (!req->work_scheduled) {
+			schedule_work(&aiocb->poll.work);
+			req->work_scheduled = true;
+		}
+		poll_iocb_unlock_wq(req);
+	} /* else, the request was force-cancelled by POLLFREE already */
 
 	return 0;
 }
@@ -1682,20 +1742,26 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	if (mask && !(mask & req->events))
 		return 0;
 
-	list_del_init(&req->wait.entry);
-
-	if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
+	/*
+	 * Complete the request inline if possible.  This requires that three
+	 * conditions be met:
+	 *   1. An event mask must have been passed.  If a plain wakeup was done
+	 *	instead, then mask == 0 and we have to call vfs_poll() to get
+	 *	the events, so inline completion isn't possible.
+	 *   2. The completion work must not have already been scheduled.
+	 *   3. ctx_lock must not be busy.  We have to use trylock because we
+	 *	already hold the waitqueue lock, so this inverts the normal
+	 *	locking order.  Use irqsave/irqrestore because not all
+	 *	filesystems (e.g. fuse) call this function with IRQs disabled,
+	 *	yet IRQs have to be disabled before ctx_lock is obtained.
+	 */
+	if (mask && !req->work_scheduled &&
+	    spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
 		struct kioctx *ctx = iocb->ki_ctx;
 
-		/*
-		 * Try to complete the iocb inline if we can. Use
-		 * irqsave/irqrestore because not all filesystems (e.g. fuse)
-		 * call this function with IRQs disabled and because IRQs
-		 * have to be disabled before ctx_lock is obtained.
-		 */
+		list_del_init(&req->wait.entry);
 		list_del(&iocb->ki_list);
 		iocb->ki_res.res = mangle_poll(mask);
-		req->done = true;
 		if (iocb->ki_eventfd && eventfd_signal_count()) {
 			iocb = NULL;
 			INIT_WORK(&req->work, aio_poll_put_work);
@@ -1705,7 +1771,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 		if (iocb)
 			iocb_put(iocb);
 	} else {
-		schedule_work(&req->work);
+		/*
+		 * Schedule the completion work if needed.  If it was already
+		 * scheduled, record that another wakeup came in.
+		 *
+		 * Don't remove the request from the waitqueue here, as it might
+		 * not actually be complete yet (we won't know until vfs_poll()
+		 * is called), and we must not miss any wakeups.  POLLFREE is an
+		 * exception to this; see below.
+		 */
+		if (req->work_scheduled) {
+			req->work_need_resched = true;
+		} else {
+			schedule_work(&req->work);
+			req->work_scheduled = true;
+		}
+
+		/*
+		 * If the waitqueue is being freed early but we can't complete
+		 * the request inline, we have to tear down the request as best
+		 * we can.  That means immediately removing the request from its
+		 * waitqueue and preventing all further accesses to the
+		 * waitqueue via the request.  We also need to schedule the
+		 * completion work (done above).  Also mark the request as
+		 * cancelled, to potentially skip an unneeded call to ->poll().
+		 */
+		if (mask & POLLFREE) {
+			WRITE_ONCE(req->cancelled, true);
+			list_del_init(&req->wait.entry);
+
+			/*
+			 * Careful: this *must* be the last step, since as soon
+			 * as req->head is NULL'ed out, the request can be
+			 * completed and freed, since aio_poll_complete_work()
+			 * will no longer need to take the waitqueue lock.
+			 */
+			smp_store_release(&req->head, NULL);
+		}
 	}
 	return 1;
 }
@@ -1713,6 +1815,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 struct aio_poll_table {
 	struct poll_table_struct	pt;
 	struct aio_kiocb		*iocb;
+	bool				queued;
 	int				error;
 };
 
@@ -1723,11 +1826,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 	struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
 
 	/* multiple wait queues per file are not supported */
-	if (unlikely(pt->iocb->poll.head)) {
+	if (unlikely(pt->queued)) {
 		pt->error = -EINVAL;
 		return;
 	}
 
+	pt->queued = true;
 	pt->error = 0;
 	pt->iocb->poll.head = head;
 	add_wait_queue(head, &pt->iocb->poll.wait);
@@ -1752,12 +1856,14 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
 
 	req->head = NULL;
-	req->done = false;
 	req->cancelled = false;
+	req->work_scheduled = false;
+	req->work_need_resched = false;
 
 	apt.pt._qproc = aio_poll_queue_proc;
 	apt.pt._key = req->events;
 	apt.iocb = aiocb;
+	apt.queued = false;
 	apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
 
 	/* initialized the list so that we can do list_empty checks */
@@ -1766,23 +1872,35 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 
 	mask = vfs_poll(req->file, &apt.pt) & req->events;
 	spin_lock_irq(&ctx->ctx_lock);
-	if (likely(req->head)) {
-		spin_lock(&req->head->lock);
-		if (unlikely(list_empty(&req->wait.entry))) {
-			if (apt.error)
+	if (likely(apt.queued)) {
+		bool on_queue = poll_iocb_lock_wq(req);
+
+		if (!on_queue || req->work_scheduled) {
+			/*
+			 * aio_poll_wake() already either scheduled the async
+			 * completion work, or completed the request inline.
+			 */
+			if (apt.error) /* unsupported case: multiple queues */
 				cancel = true;
 			apt.error = 0;
 			mask = 0;
 		}
 		if (mask || apt.error) {
+			/* Steal to complete synchronously. */
 			list_del_init(&req->wait.entry);
 		} else if (cancel) {
+			/* Cancel if possible (may be too late though). */
 			WRITE_ONCE(req->cancelled, true);
-		} else if (!req->done) { /* actually waiting for an event */
+		} else if (on_queue) {
+			/*
+			 * Actually waiting for an event, so add the request to
+			 * active_reqs so that it can be cancelled if needed.
+			 */
 			list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
 			aiocb->ki_cancel = aio_poll_cancel;
 		}
-		spin_unlock(&req->head->lock);
+		if (on_queue)
+			poll_iocb_unlock_wq(req);
 	}
 	if (mask) { /* no async, we'd stolen it */
 		aiocb->ki_res.res = mangle_poll(mask);
@@ -2515,6 +2515,11 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 
+	/* The below cases were checked when setting it. */
+	if (f2fs_is_pinned_file(inode))
+		return false;
+	if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
+		return true;
 	if (f2fs_lfs_mode(sbi))
 		return true;
 	if (S_ISDIR(inode->i_mode))
@@ -2523,8 +2528,6 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
 		return true;
 	if (f2fs_is_atomic_file(inode))
 		return true;
-	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
-		return true;
 
 	/* swap file is migrating in aligned write mode */
 	if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
@@ -3250,17 +3250,17 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
 
 	inode_lock(inode);
 
-	if (f2fs_should_update_outplace(inode, NULL)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	if (!pin) {
 		clear_inode_flag(inode, FI_PIN_FILE);
 		f2fs_i_gc_failures_write(inode, 0);
 		goto done;
 	}
 
+	if (f2fs_should_update_outplace(inode, NULL)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (f2fs_pin_file_control(inode, false)) {
 		ret = -EAGAIN;
 		goto out;
@@ -35,17 +35,7 @@
 
 void signalfd_cleanup(struct sighand_struct *sighand)
 {
-	wait_queue_head_t *wqh = &sighand->signalfd_wqh;
-	/*
-	 * The lockless check can race with remove_wait_queue() in progress,
-	 * but in this case its caller should run under rcu_read_lock() and
-	 * sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
-	 */
-	if (likely(!waitqueue_active(wqh)))
-		return;
-
-	/* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
-	wake_up_poll(wqh, EPOLLHUP | POLLFREE);
+	wake_up_pollfree(&sighand->signalfd_wqh);
 }
 
 struct signalfd_ctx {
@@ -3285,7 +3285,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
 {
 	struct inode *inode;
 
-	if (!vma->vm_file)
+	if (!IS_ENABLED(CONFIG_FS_DAX) || !vma->vm_file)
 		return false;
 	if (!vma_is_dax(vma))
 		return false;
@@ -833,6 +833,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
 	return hdev->ll_driver == driver;
 }
 
+static inline bool hid_is_usb(struct hid_device *hdev)
+{
+	return hid_is_using_ll_driver(hdev, &usb_hid_driver);
+}
+
 #define	PM_HINT_FULLON	1<<5
 #define PM_HINT_NORMAL	1<<1
 
@@ -6,6 +6,8 @@
 #include <linux/spinlock.h>
 #include <linux/mm_types.h>
 #include <linux/mmap_lock.h>
+#include <linux/percpu-rwsem.h>
+#include <linux/slab.h>
 #include <linux/srcu.h>
 #include <linux/interval_tree.h>
 #include <linux/android_kabi.h>
@@ -15,6 +17,13 @@ struct mmu_notifier;
 struct mmu_notifier_range;
 struct mmu_interval_notifier;
 
+struct mmu_notifier_subscriptions_hdr {
+	bool valid;
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	struct percpu_rw_semaphore_atomic *mmu_notifier_lock;
+#endif
+};
+
 /**
  * enum mmu_notifier_event - reason for the mmu notifier callback
  * @MMU_NOTIFY_UNMAP: either munmap() that unmap the range or a mremap() that
@@ -281,9 +290,30 @@ struct mmu_notifier_range {
 	void *migrate_pgmap_owner;
 };
 
+static inline
+struct mmu_notifier_subscriptions_hdr *get_notifier_subscriptions_hdr(
+						struct mm_struct *mm)
+{
+	/*
+	 * container_of() can't be used here because mmu_notifier_subscriptions
+	 * struct should be kept invisible to mm_struct, otherwise it
+	 * introduces KMI CRC breakage. Therefore the callers don't know what
+	 * members struct mmu_notifier_subscriptions contains and can't call
+	 * container_of(), which requires a member name.
+	 *
+	 * WARNING: For this typecasting to work, mmu_notifier_subscriptions_hdr
+	 * should be the first member of struct mmu_notifier_subscriptions.
+	 */
+	return (struct mmu_notifier_subscriptions_hdr *)mm->notifier_subscriptions;
+}
+
 static inline int mm_has_notifiers(struct mm_struct *mm)
 {
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	return unlikely(get_notifier_subscriptions_hdr(mm)->valid);
+#else
 	return unlikely(mm->notifier_subscriptions);
+#endif
 }
 
 struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
@@ -502,9 +532,29 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 		__mmu_notifier_invalidate_range(mm, start, end);
 }
 
-static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+
+extern bool mmu_notifier_subscriptions_init(struct mm_struct *mm);
+extern void mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
+
+static inline bool mmu_notifier_trylock(struct mm_struct *mm)
+{
+	return percpu_down_read_trylock(
+		&get_notifier_subscriptions_hdr(mm)->mmu_notifier_lock->rw_sem);
+}
+
+static inline void mmu_notifier_unlock(struct mm_struct *mm)
+{
+	percpu_up_read(
+		&get_notifier_subscriptions_hdr(mm)->mmu_notifier_lock->rw_sem);
+}
+
+#else /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
+static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
 {
 	mm->notifier_subscriptions = NULL;
+	return true;
 }
 
 static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
@@ -513,6 +563,16 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 		__mmu_notifier_subscriptions_destroy(mm);
 }
 
+static inline bool mmu_notifier_trylock(struct mm_struct *mm)
+{
+	return true;
+}
+
+static inline void mmu_notifier_unlock(struct mm_struct *mm)
+{
+}
+
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
 static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
 					   enum mmu_notifier_event event,
@@ -727,14 +787,24 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
 {
 }
 
-static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
+static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
 {
+	return true;
 }
 
 static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 {
 }
 
+static inline bool mmu_notifier_trylock(struct mm_struct *mm)
+{
+	return true;
+}
+
+static inline void mmu_notifier_unlock(struct mm_struct *mm)
+{
+}
+
 #define mmu_notifier_range_update_to_read_only(r) false
 
 #define ptep_clear_flush_young_notify ptep_clear_flush_young
@@ -20,6 +20,11 @@ struct percpu_rw_semaphore {
 #endif
 };
 
+struct percpu_rw_semaphore_atomic {
+	struct percpu_rw_semaphore rw_sem;
+	struct list_head destroy_list_entry;
+};
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname },
 #else
@@ -127,8 +132,12 @@ extern void percpu_up_write(struct percpu_rw_semaphore *);
 extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
 				const char *, struct lock_class_key *);
 
+/* Can't be called in atomic context. */
 extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
 
+/* Invokes percpu_free_rwsem and frees the semaphore from a worker thread. */
+extern void percpu_rwsem_async_destroy(struct percpu_rw_semaphore_atomic *sem);
+
 #define percpu_init_rwsem(sem)					\
 ({								\
 	static struct lock_class_key rwsem_key;			\
@@ -207,6 +207,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void
 void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);
 
 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
@@ -235,6 +236,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
 #define wake_up_interruptible_sync_poll_locked(x, m)				\
 	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
 
+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+	/*
+	 * For performance reasons, we don't always take the queue lock here.
+	 * Therefore, we might race with someone removing the last entry from
+	 * the queue, and proceed while they still hold the queue lock.
+	 * However, rcu_read_lock() is required to be held in such cases, so we
+	 * can safely proceed with an RCU-delayed free.
+	 */
+	if (waitqueue_active(wq_head))
+		__wake_up_pollfree(wq_head);
+}
+
 #define ___wait_cond_timeout(condition)						\
 ({										\
 	bool __cond = (condition);						\
@@ -18,6 +18,10 @@ DECLARE_HOOK(android_vh_alter_futex_plist_add,
 		 bool *already_on_hb),
 	TP_ARGS(node, head, already_on_hb));
 
+DECLARE_HOOK(android_vh_futex_sleep_start,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p));
+
 /* macro versions of hooks are no longer required */
 
 #endif /* _TRACE_HOOK_FUTEX_H */
@@ -126,6 +126,10 @@ DECLARE_HOOK(android_vh_mmap_region,
 DECLARE_HOOK(android_vh_try_to_unmap_one,
 	TP_PROTO(struct vm_area_struct *vma, struct page *page, unsigned long addr, bool ret),
 	TP_ARGS(vma, page, addr, ret));
+struct device;
+DECLARE_HOOK(android_vh_subpage_dma_contig_alloc,
+	TP_PROTO(bool *allow_subpage_alloc, struct device *dev, size_t *size),
+	TP_ARGS(allow_subpage_alloc, dev, size));
 /* macro versions of hooks are no longer required */
 
 #endif /* _TRACE_HOOK_MM_H */
@@ -29,7 +29,7 @@
 #define POLLRDHUP       0x2000
 #endif
 
-#define POLLFREE	(__force __poll_t)0x4000	/* currently only for epoll */
+#define POLLFREE	(__force __poll_t)0x4000
 
 #define POLL_BUSY_LOOP	(__force __poll_t)0x8000
 
@@ -50,6 +50,7 @@
 #include <linux/sizes.h>
 #include <linux/dma-map-ops.h>
 #include <linux/cma.h>
+#include <trace/hooks/mm.h>
 
 #ifdef CONFIG_CMA_SIZE_MBYTES
 #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
@@ -309,14 +310,19 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
 #ifdef CONFIG_DMA_PERNUMA_CMA
 	int nid = dev_to_node(dev);
 #endif
+	bool allow_subpage_alloc = false;
 
 	/* CMA can be used only in the context which permits sleeping */
 	if (!gfpflags_allow_blocking(gfp))
 		return NULL;
 	if (dev->cma_area)
 		return cma_alloc_aligned(dev->cma_area, size, gfp);
-	if (size <= PAGE_SIZE)
-		return NULL;
+	if (size <= PAGE_SIZE) {
+		trace_android_vh_subpage_dma_contig_alloc(&allow_subpage_alloc, dev, &size);
+		if (!allow_subpage_alloc)
+			return NULL;
+	}
 
 #ifdef CONFIG_DMA_PERNUMA_CMA
 	if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {
@@ -1072,7 +1072,8 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
 	mm_init_owner(mm, p);
 	mm_init_pasid(mm);
 	RCU_INIT_POINTER(mm->exe_file, NULL);
-	mmu_notifier_subscriptions_init(mm);
+	if (!mmu_notifier_subscriptions_init(mm))
+		goto fail_nopgd;
 	init_tlb_flush_pending(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
 	mm->pmd_huge_pte = NULL;
@@ -2607,8 +2607,10 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
 		 * flagged for rescheduling. Only call schedule if there
 		 * is no timeout, or if it has yet to expire.
 		 */
-		if (!timeout || timeout->task)
+		if (!timeout || timeout->task) {
+			trace_android_vh_futex_sleep_start(current);
 			freezable_schedule();
+		}
 	}
 	__set_current_state(TASK_RUNNING);
 }
@@ -7,6 +7,7 @@
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
+#include <linux/slab.h>
 #include <linux/errno.h>
 
 int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
@@ -268,3 +269,34 @@ void percpu_up_write(struct percpu_rw_semaphore *sem)
 	rcu_sync_exit(&sem->rss);
 }
 EXPORT_SYMBOL_GPL(percpu_up_write);
+
+static LIST_HEAD(destroy_list);
+static DEFINE_SPINLOCK(destroy_list_lock);
+
+static void destroy_list_workfn(struct work_struct *work)
+{
+	struct percpu_rw_semaphore_atomic *sem, *sem2;
+	LIST_HEAD(to_destroy);
+
+	spin_lock(&destroy_list_lock);
+	list_splice_init(&destroy_list, &to_destroy);
+	spin_unlock(&destroy_list_lock);
+
+	if (list_empty(&to_destroy))
+		return;
+
+	list_for_each_entry_safe(sem, sem2, &to_destroy, destroy_list_entry) {
+		percpu_free_rwsem(&sem->rw_sem);
+		kfree(sem);
+	}
+}
+
+static DECLARE_WORK(destroy_list_work, destroy_list_workfn);
+
+void percpu_rwsem_async_destroy(struct percpu_rw_semaphore_atomic *sem)
+{
+	spin_lock(&destroy_list_lock);
+	list_add_tail(&sem->destroy_list_entry, &destroy_list);
+	spin_unlock(&destroy_list_lock);
+	schedule_work(&destroy_list_work);
+}
@@ -227,6 +227,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
+void __wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
+	/* POLLFREE must have cleared the queue. */
+	WARN_ON_ONCE(waitqueue_active(wq_head));
+}
+
 /*
  * Note: we use "set_current_state()" _after_ the wait-queue add,
  * because we need a memory barrier there on SMP, so that any
83 mm/gup.c
@@ -948,6 +948,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
 	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
 		return -EFAULT;
 
+	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
+		return -EOPNOTSUPP;
+
 	if (write) {
 		if (!(vm_flags & VM_WRITE)) {
 			if (!(gup_flags & FOLL_FORCE))
@@ -1085,10 +1088,14 @@ static long __get_user_pages(struct mm_struct *mm,
 				goto next_page;
 			}
 
-			if (!vma || check_vma_flags(vma, gup_flags)) {
+			if (!vma) {
 				ret = -EFAULT;
 				goto out;
 			}
+			ret = check_vma_flags(vma, gup_flags);
+			if (ret)
+				goto out;
+
 			if (is_vm_hugetlb_page(vma)) {
 				i = follow_hugetlb_page(mm, vma, pages, vmas,
 						&start, &nr_pages, i,
@@ -1592,26 +1599,6 @@ struct page *get_dump_page(unsigned long addr)
 }
 #endif /* CONFIG_ELF_CORE */
 
-#if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
-static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
-{
-	long i;
-	struct vm_area_struct *vma_prev = NULL;
-
-	for (i = 0; i < nr_pages; i++) {
-		struct vm_area_struct *vma = vmas[i];
-
-		if (vma == vma_prev)
-			continue;
-
-		vma_prev = vma;
-
-		if (vma_is_fsdax(vma))
-			return true;
-	}
-	return false;
-}
-
 #ifdef CONFIG_CMA
 static long check_and_migrate_cma_pages(struct mm_struct *mm,
 					unsigned long start,
@@ -1730,63 +1717,23 @@ static long __gup_longterm_locked(struct mm_struct *mm,
 				  struct vm_area_struct **vmas,
 				  unsigned int gup_flags)
 {
-	struct vm_area_struct **vmas_tmp = vmas;
 	unsigned long flags = 0;
-	long rc, i;
+	long rc;
 
-	if (gup_flags & FOLL_LONGTERM) {
-		if (!pages)
-			return -EINVAL;
-
-		if (!vmas_tmp) {
-			vmas_tmp = kcalloc(nr_pages,
-					   sizeof(struct vm_area_struct *),
-					   GFP_KERNEL);
-			if (!vmas_tmp)
-				return -ENOMEM;
-		}
+	if (gup_flags & FOLL_LONGTERM)
 		flags = memalloc_nocma_save();
-	}
 
-	rc = __get_user_pages_locked(mm, start, nr_pages, pages,
-				     vmas_tmp, NULL, gup_flags);
+	rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL,
+				     gup_flags);
 
 	if (gup_flags & FOLL_LONGTERM) {
-		if (rc < 0)
-			goto out;
-
-		if (check_dax_vmas(vmas_tmp, rc)) {
-			if (gup_flags & FOLL_PIN)
-				unpin_user_pages(pages, rc);
-			else
-				for (i = 0; i < rc; i++)
-					put_page(pages[i]);
-			rc = -EOPNOTSUPP;
-			goto out;
-		}
-
-		rc = check_and_migrate_cma_pages(mm, start, rc, pages,
-						 vmas_tmp, gup_flags);
-out:
+		if (rc > 0)
+			rc = check_and_migrate_cma_pages(mm, start, rc, pages,
+							 vmas, gup_flags);
 		memalloc_nocma_restore(flags);
 	}
-
-	if (vmas_tmp != vmas)
-		kfree(vmas_tmp);
 	return rc;
 }
-#else /* !CONFIG_FS_DAX && !CONFIG_CMA */
-static __always_inline long __gup_longterm_locked(struct mm_struct *mm,
-						  unsigned long start,
-						  unsigned long nr_pages,
-						  struct page **pages,
-						  struct vm_area_struct **vmas,
-						  unsigned int flags)
-{
-	return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
-				       NULL, flags);
-}
-#endif /* CONFIG_FS_DAX || CONFIG_CMA */
 
 static bool is_valid_gup_flags(unsigned int gup_flags)
 {
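Note: with the fsdax rejection handled per-VMA in check_vma_flags(), __gup_longterm_locked() no longer needs the kcalloc'd shadow vmas array, the unpin-on-failure path, or the CONFIG_FS_DAX/CONFIG_CMA stub. The surviving logic reduces to roughly this; a distillation of the patched function, not new code:

static long gup_longterm_sketch(struct mm_struct *mm, unsigned long start,
				unsigned long nr_pages, struct page **pages,
				struct vm_area_struct **vmas,
				unsigned int gup_flags)
{
	unsigned long flags = 0;
	long rc;

	if (gup_flags & FOLL_LONGTERM)
		flags = memalloc_nocma_save();	/* keep CMA out of the pin */
	rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL,
				     gup_flags);
	if (gup_flags & FOLL_LONGTERM) {
		if (rc > 0)
			rc = check_and_migrate_cma_pages(mm, start, rc, pages,
							 vmas, gup_flags);
		memalloc_nocma_restore(flags);
	}
	return rc;
}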
mm/memory.c (15 changed lines)
@@ -4717,8 +4717,19 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 		goto unlock;
 	}
 	if (vmf->flags & FAULT_FLAG_WRITE) {
-		if (!pte_write(entry))
-			return do_wp_page(vmf);
+		if (!pte_write(entry)) {
+			if (!(vmf->flags & FAULT_FLAG_SPECULATIVE))
+				return do_wp_page(vmf);
+
+			if (!mmu_notifier_trylock(vmf->vma->vm_mm)) {
+				ret = VM_FAULT_RETRY;
+				goto unlock;
+			}
+
+			ret = do_wp_page(vmf);
+			mmu_notifier_unlock(vmf->vma->vm_mm);
+			return ret;
+		}
 		entry = pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
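Note: a speculative fault (FAULT_FLAG_SPECULATIVE) runs without mmap_lock, so before entering do_wp_page(), which may fire MMU notifiers, it must take the new notifier lock; if it cannot, it bails out with VM_FAULT_RETRY and the fault is redone the classic way. The idiom in isolation; do_work() is a hypothetical stand-in for do_wp_page():

static vm_fault_t do_work(struct vm_fault *vmf);	/* hypothetical */

static vm_fault_t speculative_wp_sketch(struct vm_fault *vmf)
{
	vm_fault_t ret;

	if (!mmu_notifier_trylock(vmf->vma->vm_mm))
		return VM_FAULT_RETRY;	/* retry under mmap_lock */

	ret = do_work(vmf);
	mmu_notifier_unlock(vmf->vma->vm_mm);
	return ret;
}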
@@ -1136,9 +1136,6 @@ int add_memory_subsection(int nid, u64 start, u64 size)
 	struct resource *res;
 	int ret;
 
-	if (size == memory_block_size_bytes())
-		return add_memory(nid, start, size, MHP_NONE);
-
 	if (!IS_ALIGNED(start, SUBSECTION_SIZE) ||
 	    !IS_ALIGNED(size, SUBSECTION_SIZE)) {
 		pr_err("%s: start 0x%llx size 0x%llx not aligned to subsection size\n",
@@ -1837,9 +1834,6 @@ EXPORT_SYMBOL_GPL(remove_memory);
 
 int remove_memory_subsection(int nid, u64 start, u64 size)
 {
-	if (size == memory_block_size_bytes())
-		return remove_memory(nid, start, size);
-
 	if (!IS_ALIGNED(start, SUBSECTION_SIZE) ||
 	    !IS_ALIGNED(size, SUBSECTION_SIZE)) {
 		pr_err("%s: start 0x%llx size 0x%llx not aligned to subsection size\n",
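Note: these two hunks (apparently mm/memory_hotplug.c) drop the shortcut that rerouted block-sized requests to add_memory()/remove_memory(), so every caller of the subsection API now takes the same alignment-checked subsection path. The check both functions share, pulled out for illustration:

#include <linux/mmzone.h>

static bool subsection_aligned(u64 start, u64 size)
{
	return IS_ALIGNED(start, SUBSECTION_SIZE) &&
	       IS_ALIGNED(size, SUBSECTION_SIZE);
}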
@@ -35,6 +35,12 @@ struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
  * in mmdrop().
  */
 struct mmu_notifier_subscriptions {
+	/*
+	 * WARNING: hdr should be the first member of this structure
+	 * so that it can be typecasted into mmu_notifier_subscriptions_hdr.
+	 * This is required to avoid KMI CRC breakage.
+	 */
+	struct mmu_notifier_subscriptions_hdr hdr;
 	/* all mmu notifiers registered in this mm are queued in this list */
 	struct hlist_head list;
 	bool has_itree;
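Note: the hdr-first layout is the classic first-member cast: a pointer to the outer struct is also a valid pointer to its first member, so ABI-frozen (KMI) code can reach the hdr without knowing, or re-CRC-ing, the rest of the layout. A minimal sketch of the cast the comment relies on:

static inline struct mmu_notifier_subscriptions_hdr *
subscriptions_hdr(struct mmu_notifier_subscriptions *subscriptions)
{
	/* Valid only because hdr is the first member. */
	return (struct mmu_notifier_subscriptions_hdr *)subscriptions;
}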
@@ -621,6 +627,37 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 	srcu_read_unlock(&srcu, id);
 }
 
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+
+static inline void mmu_notifier_write_lock(struct mm_struct *mm)
+{
+	percpu_down_write(
+		&mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
+}
+
+static inline void mmu_notifier_write_unlock(struct mm_struct *mm)
+{
+	percpu_up_write(
+		&mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
+}
+
+#else /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
+static inline void mmu_notifier_write_lock(struct mm_struct *mm) {}
+static inline void mmu_notifier_write_unlock(struct mm_struct *mm) {}
+
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
+
+static void init_subscriptions(struct mmu_notifier_subscriptions *subscriptions)
+{
+	INIT_HLIST_HEAD(&subscriptions->list);
+	spin_lock_init(&subscriptions->lock);
+	subscriptions->invalidate_seq = 2;
+	subscriptions->itree = RB_ROOT_CACHED;
+	init_waitqueue_head(&subscriptions->wq);
+	INIT_HLIST_HEAD(&subscriptions->deferred_list);
+}
+
 /*
  * Same as mmu_notifier_register but here the caller must hold the mmap_lock in
  * write mode. A NULL mn signals the notifier is being registered for itree
@@ -653,17 +690,16 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
 		if (!subscriptions)
 			return -ENOMEM;
 
-		INIT_HLIST_HEAD(&subscriptions->list);
-		spin_lock_init(&subscriptions->lock);
-		subscriptions->invalidate_seq = 2;
-		subscriptions->itree = RB_ROOT_CACHED;
-		init_waitqueue_head(&subscriptions->wq);
-		INIT_HLIST_HEAD(&subscriptions->deferred_list);
+		init_subscriptions(subscriptions);
 	}
 
+	mmu_notifier_write_lock(mm);
+
 	ret = mm_take_all_locks(mm);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		mmu_notifier_write_unlock(mm);
 		goto out_clean;
+	}
 
 	/*
 	 * Serialize the update against mmu_notifier_unregister. A
@@ -683,6 +719,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
 	 */
 	if (subscriptions)
 		smp_store_release(&mm->notifier_subscriptions, subscriptions);
+	mm->notifier_subscriptions->hdr.valid = true;
 
 	if (subscription) {
 		/* Pairs with the mmdrop in mmu_notifier_unregister_* */
@@ -698,6 +735,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
 	mm->notifier_subscriptions->has_itree = true;
 
 	mm_drop_all_locks(mm);
+	mmu_notifier_write_unlock(mm);
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return 0;
 
@@ -1125,3 +1163,41 @@ mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
 	return range->vma->vm_flags & VM_READ;
 }
 EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);
+
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+
+bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
+{
+	struct mmu_notifier_subscriptions *subscriptions;
+	struct percpu_rw_semaphore_atomic *sem;
+
+	subscriptions = kzalloc(
+		sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
+	if (!subscriptions)
+		return false;
+
+	sem = kzalloc(sizeof(struct percpu_rw_semaphore_atomic), GFP_KERNEL);
+	if (!sem) {
+		kfree(subscriptions);
+		return false;
+	}
+	percpu_init_rwsem(&sem->rw_sem);
+
+	init_subscriptions(subscriptions);
+	subscriptions->has_itree = true;
+	subscriptions->hdr.valid = false;
+	subscriptions->hdr.mmu_notifier_lock = sem;
+	mm->notifier_subscriptions = subscriptions;
+
+	return true;
+}
+
+void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
+{
+	percpu_rwsem_async_destroy(
+		mm->notifier_subscriptions->hdr.mmu_notifier_lock);
+	kfree(mm->notifier_subscriptions);
+	mm->notifier_subscriptions = NULL;
+}
+
+#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
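Note: with CONFIG_SPECULATIVE_PAGE_FAULT every mm now gets its subscriptions (plus the embedded percpu rwsem) allocated up front, and teardown goes through percpu_rwsem_async_destroy() from the percpu-rwsem hunk above, since the exit path cannot block in percpu_free_rwsem(). The fault-side mmu_notifier_trylock()/unlock() seen in the mm/memory.c hunk is not shown in this diff; presumably it is the read side of the same semaphore. A sketch of that assumption, with my_ prefixes to mark the helpers as hypothetical:

static inline bool my_mmu_notifier_trylock(struct mm_struct *mm)
{
	return percpu_down_read_trylock(
		&mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
}

static inline void my_mmu_notifier_unlock(struct mm_struct *mm)
{
	percpu_up_read(
		&mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
}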
mm/slub.c (73 changed lines)
@@ -433,6 +433,18 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
 static DEFINE_SPINLOCK(object_map_lock);
 
+static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
+		       struct page *page)
+{
+	void *addr = page_address(page);
+	void *p;
+
+	bitmap_zero(obj_map, page->objects);
+
+	for (p = page->freelist; p; p = get_freepointer(s, p))
+		set_bit(__obj_to_index(s, addr, p), obj_map);
+}
+
 /*
  * Determine a map of object in use on a page.
 *
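Note: __fill_map() factors the freelist walk out of get_map() so a caller can populate its own bitmap, with no object_map_lock and no IRQ-off requirement; a set bit marks an object on the freelist, i.e. a free one. A hypothetical caller, assuming the page is kept stable (e.g. under the node's list_lock in real code):

#include <linux/bitmap.h>

static void report_free_objects(struct kmem_cache *s, struct page *page)
{
	unsigned long *map = bitmap_alloc(page->objects, GFP_KERNEL);

	if (!map)
		return;
	__fill_map(map, s, page);	/* set bit == object is free */
	pr_info("%d of %u objects free\n",
		bitmap_weight(map, page->objects), page->objects);
	bitmap_free(map);
}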
@@ -442,17 +454,11 @@ static DEFINE_SPINLOCK(object_map_lock);
 static unsigned long *get_map(struct kmem_cache *s, struct page *page)
 	__acquires(&object_map_lock)
 {
-	void *p;
-	void *addr = page_address(page);
-
 	VM_BUG_ON(!irqs_disabled());
 
 	spin_lock(&object_map_lock);
 
-	bitmap_zero(object_map, page->objects);
-
-	for (p = page->freelist; p; p = get_freepointer(s, p))
-		set_bit(__obj_to_index(s, addr, p), object_map);
+	__fill_map(object_map, s, page);
 
 	return object_map;
 }
@@ -1597,7 +1603,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
 }
 
 static inline bool slab_free_freelist_hook(struct kmem_cache *s,
-					   void **head, void **tail)
+					   void **head, void **tail,
+					   int *cnt)
 {
 
 	void *object;
@@ -1624,6 +1631,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
 			*head = object;
 			if (!*tail)
 				*tail = object;
+		} else {
+			/*
+			 * Adjust the reconstructed freelist depth
+			 * accordingly if object's reuse is delayed.
+			 */
+			--(*cnt);
 		}
 	} while (object != old_tail);
 
@@ -3148,7 +3161,9 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
 
-	memcg_slab_free_hook(s, &head, 1);
+	/* memcg_slab_free_hook() is already called for bulk free. */
+	if (!tail)
+		memcg_slab_free_hook(s, &head, 1);
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -3192,7 +3207,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
 	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
 	 * to remove objects, whose reuse must be delayed.
 	 */
-	if (slab_free_freelist_hook(s, &head, &tail))
+	if (slab_free_freelist_hook(s, &head, &tail, &cnt))
 		do_slab_free(s, page, head, tail, cnt, addr);
 }
 
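Note: slab_free_freelist_hook() can now drop objects from the batch (KASAN quarantine delays their reuse), so the object count is passed by reference and decremented in step; otherwise do_slab_free() would credit the per-cpu freelist with more objects than were actually spliced back. Relatedly, do_slab_free() now skips the memcg uncharge for bulk frees (tail != NULL), which already ran it once per detached freelist. The caller shape, condensed from slab_free() above:

static void slab_free_sketch(struct kmem_cache *s, struct page *page,
			     void *head, void *tail, int cnt,
			     unsigned long addr)
{
	/* The hook may shrink the batch; cnt tracks it exactly. */
	if (slab_free_freelist_hook(s, &head, &tail, &cnt))
		do_slab_free(s, page, head, tail, cnt, addr);
}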
@@ -3888,8 +3903,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 	if (alloc_kmem_cache_cpus(s))
 		return 0;
 
-	free_kmem_cache_nodes(s);
 error:
+	__kmem_cache_release(s);
 	return -EINVAL;
 }
 
@@ -4501,13 +4516,15 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
 		return 0;
 
 	err = sysfs_slab_add(s);
-	if (err)
+	if (err) {
 		__kmem_cache_release(s);
+		return err;
+	}
 
 	if (s->flags & SLAB_STORE_USER)
 		debugfs_slab_add(s);
 
-	return err;
+	return 0;
 }
 
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
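Note: both slub error-path hunks route failures through __kmem_cache_release(): kmem_cache_open() now releases everything on any failure (not just the node structures), and __kmem_cache_create() returns immediately after a sysfs_slab_add() failure instead of falling through and registering debugfs for a cache that is already torn down. The shape of the fixed path, condensed (the SLAB_STORE_USER guard around debugfs is elided):

static int create_sketch(struct kmem_cache *s)
{
	int err = sysfs_slab_add(s);

	if (err) {
		__kmem_cache_release(s);
		return err;	/* stop: no debugfs on a dead cache */
	}
	debugfs_slab_add(s);	/* only reached on success */
	return 0;
}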
@@ -4778,17 +4795,17 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 }
 
 static void process_slab(struct loc_track *t, struct kmem_cache *s,
-		struct page *page, enum track_item alloc)
+		struct page *page, enum track_item alloc,
+		unsigned long *obj_map)
 {
 	void *addr = page_address(page);
 	void *p;
-	unsigned long *map;
 
-	map = get_map(s, page);
+	__fill_map(obj_map, s, page);
 
 	for_each_object(p, s, addr, page->objects)
-		if (!test_bit(__obj_to_index(s, addr, p), map))
+		if (!test_bit(__obj_to_index(s, addr, p), obj_map))
 			add_location(t, s, get_track(s, p, alloc));
-	put_map(map);
 }
 #endif  /* CONFIG_DEBUG_FS */
 #endif	/* CONFIG_SLUB_DEBUG */
@@ -5783,14 +5800,27 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
 	struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
 						sizeof(struct loc_track));
 	struct kmem_cache *s = file_inode(filep)->i_private;
+	unsigned long *obj_map;
+
+	if (!t)
+		return -ENOMEM;
+
+	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
+	if (!obj_map) {
+		seq_release_private(inode, filep);
+		return -ENOMEM;
+	}
 
 	if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
 		alloc = TRACK_ALLOC;
 	else
 		alloc = TRACK_FREE;
 
-	if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL))
+	if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
+		bitmap_free(obj_map);
+		seq_release_private(inode, filep);
 		return -ENOMEM;
+	}
 
 	/* Push back cpu slabs */
 	flush_all(s);
@@ -5804,12 +5834,13 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
 
 		spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, slab_list)
-			process_slab(t, s, page, alloc);
+			process_slab(t, s, page, alloc, obj_map);
 		list_for_each_entry(page, &n->full, slab_list)
-			process_slab(t, s, page, alloc);
+			process_slab(t, s, page, alloc, obj_map);
 		spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
+	bitmap_free(obj_map);
 	return 0;
 }
 
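Note: the debugfs reader now allocates one bitmap per open, sized for the largest order the cache uses (oo_objects(s->oo)), and threads it through process_slab(); previously every slab went through the global object_map under object_map_lock with IRQs disabled. The lifecycle, condensed from the two hunks above:

static int trace_open_sketch(struct kmem_cache *s)
{
	unsigned long *obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);

	if (!obj_map)
		return -ENOMEM;
	/* ... per node: __fill_map()/process_slab() under n->list_lock ... */
	bitmap_free(obj_map);
	return 0;
}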
@@ -133,6 +133,7 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 	if (WARN_ON(!dmab))
 		return -ENXIO;
 
+	size = PAGE_ALIGN(size);
 	dmab->dev.type = type;
 	dmab->dev.dev = device;
 	dmab->bytes = 0;
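Note: this final hunk (snd_dma_alloc_pages(), apparently sound/core/memalloc.c) rounds the request up front, so dmab->bytes and every backend allocator see a whole number of pages. For example:

#include <linux/mm.h>

static size_t dma_request_size(size_t size)
{
	return PAGE_ALIGN(size);	/* e.g. 3000 -> 4096 with 4 KiB pages */
}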