Merge "Merge keystone/android12-5.10-keystone-qcom-release.66+ (698fa19
) into msm-5.10"
commit 20fbd1abd0, committed by Gerrit - the friendly Code Review server
@@ -1 +1 @@
672d51b2a7fa01a15c9d55f33884808a115db18d
a7ab784f601a93a78c1c22cd0aacc2af64d8e3c8

File diff suppressed because it is too large
@@ -63,6 +63,7 @@
bitmap_to_arr32
__bitmap_weight
blk_alloc_queue
blk_check_plugged
blk_cleanup_queue
blkdev_get_by_dev
blkdev_put
@@ -686,6 +687,7 @@
drm_property_create_range
drm_property_lookup_blob
drm_property_replace_blob
drm_puts
__drm_puts_seq_file
drm_read
drm_rect_clip_scaled
@@ -1012,6 +1014,7 @@
__kfifo_in
__kfifo_init
__kfifo_out
__kfifo_out_peek
kfree
kfree_sensitive
kfree_skb
@@ -1032,6 +1035,7 @@
kobject_add
kobject_create_and_add
kobject_del
kobject_get
kobject_init
kobject_init_and_add
kobject_put
@@ -1079,6 +1083,7 @@
ktime_get_real_seconds
ktime_get_real_ts64
ktime_get_seconds
ktime_get_snapshot
ktime_get_ts64
ktime_get_with_offset
kvfree
@@ -1590,6 +1595,7 @@
rtc_valid_tm
__rt_mutex_init
rt_mutex_lock
rt_mutex_trylock
rt_mutex_unlock
rtnl_is_locked
rtnl_lock

@@ -20,6 +20,9 @@
bcmp
bdput
__bitmap_and
blocking_notifier_call_chain
blocking_notifier_chain_register
blocking_notifier_chain_unregister
bpf_trace_run1
bpf_trace_run2
bpf_trace_run3
@@ -103,6 +106,7 @@
debugfs_create_dir
debugfs_create_file
debugfs_remove
default_llseek
deferred_free
delayed_work_timer_fn
del_timer
@@ -161,6 +165,7 @@
devm_phy_create
devm_phy_get
devm_pinctrl_get
devm_pinctrl_put
devm_platform_ioremap_resource
__devm_regmap_init
__devm_regmap_init_i2c
@@ -202,6 +207,12 @@
dma_buf_map_attachment
dma_buf_put
dma_buf_unmap_attachment
dma_fence_context_alloc
dma_fence_default_wait
dma_fence_init
dma_fence_release
dma_fence_signal_locked
dma_fence_wait_timeout
dma_free_attrs
dma_heap_add
dma_heap_get_dev
@@ -369,6 +380,7 @@
event_triggers_call
extcon_set_state_sync
failure_tracking
fd_install
find_next_bit
finish_wait
flush_work
@@ -399,6 +411,7 @@
get_cpu_device
get_device
get_random_bytes
get_unused_fd_flags
gic_nonsecure_priorities
gpiochip_generic_free
gpiochip_generic_request
@@ -431,6 +444,7 @@
i2c_recover_bus
i2c_register_driver
i2c_smbus_read_byte_data
i2c_smbus_write_byte_data
i2c_transfer
i2c_transfer_buffer_flags
i2c_unregister_device
@@ -511,6 +525,7 @@
__list_add_valid
__list_del_entry_valid
__local_bh_enable_ip
__lock_page
__log_post_read_mmio
__log_read_mmio
__log_write_mmio
@@ -532,6 +547,7 @@
__memcpy_fromio
memdup_user
memmove
memparse
memset
__memset_io
memstart_addr
@@ -616,6 +632,7 @@
of_get_next_child
of_get_parent
of_get_property
of_get_regulator_init_data
of_graph_get_next_endpoint
of_graph_get_port_by_id
of_graph_get_remote_node
@@ -883,6 +900,7 @@
__stack_chk_fail
__stack_chk_guard
strcasecmp
strcat
strcmp
strcpy
strlcpy
@@ -891,11 +909,14 @@
strncpy
strrchr
strscpy
strsep
strstr
__sw_hweight16
__sw_hweight32
__sw_hweight64
__sw_hweight8
sync_file_create
sync_file_get_fence
synchronize_irq
synchronize_net
synchronize_rcu
@@ -948,6 +969,7 @@
uart_update_timeout
uart_write_wakeup
__udelay
unlock_page
__unregister_chrdev
unregister_chrdev_region
unregister_inet6addr_notifier
@@ -1061,6 +1083,7 @@
vmap
vsnprintf
vunmap
wait_for_completion
wait_for_completion_interruptible
wait_for_completion_interruptible_timeout
wait_for_completion_timeout
@@ -1252,9 +1275,6 @@
mmc_cqe_request_done

# required by device_cooling.ko
blocking_notifier_call_chain
blocking_notifier_chain_register
blocking_notifier_chain_unregister
thermal_cooling_device_unregister
thermal_of_cooling_device_register

@@ -1286,6 +1306,29 @@
devm_phy_optional_get
drm_of_encoder_active_endpoint

# required by fb.ko
__arch_copy_in_user
compat_alloc_user_space
console_lock
console_unlock
fb_mode_option
file_update_time
file_write_and_wait_range
ignore_console_lock_warning
int_sqrt
is_console_locked
__memcpy_toio
of_get_videomode
page_mkclean
proc_create_seq_private
simple_strtol
vm_get_page_prot
vm_iomap_memory

# required by fb_fence.ko
put_unused_fd
system_unbound_wq

# required by fec.ko
ethtool_op_get_ts_info
mdiobus_alloc_size
@@ -1332,6 +1375,12 @@
tso_count_descs
tso_start

# required by fp9931-core.ko
devm_mfd_add_devices

# required by fp9931-regulator.ko
gpiod_get_raw_value

# required by fsl-edma-v3.ko
dma_get_slave_channel
of_dma_controller_free
@@ -1358,13 +1407,7 @@
dev_pm_opp_add
dev_pm_opp_remove
dma_fence_array_ops
dma_fence_context_alloc
dma_fence_default_wait
dma_fence_init
dma_fence_release
dma_fence_signal
dma_fence_signal_locked
dma_fence_wait_timeout
down
driver_create_file
driver_remove_file
@@ -1372,10 +1415,8 @@
drm_gem_object_lookup
drm_gem_object_release
drm_gem_private_object_init
fd_install
find_vma
find_vpid
get_unused_fd_flags
get_user_pages
hrtimer_resolution
iommu_attach_device
@@ -1392,24 +1433,18 @@
platform_bus_type
reset_control_reset
schedule_hrtimeout
sync_file_create
sync_file_get_fence
__task_pid_nr_ns
_totalram_pages
vm_mmap
vm_munmap
vm_zone_stat

# required by gmsl-max9286.ko
i2c_smbus_write_byte_data

# required by goodix.ko
gpiod_direction_input
input_alloc_absinfo
input_mt_sync_frame
touchscreen_parse_properties
touchscreen_report_pos
wait_for_completion

# required by gpio-imx-rpmsg.ko
__irq_alloc_descs
@@ -1444,7 +1479,6 @@
# required by gpio-regulator.ko
devm_kstrdup
gpiod_count
of_get_regulator_init_data

# required by gpio-reset.ko
reset_controller_register
@@ -1762,11 +1796,9 @@
sdio_writeb
sdio_writesb
skb_realloc_headroom
strcat
strchr
strim
strncasecmp
strsep
vprintk
wakeup_source_add
wakeup_source_remove
@@ -1831,6 +1863,9 @@
v4l2_m2m_dqbuf
v4l2_m2m_qbuf

# required by mxc_epdc_v2_fb.ko
fb_get_options

# required by mxs-dma.ko
dmaenginem_async_device_register
tasklet_setup
@@ -1868,9 +1903,6 @@
v4l2_event_subdev_unsubscribe
__v4l2_find_nearest_size

# required by ov5640_camera_mipi_v2.ko
devm_pinctrl_put

# required by panel-raydium-rm67191.ko
devm_backlight_device_register
mipi_dsi_dcs_get_display_brightness
@@ -2125,7 +2157,6 @@
snd_interval_refine

# required by snd-soc-imx-audmux.ko
default_llseek
simple_open
simple_read_from_buffer

@@ -2339,7 +2370,6 @@
fsync_bdev
__get_free_pages
__init_rwsem
memparse
memset64
__num_online_cpus
page_endio
@@ -2362,7 +2392,6 @@
kern_mount
kern_unmount
kill_anon_super
__lock_page
page_mapping
_raw_read_lock
_raw_read_unlock
@@ -2370,5 +2399,4 @@
_raw_write_unlock
register_shrinker
__SetPageMovable
unlock_page
unregister_shrinker

@@ -3327,6 +3327,7 @@
woken_wake_function
work_busy
work_on_cpu
wq_worker_comm
ww_mutex_lock
ww_mutex_unlock
__xa_alloc

@@ -899,6 +899,7 @@
finish_wait
firmware_request_nowarn
flush_dcache_page
flush_delayed_fput
flush_delayed_work
__flush_icache_range
flush_work
@@ -1104,6 +1105,7 @@
idr_replace
iio_channel_get_all
iio_read_channel_processed
iio_write_channel_raw
import_iovec
in4_pton
in6_pton
@@ -2613,6 +2615,7 @@
__tracepoint_android_rvh_cpu_cgroup_online
__tracepoint_android_rvh_cpufreq_transition
__tracepoint_android_rvh_dequeue_task
__tracepoint_android_rvh_do_ptrauth_fault
__tracepoint_android_rvh_do_sched_yield
__tracepoint_android_rvh_enqueue_task
__tracepoint_android_rvh_find_busiest_queue
@@ -2696,6 +2699,7 @@
__tracepoint_android_vh_show_max_freq
__tracepoint_android_vh_show_resume_epoch_val
__tracepoint_android_vh_show_suspend_epoch_val
__tracepoint_android_vh_subpage_dma_contig_alloc
__tracepoint_android_vh_timer_calc_index
__tracepoint_android_vh_ufs_check_int_errors
__tracepoint_android_vh_ufs_clock_scaling

@@ -4587,23 +4587,20 @@ static int binder_thread_release(struct binder_proc *proc,
__release(&t->lock);

/*
* If this thread used poll, make sure we remove the waitqueue
* from any epoll data structures holding it with POLLFREE.
* waitqueue_active() is safe to use here because we're holding
* the inner lock.
* If this thread used poll, make sure we remove the waitqueue from any
* poll data structures holding it.
*/
if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
waitqueue_active(&thread->wait)) {
wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
}
if (thread->looper & BINDER_LOOPER_STATE_POLL)
wake_up_pollfree(&thread->wait);

binder_inner_proc_unlock(thread->proc);

/*
* This is needed to avoid races between wake_up_poll() above and
* and ep_remove_waitqueue() called for other reasons (eg the epoll file
* descriptor being closed); ep_remove_waitqueue() holds an RCU read
* lock, so we can be sure it's done after calling synchronize_rcu().
* This is needed to avoid races between wake_up_pollfree() above and
* someone else removing the last entry from the queue for other reasons
* (e.g. ep_remove_wait_queue() being called due to an epoll file
* descriptor being closed). Such other users hold an RCU read lock, so
* we can be sure they're done after we call synchronize_rcu().
*/
if (thread->looper & BINDER_LOOPER_STATE_POLL)
synchronize_rcu();

@@ -110,6 +110,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_wake);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_write_finished);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_rwsem_list_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_futex_plist_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_futex_sleep_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rtmutex_wait_start);
@@ -400,3 +401,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_css_offline);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_css_online);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_free);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mem_cgroup_alloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_subpage_dma_contig_alloc);

@@ -3469,6 +3469,14 @@ static int __clk_core_init(struct clk_core *core)

clk_prepare_lock();

/*
* Set hw->core after grabbing the prepare_lock to synchronize with
* callers of clk_core_fill_parent_index() where we treat hw->core
* being NULL as the clk not being registered yet. This is crucial so
* that clks aren't parented until their parent is fully registered.
*/
core->hw->core = core;

ret = clk_pm_runtime_get(core);
if (ret)
goto unlock;
@@ -3640,8 +3648,10 @@ static int __clk_core_init(struct clk_core *core)
out:
clk_pm_runtime_put(core);
unlock:
if (ret)
if (ret) {
hlist_del_init(&core->child_node);
core->hw->core = NULL;
}

clk_prepare_unlock();

@@ -3905,7 +3915,6 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
core->num_parents = init->num_parents;
core->min_rate = 0;
core->max_rate = ULONG_MAX;
hw->core = core;

ret = clk_core_populate_parent_map(core, init);
if (ret)
@@ -3923,7 +3932,7 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_create_clk;
}

clk_core_link_consumer(hw->core, hw->clk);
clk_core_link_consumer(core, hw->clk);

ret = __clk_core_init(core);
if (!ret)

@@ -350,29 +350,18 @@ out:

static long _dma_buf_set_name(struct dma_buf *dmabuf, const char *name)
{
long ret = 0;

dma_resv_lock(dmabuf->resv, NULL);
if (!list_empty(&dmabuf->attachments)) {
ret = -EBUSY;
goto out_unlock;
}
spin_lock(&dmabuf->name_lock);
kfree(dmabuf->name);
dmabuf->name = name;
spin_unlock(&dmabuf->name_lock);

out_unlock:
dma_resv_unlock(dmabuf->resv);
return ret;
return 0;
}

/**
* dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
* The name of the dma-buf buffer can only be set when the dma-buf is not
* attached to any devices. It could theoritically support changing the
* name of the dma-buf if the same piece of memory is used for multiple
* purpose between different devices.
* It could support changing the name of the dma-buf if the same piece of
* memory is used for multiple purpose between different devices.
*
* @dmabuf: [in] dmabuf buffer that will be renamed.
* @buf: [in] A piece of userspace memory that contains the name of

@@ -207,14 +207,14 @@ config HID_CHERRY

config HID_CHICONY
tristate "Chicony devices"
depends on HID
depends on USB_HID
default !EXPERT
help
Support for Chicony Tactical pad and special keys on Chicony keyboards.

config HID_CORSAIR
tristate "Corsair devices"
depends on HID && USB && LEDS_CLASS
depends on USB_HID && LEDS_CLASS
help
Support for Corsair devices that are not fully compliant with the
HID standard.
@@ -245,7 +245,7 @@ config HID_MACALLY

config HID_PRODIKEYS
tristate "Prodikeys PC-MIDI Keyboard support"
depends on HID && SND
depends on USB_HID && SND
select SND_RAWMIDI
help
Support for Prodikeys PC-MIDI Keyboard device support.
@@ -541,7 +541,7 @@ config HID_LENOVO

config HID_LOGITECH
tristate "Logitech devices"
depends on HID
depends on USB_HID
depends on LEDS_CLASS
default !EXPERT
help
@@ -918,7 +918,7 @@ config HID_SAITEK

config HID_SAMSUNG
tristate "Samsung InfraRed remote control or keyboards"
depends on HID
depends on USB_HID
help
Support for Samsung InfraRed remote control or keyboards.

@@ -918,8 +918,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
|
||||
drvdata->tp = &asus_i2c_tp;
|
||||
|
||||
if ((drvdata->quirks & QUIRK_T100_KEYBOARD) &&
|
||||
hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
|
||||
if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) {
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
|
||||
if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
|
||||
@@ -947,8 +946,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
drvdata->tp = &asus_t100chi_tp;
|
||||
}
|
||||
|
||||
if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
|
||||
hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
|
||||
if ((drvdata->quirks & QUIRK_MEDION_E1239T) && hid_is_usb(hdev)) {
|
||||
struct usb_host_interface *alt =
|
||||
to_usb_interface(hdev->dev.parent)->altsetting;
|
||||
|
||||
|
@@ -191,7 +191,7 @@ static void bigben_worker(struct work_struct *work)
|
||||
struct bigben_device, worker);
|
||||
struct hid_field *report_field = bigben->report->field[0];
|
||||
|
||||
if (bigben->removed)
|
||||
if (bigben->removed || !report_field)
|
||||
return;
|
||||
|
||||
if (bigben->work_led) {
|
||||
|
@@ -58,8 +58,12 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
|
||||
static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
||||
unsigned int *rsize)
|
||||
{
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
|
||||
struct usb_interface *intf;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return rdesc;
|
||||
|
||||
intf = to_usb_interface(hdev->dev.parent);
|
||||
if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
|
||||
/* Change usage maximum and logical maximum from 0x7fff to
|
||||
* 0x2fff, so they don't exceed HID_MAX_USAGES */
|
||||
|
@@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id)
|
||||
int ret;
|
||||
unsigned long quirks = id->driver_data;
|
||||
struct corsair_drvdata *drvdata;
|
||||
struct usb_interface *usbif = to_usb_interface(dev->dev.parent);
|
||||
struct usb_interface *usbif;
|
||||
|
||||
if (!hid_is_usb(dev))
|
||||
return -EINVAL;
|
||||
|
||||
usbif = to_usb_interface(dev->dev.parent);
|
||||
|
||||
drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata),
|
||||
GFP_KERNEL);
|
||||
|
@@ -50,7 +50,7 @@ struct elan_drvdata {
|
||||
|
||||
static int is_not_elan_touchpad(struct hid_device *hdev)
|
||||
{
|
||||
if (hdev->bus == BUS_USB) {
|
||||
if (hid_is_usb(hdev)) {
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
|
||||
return (intf->altsetting->desc.bInterfaceNumber !=
|
||||
|
@@ -229,6 +229,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
struct elo_priv *priv;
|
||||
int ret;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
@@ -140,12 +140,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
|
||||
static int holtek_kbd_probe(struct hid_device *hdev,
|
||||
const struct hid_device_id *id)
|
||||
{
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
int ret = hid_parse(hdev);
|
||||
struct usb_interface *intf;
|
||||
int ret;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
ret = hid_parse(hdev);
|
||||
if (!ret)
|
||||
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
|
||||
|
||||
intf = to_usb_interface(hdev->dev.parent);
|
||||
if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
|
||||
struct hid_input *hidinput;
|
||||
list_for_each_entry(hidinput, &hdev->inputs, list) {
|
||||
|
@@ -62,6 +62,14 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
||||
return rdesc;
|
||||
}
|
||||
|
||||
static int holtek_mouse_probe(struct hid_device *hdev,
|
||||
const struct hid_device_id *id)
|
||||
{
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct hid_device_id holtek_mouse_devices[] = {
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
|
||||
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
|
||||
@@ -83,6 +91,7 @@ static struct hid_driver holtek_mouse_driver = {
|
||||
.name = "holtek_mouse",
|
||||
.id_table = holtek_mouse_devices,
|
||||
.report_fixup = holtek_mouse_report_fixup,
|
||||
.probe = holtek_mouse_probe,
|
||||
};
|
||||
|
||||
module_hid_driver(holtek_mouse_driver);
|
||||
|
@@ -769,12 +769,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report,
|
||||
|
||||
static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
{
|
||||
struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
|
||||
__u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
|
||||
struct usb_interface *iface;
|
||||
__u8 iface_num;
|
||||
unsigned int connect_mask = HID_CONNECT_DEFAULT;
|
||||
struct lg_drv_data *drv_data;
|
||||
int ret;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
iface = to_usb_interface(hdev->dev.parent);
|
||||
iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
|
||||
|
||||
/* G29 only work with the 1st interface */
|
||||
if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
|
||||
(iface_num != 0)) {
|
||||
|
@@ -1693,7 +1693,7 @@ static int logi_dj_probe(struct hid_device *hdev,
|
||||
case recvr_type_27mhz: no_dj_interfaces = 2; break;
|
||||
case recvr_type_bluetooth: no_dj_interfaces = 2; break;
|
||||
}
|
||||
if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
|
||||
if (hid_is_usb(hdev)) {
|
||||
intf = to_usb_interface(hdev->dev.parent);
|
||||
if (intf && intf->altsetting->desc.bInterfaceNumber >=
|
||||
no_dj_interfaces) {
|
||||
|
@@ -798,12 +798,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
|
||||
static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
{
|
||||
int ret;
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
|
||||
struct usb_interface *intf;
|
||||
unsigned short ifnum;
|
||||
unsigned long quirks = id->driver_data;
|
||||
struct pk_device *pk;
|
||||
struct pcmidi_snd *pm = NULL;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
intf = to_usb_interface(hdev->dev.parent);
|
||||
ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
|
||||
|
||||
pk = kzalloc(sizeof(*pk), GFP_KERNEL);
|
||||
if (pk == NULL) {
|
||||
hid_err(hdev, "can't alloc descriptor\n");
|
||||
|
@@ -344,6 +344,9 @@ static int arvo_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@@ -324,6 +324,9 @@ static int isku_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@@ -749,6 +749,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@@ -431,6 +431,9 @@ static int koneplus_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@@ -133,6 +133,9 @@ static int konepure_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@@ -501,6 +501,9 @@ static int kovaplus_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@@ -160,6 +160,9 @@ static int lua_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@@ -449,6 +449,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@@ -141,6 +141,9 @@ static int ryos_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@@ -113,6 +113,9 @@ static int savu_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@@ -152,6 +152,9 @@ static int samsung_probe(struct hid_device *hdev,
|
||||
int ret;
|
||||
unsigned int cmask = HID_CONNECT_DEFAULT;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
ret = hid_parse(hdev);
|
||||
if (ret) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@@ -286,7 +286,7 @@ static int u2fzero_probe(struct hid_device *hdev,
|
||||
unsigned int minor;
|
||||
int ret;
|
||||
|
||||
if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
|
||||
|
@@ -164,6 +164,9 @@ static int uclogic_probe(struct hid_device *hdev,
|
||||
struct uclogic_drvdata *drvdata = NULL;
|
||||
bool params_initialized = false;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* libinput requires the pad interface to be on a different node
|
||||
* than the pen, so use QUIRK_MULTI_INPUT for all tablets.
|
||||
|
@@ -841,8 +841,7 @@ int uclogic_params_init(struct uclogic_params *params,
|
||||
struct uclogic_params p = {0, };
|
||||
|
||||
/* Check arguments */
|
||||
if (params == NULL || hdev == NULL ||
|
||||
!hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
|
||||
if (params == NULL || hdev == NULL || !hid_is_usb(hdev)) {
|
||||
rc = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
@@ -726,7 +726,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
|
||||
* Skip the query for this type and modify defaults based on
|
||||
* interface number.
|
||||
*/
|
||||
if (features->type == WIRELESS) {
|
||||
if (features->type == WIRELESS && intf) {
|
||||
if (intf->cur_altsetting->desc.bInterfaceNumber == 0)
|
||||
features->device_type = WACOM_DEVICETYPE_WL_MONITOR;
|
||||
else
|
||||
@@ -2217,7 +2217,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
|
||||
if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
|
||||
char *product_name = wacom->hdev->name;
|
||||
|
||||
if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) {
|
||||
if (hid_is_usb(wacom->hdev)) {
|
||||
struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
|
||||
struct usb_device *dev = interface_to_usbdev(intf);
|
||||
product_name = dev->product;
|
||||
@@ -2448,6 +2448,9 @@ static void wacom_wireless_work(struct work_struct *work)
|
||||
|
||||
wacom_destroy_battery(wacom);
|
||||
|
||||
if (!usbdev)
|
||||
return;
|
||||
|
||||
/* Stylus interface */
|
||||
hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
|
||||
wacom1 = hid_get_drvdata(hdev1);
|
||||
@@ -2727,8 +2730,6 @@ static void wacom_mode_change_work(struct work_struct *work)
|
||||
static int wacom_probe(struct hid_device *hdev,
|
||||
const struct hid_device_id *id)
|
||||
{
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
struct usb_device *dev = interface_to_usbdev(intf);
|
||||
struct wacom *wacom;
|
||||
struct wacom_wac *wacom_wac;
|
||||
struct wacom_features *features;
|
||||
@@ -2763,8 +2764,14 @@ static int wacom_probe(struct hid_device *hdev,
|
||||
wacom_wac->hid_data.inputmode = -1;
|
||||
wacom_wac->mode_report = -1;
|
||||
|
||||
wacom->usbdev = dev;
|
||||
wacom->intf = intf;
|
||||
if (hid_is_usb(hdev)) {
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
struct usb_device *dev = interface_to_usbdev(intf);
|
||||
|
||||
wacom->usbdev = dev;
|
||||
wacom->intf = intf;
|
||||
}
|
||||
|
||||
mutex_init(&wacom->lock);
|
||||
INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
|
||||
INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
|
||||
|
@@ -5180,6 +5180,19 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
|
||||
return retval;
|
||||
}
|
||||
|
||||
/* Release the resources allocated for processing a SCSI command. */
|
||||
static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
|
||||
struct ufshcd_lrb *lrbp)
|
||||
{
|
||||
struct scsi_cmnd *cmd = lrbp->cmd;
|
||||
|
||||
scsi_dma_unmap(cmd);
|
||||
ufshcd_crypto_clear_prdt(hba, lrbp);
|
||||
lrbp->cmd = NULL; /* Mark the command as completed. */
|
||||
ufshcd_release(hba);
|
||||
ufshcd_clk_scaling_update_busy(hba);
|
||||
}
|
||||
|
||||
/**
|
||||
* __ufshcd_transfer_req_compl - handle SCSI and query command completion
|
||||
* @hba: per adapter instance
|
||||
@@ -5190,9 +5203,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
|
||||
{
|
||||
struct ufshcd_lrb *lrbp;
|
||||
struct scsi_cmnd *cmd;
|
||||
int result;
|
||||
int index;
|
||||
bool update_scaling = false;
|
||||
|
||||
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
|
||||
if (!test_and_clear_bit(index, &hba->outstanding_reqs))
|
||||
@@ -5205,16 +5216,10 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
|
||||
ufshcd_update_monitor(hba, lrbp);
|
||||
trace_android_vh_ufs_compl_command(hba, lrbp);
|
||||
ufshcd_add_command_trace(hba, index, "complete");
|
||||
result = ufshcd_transfer_rsp_status(hba, lrbp);
|
||||
scsi_dma_unmap(cmd);
|
||||
cmd->result = result;
|
||||
ufshcd_crypto_clear_prdt(hba, lrbp);
|
||||
/* Mark completed command as NULL in LRB */
|
||||
lrbp->cmd = NULL;
|
||||
cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
|
||||
ufshcd_release_scsi_cmd(hba, lrbp);
|
||||
/* Do not touch lrbp after scsi done */
|
||||
cmd->scsi_done(cmd);
|
||||
ufshcd_release(hba);
|
||||
update_scaling = true;
|
||||
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
|
||||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
|
||||
if (hba->dev_cmd.complete) {
|
||||
@@ -5222,11 +5227,9 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
|
||||
ufshcd_add_command_trace(hba, index,
|
||||
"dev_complete");
|
||||
complete(hba->dev_cmd.complete);
|
||||
update_scaling = true;
|
||||
ufshcd_clk_scaling_update_busy(hba);
|
||||
}
|
||||
}
|
||||
if (update_scaling)
|
||||
ufshcd_clk_scaling_update_busy(hba);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6366,9 +6369,8 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
|
||||
irqreturn_t ret = IRQ_NONE;
|
||||
int tag;
|
||||
|
||||
pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
|
||||
|
||||
spin_lock_irqsave(hba->host->host_lock, flags);
|
||||
pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
|
||||
issued = hba->outstanding_tasks & ~pending;
|
||||
for_each_set_bit(tag, &issued, hba->nutmrs) {
|
||||
struct request *req = tmf_rqs[tag];
|
||||
@@ -6529,11 +6531,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
|
||||
err = wait_for_completion_io_timeout(&wait,
|
||||
msecs_to_jiffies(TM_CMD_TIMEOUT));
|
||||
if (!err) {
|
||||
/*
|
||||
* Make sure that ufshcd_compl_tm() does not trigger a
|
||||
* use-after-free.
|
||||
*/
|
||||
req->end_io_data = NULL;
|
||||
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
|
||||
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
|
||||
__func__, tm_function);
|
||||
@@ -6944,6 +6941,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
|
||||
unsigned long flags;
|
||||
unsigned int tag;
|
||||
int err = FAILED, res;
|
||||
bool outstanding;
|
||||
struct ufshcd_lrb *lrbp;
|
||||
u32 reg;
|
||||
|
||||
@@ -7030,6 +7028,17 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
|
||||
goto release;
|
||||
}
|
||||
|
||||
/*
|
||||
* Clear the corresponding bit from outstanding_reqs since the command
|
||||
* has been aborted successfully.
|
||||
*/
|
||||
spin_lock_irqsave(host->host_lock, flags);
|
||||
outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
|
||||
spin_unlock_irqrestore(host->host_lock, flags);
|
||||
|
||||
if (outstanding)
|
||||
ufshcd_release_scsi_cmd(hba, lrbp);
|
||||
|
||||
err = SUCCESS;
|
||||
|
||||
release:
|
||||
|
@@ -264,19 +264,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
|
||||
{
|
||||
u32 reg;
|
||||
int retries = 1000;
|
||||
int ret;
|
||||
|
||||
usb_phy_init(dwc->usb2_phy);
|
||||
usb_phy_init(dwc->usb3_phy);
|
||||
ret = phy_init(dwc->usb2_generic_phy);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = phy_init(dwc->usb3_generic_phy);
|
||||
if (ret < 0) {
|
||||
phy_exit(dwc->usb2_generic_phy);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* We're resetting only the device side because, if we're in host mode,
|
||||
@@ -310,9 +297,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
|
||||
udelay(1);
|
||||
} while (--retries);
|
||||
|
||||
phy_exit(dwc->usb3_generic_phy);
|
||||
phy_exit(dwc->usb2_generic_phy);
|
||||
|
||||
return -ETIMEDOUT;
|
||||
|
||||
done:
|
||||
@@ -982,9 +966,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
|
||||
dwc->phys_ready = true;
|
||||
}
|
||||
|
||||
usb_phy_init(dwc->usb2_phy);
|
||||
usb_phy_init(dwc->usb3_phy);
|
||||
ret = phy_init(dwc->usb2_generic_phy);
|
||||
if (ret < 0)
|
||||
goto err0a;
|
||||
|
||||
ret = phy_init(dwc->usb3_generic_phy);
|
||||
if (ret < 0) {
|
||||
phy_exit(dwc->usb2_generic_phy);
|
||||
goto err0a;
|
||||
}
|
||||
|
||||
ret = dwc3_core_soft_reset(dwc);
|
||||
if (ret)
|
||||
goto err0a;
|
||||
goto err1;
|
||||
|
||||
if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
|
||||
!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
|
||||
|
@@ -1724,6 +1724,18 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
|
||||
struct usb_function *f = NULL;
|
||||
u8 endp;
|
||||
|
||||
if (w_length > USB_COMP_EP0_BUFSIZ) {
|
||||
if (ctrl->bRequestType & USB_DIR_IN) {
|
||||
/* Cast away the const, we are going to overwrite on purpose. */
|
||||
__le16 *temp = (__le16 *)&ctrl->wLength;
|
||||
|
||||
*temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
|
||||
w_length = USB_COMP_EP0_BUFSIZ;
|
||||
} else {
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
|
||||
/* partial re-init of the response message; the function or the
|
||||
* gadget might need to intercept e.g. a control-OUT completion
|
||||
* when we delegate to it.
|
||||
@@ -2254,7 +2266,7 @@ int composite_dev_prepare(struct usb_composite_driver *composite,
|
||||
if (!cdev->req)
|
||||
return -ENOMEM;
|
||||
|
||||
cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
|
||||
cdev->req->buf = kzalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL);
|
||||
if (!cdev->req->buf)
|
||||
goto fail;
|
||||
|
||||
|
@@ -117,6 +117,7 @@ struct uvc_device {
|
||||
enum uvc_state state;
|
||||
struct usb_function func;
|
||||
struct uvc_video video;
|
||||
bool func_connected;
|
||||
|
||||
/* Descriptors */
|
||||
struct {
|
||||
@@ -147,6 +148,7 @@ static inline struct uvc_device *to_uvc(struct usb_function *f)
|
||||
struct uvc_file_handle {
|
||||
struct v4l2_fh vfh;
|
||||
struct uvc_video *device;
|
||||
bool is_uvc_app_handle;
|
||||
};
|
||||
|
||||
#define to_uvc_file_handle(handle) \
|
||||
|
@@ -227,17 +227,55 @@ static int
|
||||
uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
|
||||
const struct v4l2_event_subscription *sub)
|
||||
{
|
||||
struct uvc_device *uvc = video_get_drvdata(fh->vdev);
|
||||
struct uvc_file_handle *handle = to_uvc_file_handle(fh);
|
||||
int ret;
|
||||
|
||||
if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
|
||||
return -EINVAL;
|
||||
|
||||
return v4l2_event_subscribe(fh, sub, 2, NULL);
|
||||
if (sub->type == UVC_EVENT_SETUP && uvc->func_connected)
|
||||
return -EBUSY;
|
||||
|
||||
ret = v4l2_event_subscribe(fh, sub, 2, NULL);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (sub->type == UVC_EVENT_SETUP) {
|
||||
uvc->func_connected = true;
|
||||
handle->is_uvc_app_handle = true;
|
||||
uvc_function_connect(uvc);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void uvc_v4l2_disable(struct uvc_device *uvc)
|
||||
{
|
||||
uvc->func_connected = false;
|
||||
uvc_function_disconnect(uvc);
|
||||
uvcg_video_enable(&uvc->video, 0);
|
||||
uvcg_free_buffers(&uvc->video.queue);
|
||||
}
|
||||
|
||||
static int
|
||||
uvc_v4l2_unsubscribe_event(struct v4l2_fh *fh,
|
||||
const struct v4l2_event_subscription *sub)
|
||||
{
|
||||
return v4l2_event_unsubscribe(fh, sub);
|
||||
struct uvc_device *uvc = video_get_drvdata(fh->vdev);
|
||||
struct uvc_file_handle *handle = to_uvc_file_handle(fh);
|
||||
int ret;
|
||||
|
||||
ret = v4l2_event_unsubscribe(fh, sub);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (sub->type == UVC_EVENT_SETUP && handle->is_uvc_app_handle) {
|
||||
uvc_v4l2_disable(uvc);
|
||||
handle->is_uvc_app_handle = false;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static long
|
||||
@@ -292,7 +330,6 @@ uvc_v4l2_open(struct file *file)
|
||||
handle->device = &uvc->video;
|
||||
file->private_data = &handle->vfh;
|
||||
|
||||
uvc_function_connect(uvc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -304,11 +341,9 @@ uvc_v4l2_release(struct file *file)
|
||||
struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
|
||||
struct uvc_video *video = handle->device;
|
||||
|
||||
uvc_function_disconnect(uvc);
|
||||
|
||||
mutex_lock(&video->mutex);
|
||||
uvcg_video_enable(video, 0);
|
||||
uvcg_free_buffers(&video->queue);
|
||||
if (handle->is_uvc_app_handle)
|
||||
uvc_v4l2_disable(uvc);
|
||||
mutex_unlock(&video->mutex);
|
||||
|
||||
file->private_data = NULL;
|
||||
|
@@ -137,7 +137,7 @@ static int dbgp_enable_ep_req(struct usb_ep *ep)
|
||||
goto fail_1;
|
||||
}
|
||||
|
||||
req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL);
|
||||
req->buf = kzalloc(DBGP_REQ_LEN, GFP_KERNEL);
|
||||
if (!req->buf) {
|
||||
err = -ENOMEM;
|
||||
stp = 2;
|
||||
@@ -345,6 +345,19 @@ static int dbgp_setup(struct usb_gadget *gadget,
|
||||
void *data = NULL;
|
||||
u16 len = 0;
|
||||
|
||||
if (length > DBGP_REQ_LEN) {
|
||||
if (ctrl->bRequestType & USB_DIR_IN) {
|
||||
/* Cast away the const, we are going to overwrite on purpose. */
|
||||
__le16 *temp = (__le16 *)&ctrl->wLength;
|
||||
|
||||
*temp = cpu_to_le16(DBGP_REQ_LEN);
|
||||
length = DBGP_REQ_LEN;
|
||||
} else {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (request == USB_REQ_GET_DESCRIPTOR) {
|
||||
switch (value>>8) {
|
||||
case USB_DT_DEVICE:
|
||||
|
@@ -110,6 +110,8 @@ enum ep0_state {
|
||||
/* enough for the whole queue: most events invalidate others */
|
||||
#define N_EVENT 5
|
||||
|
||||
#define RBUF_SIZE 256
|
||||
|
||||
struct dev_data {
|
||||
spinlock_t lock;
|
||||
refcount_t count;
|
||||
@@ -144,7 +146,7 @@ struct dev_data {
|
||||
struct dentry *dentry;
|
||||
|
||||
/* except this scratch i/o buffer for ep0 */
|
||||
u8 rbuf [256];
|
||||
u8 rbuf[RBUF_SIZE];
|
||||
};
|
||||
|
||||
static inline void get_dev (struct dev_data *data)
|
||||
@@ -1333,6 +1335,18 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
|
||||
u16 w_value = le16_to_cpu(ctrl->wValue);
|
||||
u16 w_length = le16_to_cpu(ctrl->wLength);
|
||||
|
||||
if (w_length > RBUF_SIZE) {
|
||||
if (ctrl->bRequestType & USB_DIR_IN) {
|
||||
/* Cast away the const, we are going to overwrite on purpose. */
|
||||
__le16 *temp = (__le16 *)&ctrl->wLength;
|
||||
|
||||
*temp = cpu_to_le16(RBUF_SIZE);
|
||||
w_length = RBUF_SIZE;
|
||||
} else {
|
||||
return value;
|
||||
}
|
||||
}
|
||||
|
||||
spin_lock (&dev->lock);
|
||||
dev->setup_abort = 0;
|
||||
if (dev->state == STATE_DEV_UNCONNECTED) {
|
||||
|
@@ -1586,7 +1586,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
|
||||
status = 1;
|
||||
}
|
||||
if (!status && !reset_change) {
|
||||
xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
|
||||
xhci_dbg(xhci, "%s: stopping usb%d port polling\n",
|
||||
__func__, hcd->self.busnum);
|
||||
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&xhci->lock, flags);
|
||||
@@ -1618,7 +1619,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
|
||||
if (bus_state->resuming_ports || /* USB2 */
|
||||
bus_state->port_remote_wakeup) { /* USB3 */
|
||||
spin_unlock_irqrestore(&xhci->lock, flags);
|
||||
xhci_dbg(xhci, "suspend failed because a port is resuming\n");
|
||||
xhci_dbg(xhci, "usb%d bus suspend to fail because a port is resuming\n",
|
||||
hcd->self.busnum);
|
||||
return -EBUSY;
|
||||
}
|
||||
}
|
||||
|
@@ -832,9 +832,14 @@ static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
|
||||
|
||||
ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
|
||||
|
||||
if (td->cancel_status == TD_CLEARED)
|
||||
if (td->cancel_status == TD_CLEARED) {
|
||||
xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
|
||||
__func__, td->urb);
|
||||
xhci_td_cleanup(ep->xhci, td, ring, td->status);
|
||||
|
||||
} else {
|
||||
xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
|
||||
__func__, td->urb, td->cancel_status);
|
||||
}
|
||||
if (ep->xhci->xhc_state & XHCI_STATE_DYING)
|
||||
return;
|
||||
}
|
||||
@@ -852,6 +857,10 @@ static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
|
||||
goto done;
|
||||
}
|
||||
|
||||
xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
|
||||
(reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
|
||||
ep_index, slot_id);
|
||||
|
||||
ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
|
||||
done:
|
||||
if (ret)
|
||||
@@ -885,7 +894,8 @@ static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
|
||||
}
|
||||
|
||||
if (ep->ep_state & EP_HALTED) {
|
||||
xhci_dbg(xhci, "Reset ep command already pending\n");
|
||||
xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
|
||||
ep->ep_index);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -924,9 +934,10 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
|
||||
|
||||
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
|
||||
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
|
||||
"Removing canceled TD starting at 0x%llx (dma).",
|
||||
(unsigned long long)xhci_trb_virt_to_dma(
|
||||
td->start_seg, td->first_trb));
|
||||
"Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
|
||||
(unsigned long long)xhci_trb_virt_to_dma(
|
||||
td->start_seg, td->first_trb),
|
||||
td->urb->stream_id, td->urb);
|
||||
list_del_init(&td->td_list);
|
||||
ring = xhci_urb_to_transfer_ring(xhci, td->urb);
|
||||
if (!ring) {
|
||||
@@ -944,17 +955,21 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
|
||||
td->urb->stream_id);
|
||||
hw_deq &= ~0xf;
|
||||
|
||||
if (td->cancel_status == TD_HALTED) {
|
||||
cached_td = td;
|
||||
} else if (trb_in_td(xhci, td->start_seg, td->first_trb,
|
||||
td->last_trb, hw_deq, false)) {
|
||||
if (td->cancel_status == TD_HALTED ||
|
||||
trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
|
||||
switch (td->cancel_status) {
|
||||
case TD_CLEARED: /* TD is already no-op */
|
||||
case TD_CLEARING_CACHE: /* set TR deq command already queued */
|
||||
break;
|
||||
case TD_DIRTY: /* TD is cached, clear it */
|
||||
case TD_HALTED:
|
||||
/* FIXME stream case, several stopped rings */
|
||||
td->cancel_status = TD_CLEARING_CACHE;
|
||||
if (cached_td)
|
||||
/* FIXME stream case, several stopped rings */
|
||||
xhci_dbg(xhci,
|
||||
"Move dq past stream %u URB %p instead of stream %u URB %p\n",
|
||||
td->urb->stream_id, td->urb,
|
||||
cached_td->urb->stream_id, cached_td->urb);
|
||||
cached_td = td;
|
||||
break;
|
||||
}
|
||||
@@ -963,18 +978,24 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
|
||||
td->cancel_status = TD_CLEARED;
|
||||
}
|
||||
}
|
||||
if (cached_td) {
|
||||
cached_td->cancel_status = TD_CLEARING_CACHE;
|
||||
|
||||
err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
|
||||
cached_td->urb->stream_id,
|
||||
cached_td);
|
||||
/* Failed to move past cached td, try just setting it noop */
|
||||
if (err) {
|
||||
td_to_noop(xhci, ring, cached_td, false);
|
||||
cached_td->cancel_status = TD_CLEARED;
|
||||
/* If there's no need to move the dequeue pointer then we're done */
|
||||
if (!cached_td)
|
||||
return 0;
|
||||
|
||||
err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
|
||||
cached_td->urb->stream_id,
|
||||
cached_td);
|
||||
if (err) {
|
||||
/* Failed to move past cached td, just set cached TDs to no-op */
|
||||
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
|
||||
if (td->cancel_status != TD_CLEARING_CACHE)
|
||||
continue;
|
||||
xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
|
||||
td->urb);
|
||||
td_to_noop(xhci, ring, td, false);
|
||||
td->cancel_status = TD_CLEARED;
|
||||
}
|
||||
cached_td = NULL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -1071,6 +1092,8 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
|
||||
return;
|
||||
case EP_STATE_RUNNING:
|
||||
/* Race, HW handled stop ep cmd before ep was running */
|
||||
xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
|
||||
|
||||
command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
|
||||
if (!command)
|
||||
xhci_stop_watchdog_timer_in_irq(xhci, ep);
|
||||
@@ -1392,7 +1415,12 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
|
||||
ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
|
||||
if (td->cancel_status == TD_CLEARING_CACHE) {
|
||||
td->cancel_status = TD_CLEARED;
|
||||
xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
|
||||
__func__, td->urb);
|
||||
xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
|
||||
} else {
|
||||
xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
|
||||
__func__, td->urb, td->cancel_status);
|
||||
}
|
||||
}
|
||||
cleanup:
|
||||
@@ -2005,7 +2033,8 @@ cleanup:
|
||||
* bits are still set. When an event occurs, switch over to
|
||||
* polling to avoid losing status changes.
|
||||
*/
|
||||
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
|
||||
xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
|
||||
__func__, hcd->self.busnum);
|
||||
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
|
||||
spin_unlock(&xhci->lock);
|
||||
/* Pass this up to the core */
|
||||
|
@@ -993,7 +993,8 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
|
||||
xhci_dbc_suspend(xhci);
|
||||
|
||||
/* Don't poll the roothubs on bus suspend. */
|
||||
xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
|
||||
xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
|
||||
__func__, hcd->self.busnum);
|
||||
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
|
||||
del_timer_sync(&hcd->rh_timer);
|
||||
clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
|
||||
@@ -1257,7 +1258,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
|
||||
usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));
|
||||
|
||||
/* Re-enable port polling. */
|
||||
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
|
||||
xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
|
||||
__func__, hcd->self.busnum);
|
||||
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
|
||||
usb_hcd_poll_rh_status(xhci->shared_hcd);
|
||||
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
|
||||
@@ -4661,19 +4663,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
|
||||
{
|
||||
unsigned long long timeout_ns;
|
||||
|
||||
if (xhci->quirks & XHCI_INTEL_HOST)
|
||||
timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
|
||||
else
|
||||
timeout_ns = udev->u1_params.sel;
|
||||
|
||||
/* Prevent U1 if service interval is shorter than U1 exit latency */
|
||||
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
|
||||
if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
|
||||
if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
|
||||
dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
|
||||
return USB3_LPM_DISABLED;
|
||||
}
|
||||
}
|
||||
|
||||
if (xhci->quirks & XHCI_INTEL_HOST)
|
||||
timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
|
||||
else
|
||||
timeout_ns = udev->u1_params.sel;
|
||||
|
||||
/* The U1 timeout is encoded in 1us intervals.
|
||||
* Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
|
||||
*/
|
||||
@@ -4725,19 +4727,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
|
||||
{
|
||||
unsigned long long timeout_ns;
|
||||
|
||||
if (xhci->quirks & XHCI_INTEL_HOST)
|
||||
timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
|
||||
else
|
||||
timeout_ns = udev->u2_params.sel;
|
||||
|
||||
/* Prevent U2 if service interval is shorter than U2 exit latency */
|
||||
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
|
||||
if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
|
||||
if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
|
||||
dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
|
||||
return USB3_LPM_DISABLED;
|
||||
}
|
||||
}
|
||||
|
||||
if (xhci->quirks & XHCI_INTEL_HOST)
|
||||
timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
|
||||
else
|
||||
timeout_ns = udev->u2_params.sel;
|
||||
|
||||
/* The U2 timeout is encoded in 256us intervals */
|
||||
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
|
||||
/* If the necessary timeout value is bigger than what we can set in the
|
||||
|
@@ -4157,12 +4157,9 @@ static void run_state_machine(struct tcpm_port *port)
|
||||
0);
|
||||
port->debouncing = false;
|
||||
} else {
|
||||
/* Wait for VBUS, but not forever */
|
||||
tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
|
||||
port->debouncing = false;
|
||||
}
|
||||
break;
|
||||
|
||||
case SRC_TRY:
|
||||
port->try_src_count++;
|
||||
tcpm_set_cc(port, tcpm_rp_cc(port));
|
||||
|
@@ -408,6 +408,13 @@ int virtio_device_restore(struct virtio_device *dev)
|
||||
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
|
||||
int ret;
|
||||
|
||||
/* Short path for stateful devices. Here we assume that if the device
|
||||
* does not have a freeze callback, its state was not changed when
|
||||
* suspended.
|
||||
*/
|
||||
if (drv && !drv->freeze)
|
||||
goto on_config_enable;
|
||||
|
||||
/* We always start by resetting the device, in case a previous
|
||||
* driver messed it up. */
|
||||
dev->config->reset(dev);
|
||||
@@ -439,6 +446,7 @@ int virtio_device_restore(struct virtio_device *dev)
|
||||
/* Finally, tell the device we're all set */
|
||||
virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
|
||||
|
||||
on_config_enable:
|
||||
virtio_config_enable(dev);
|
||||
|
||||
return 0;
|
||||
|
@@ -1054,6 +1054,26 @@ static void vm_unregister_cmdline_devices(void)
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static int virtio_mmio_freeze(struct device *dev)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
|
||||
|
||||
return virtio_device_freeze(&vm_dev->vdev);
|
||||
}
|
||||
|
||||
static int virtio_mmio_restore(struct device *dev)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(dev);
|
||||
struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
|
||||
|
||||
return virtio_device_restore(&vm_dev->vdev);
|
||||
}
|
||||
#endif
|
||||
|
||||
static SIMPLE_DEV_PM_OPS(virtio_mmio_pm_ops, virtio_mmio_freeze, virtio_mmio_restore);
|
||||
|
||||
/* Platform driver */
|
||||
|
||||
static const struct of_device_id virtio_mmio_match[] = {
|
||||
@@ -1077,6 +1097,7 @@ static struct platform_driver virtio_mmio_driver = {
|
||||
.name = "virtio-mmio",
|
||||
.of_match_table = virtio_mmio_match,
|
||||
.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
|
||||
.pm = &virtio_mmio_pm_ops,
|
||||
},
|
||||
};
|
||||
|
||||
|
184
fs/aio.c
@@ -182,8 +182,9 @@ struct poll_iocb {
|
||||
struct file *file;
|
||||
struct wait_queue_head *head;
|
||||
__poll_t events;
|
||||
bool done;
|
||||
bool cancelled;
|
||||
bool work_scheduled;
|
||||
bool work_need_resched;
|
||||
struct wait_queue_entry wait;
|
||||
struct work_struct work;
|
||||
};
|
||||
@@ -1621,6 +1622,51 @@ static void aio_poll_put_work(struct work_struct *work)
|
||||
iocb_put(iocb);
|
||||
}
|
||||
|
||||
/*
|
||||
* Safely lock the waitqueue which the request is on, synchronizing with the
|
||||
* case where the ->poll() provider decides to free its waitqueue early.
|
||||
*
|
||||
* Returns true on success, meaning that req->head->lock was locked, req->wait
|
||||
* is on req->head, and an RCU read lock was taken. Returns false if the
|
||||
* request was already removed from its waitqueue (which might no longer exist).
|
||||
*/
|
||||
static bool poll_iocb_lock_wq(struct poll_iocb *req)
|
||||
{
|
||||
wait_queue_head_t *head;
|
||||
|
||||
/*
|
||||
* While we hold the waitqueue lock and the waitqueue is nonempty,
|
||||
* wake_up_pollfree() will wait for us. However, taking the waitqueue
|
||||
* lock in the first place can race with the waitqueue being freed.
|
||||
*
|
||||
* We solve this as eventpoll does: by taking advantage of the fact that
|
||||
* all users of wake_up_pollfree() will RCU-delay the actual free. If
|
||||
* we enter rcu_read_lock() and see that the pointer to the queue is
|
||||
* non-NULL, we can then lock it without the memory being freed out from
|
||||
* under us, then check whether the request is still on the queue.
|
||||
*
|
||||
* Keep holding rcu_read_lock() as long as we hold the queue lock, in
|
||||
* case the caller deletes the entry from the queue, leaving it empty.
|
||||
* In that case, only RCU prevents the queue memory from being freed.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
head = smp_load_acquire(&req->head);
|
||||
if (head) {
|
||||
spin_lock(&head->lock);
|
||||
if (!list_empty(&req->wait.entry))
|
||||
return true;
|
||||
spin_unlock(&head->lock);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
return false;
|
||||
}
|
||||
|
||||
static void poll_iocb_unlock_wq(struct poll_iocb *req)
|
||||
{
|
||||
spin_unlock(&req->head->lock);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static void aio_poll_complete_work(struct work_struct *work)
|
||||
{
|
||||
struct poll_iocb *req = container_of(work, struct poll_iocb, work);
|
||||
@@ -1640,14 +1686,27 @@ static void aio_poll_complete_work(struct work_struct *work)
|
||||
* avoid further branches in the fast path.
|
||||
*/
|
||||
spin_lock_irq(&ctx->ctx_lock);
|
||||
if (!mask && !READ_ONCE(req->cancelled)) {
|
||||
add_wait_queue(req->head, &req->wait);
|
||||
spin_unlock_irq(&ctx->ctx_lock);
|
||||
return;
|
||||
}
|
||||
if (poll_iocb_lock_wq(req)) {
|
||||
if (!mask && !READ_ONCE(req->cancelled)) {
|
||||
/*
|
||||
* The request isn't actually ready to be completed yet.
|
||||
* Reschedule completion if another wakeup came in.
|
||||
*/
|
||||
if (req->work_need_resched) {
|
||||
schedule_work(&req->work);
|
||||
req->work_need_resched = false;
|
||||
} else {
|
||||
req->work_scheduled = false;
|
||||
}
|
||||
poll_iocb_unlock_wq(req);
|
||||
spin_unlock_irq(&ctx->ctx_lock);
|
||||
return;
|
||||
}
|
||||
list_del_init(&req->wait.entry);
|
||||
poll_iocb_unlock_wq(req);
|
||||
} /* else, POLLFREE has freed the waitqueue, so we must complete */
|
||||
list_del_init(&iocb->ki_list);
|
||||
iocb->ki_res.res = mangle_poll(mask);
|
||||
req->done = true;
|
||||
spin_unlock_irq(&ctx->ctx_lock);
|
||||
|
||||
iocb_put(iocb);
|
||||
@@ -1659,13 +1718,14 @@ static int aio_poll_cancel(struct kiocb *iocb)
|
||||
struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
|
||||
struct poll_iocb *req = &aiocb->poll;
|
||||
|
||||
spin_lock(&req->head->lock);
|
||||
WRITE_ONCE(req->cancelled, true);
|
||||
if (!list_empty(&req->wait.entry)) {
|
||||
list_del_init(&req->wait.entry);
|
||||
schedule_work(&aiocb->poll.work);
|
||||
}
|
||||
spin_unlock(&req->head->lock);
|
||||
if (poll_iocb_lock_wq(req)) {
|
||||
WRITE_ONCE(req->cancelled, true);
|
||||
if (!req->work_scheduled) {
|
||||
schedule_work(&aiocb->poll.work);
|
||||
req->work_scheduled = true;
|
||||
}
|
||||
poll_iocb_unlock_wq(req);
|
||||
} /* else, the request was force-cancelled by POLLFREE already */
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1682,20 +1742,26 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
|
||||
if (mask && !(mask & req->events))
|
||||
return 0;
|
||||
|
||||
list_del_init(&req->wait.entry);
|
||||
|
||||
if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
|
||||
/*
|
||||
* Complete the request inline if possible. This requires that three
|
||||
* conditions be met:
|
||||
* 1. An event mask must have been passed. If a plain wakeup was done
|
||||
* instead, then mask == 0 and we have to call vfs_poll() to get
|
||||
* the events, so inline completion isn't possible.
|
||||
* 2. The completion work must not have already been scheduled.
|
||||
* 3. ctx_lock must not be busy. We have to use trylock because we
|
||||
* already hold the waitqueue lock, so this inverts the normal
|
||||
* locking order. Use irqsave/irqrestore because not all
|
||||
* filesystems (e.g. fuse) call this function with IRQs disabled,
|
||||
* yet IRQs have to be disabled before ctx_lock is obtained.
|
||||
*/
|
||||
if (mask && !req->work_scheduled &&
|
||||
spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
|
||||
struct kioctx *ctx = iocb->ki_ctx;
|
||||
|
||||
/*
|
||||
* Try to complete the iocb inline if we can. Use
|
||||
* irqsave/irqrestore because not all filesystems (e.g. fuse)
|
||||
* call this function with IRQs disabled and because IRQs
|
||||
* have to be disabled before ctx_lock is obtained.
|
||||
*/
|
||||
list_del_init(&req->wait.entry);
|
||||
list_del(&iocb->ki_list);
|
||||
iocb->ki_res.res = mangle_poll(mask);
|
||||
req->done = true;
|
||||
if (iocb->ki_eventfd && eventfd_signal_count()) {
|
||||
iocb = NULL;
|
||||
INIT_WORK(&req->work, aio_poll_put_work);
|
||||
@@ -1705,7 +1771,43 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
|
||||
if (iocb)
|
||||
iocb_put(iocb);
|
||||
} else {
|
||||
schedule_work(&req->work);
|
||||
/*
|
||||
* Schedule the completion work if needed. If it was already
|
||||
* scheduled, record that another wakeup came in.
|
||||
*
|
||||
* Don't remove the request from the waitqueue here, as it might
|
||||
* not actually be complete yet (we won't know until vfs_poll()
|
||||
* is called), and we must not miss any wakeups. POLLFREE is an
|
||||
* exception to this; see below.
|
||||
*/
|
||||
if (req->work_scheduled) {
|
||||
req->work_need_resched = true;
|
||||
} else {
|
||||
schedule_work(&req->work);
|
||||
req->work_scheduled = true;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the waitqueue is being freed early but we can't complete
|
||||
* the request inline, we have to tear down the request as best
|
||||
* we can. That means immediately removing the request from its
|
||||
* waitqueue and preventing all further accesses to the
|
||||
* waitqueue via the request. We also need to schedule the
|
||||
* completion work (done above). Also mark the request as
|
||||
* cancelled, to potentially skip an unneeded call to ->poll().
|
||||
*/
|
||||
if (mask & POLLFREE) {
|
||||
WRITE_ONCE(req->cancelled, true);
|
||||
list_del_init(&req->wait.entry);
|
||||
|
||||
/*
|
||||
* Careful: this *must* be the last step, since as soon
|
||||
* as req->head is NULL'ed out, the request can be
|
||||
* completed and freed, since aio_poll_complete_work()
|
||||
* will no longer need to take the waitqueue lock.
|
||||
*/
|
||||
smp_store_release(&req->head, NULL);
|
||||
}
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
@@ -1713,6 +1815,7 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
|
||||
struct aio_poll_table {
|
||||
struct poll_table_struct pt;
|
||||
struct aio_kiocb *iocb;
|
||||
bool queued;
|
||||
int error;
|
||||
};
|
||||
|
||||
@@ -1723,11 +1826,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
|
||||
struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
|
||||
|
||||
/* multiple wait queues per file are not supported */
|
||||
if (unlikely(pt->iocb->poll.head)) {
|
||||
if (unlikely(pt->queued)) {
|
||||
pt->error = -EINVAL;
|
||||
return;
|
||||
}
|
||||
|
||||
pt->queued = true;
|
||||
pt->error = 0;
|
||||
pt->iocb->poll.head = head;
|
||||
add_wait_queue(head, &pt->iocb->poll.wait);
|
||||
@@ -1752,12 +1856,14 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
|
||||
req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
|
||||
|
||||
req->head = NULL;
|
||||
req->done = false;
|
||||
req->cancelled = false;
|
||||
req->work_scheduled = false;
|
||||
req->work_need_resched = false;
|
||||
|
||||
apt.pt._qproc = aio_poll_queue_proc;
|
||||
apt.pt._key = req->events;
|
||||
apt.iocb = aiocb;
|
||||
apt.queued = false;
|
||||
apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
|
||||
|
||||
/* initialized the list so that we can do list_empty checks */
|
||||
@@ -1766,23 +1872,35 @@ static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
|
||||
|
||||
mask = vfs_poll(req->file, &apt.pt) & req->events;
|
||||
spin_lock_irq(&ctx->ctx_lock);
|
||||
if (likely(req->head)) {
|
||||
spin_lock(&req->head->lock);
|
||||
if (unlikely(list_empty(&req->wait.entry))) {
|
||||
if (apt.error)
|
||||
if (likely(apt.queued)) {
|
||||
bool on_queue = poll_iocb_lock_wq(req);
|
||||
|
||||
if (!on_queue || req->work_scheduled) {
|
||||
/*
|
||||
* aio_poll_wake() already either scheduled the async
|
||||
* completion work, or completed the request inline.
|
||||
*/
|
||||
if (apt.error) /* unsupported case: multiple queues */
|
||||
cancel = true;
|
||||
apt.error = 0;
|
||||
mask = 0;
|
||||
}
|
||||
if (mask || apt.error) {
|
||||
/* Steal to complete synchronously. */
|
||||
list_del_init(&req->wait.entry);
|
||||
} else if (cancel) {
|
||||
/* Cancel if possible (may be too late though). */
|
||||
WRITE_ONCE(req->cancelled, true);
|
||||
} else if (!req->done) { /* actually waiting for an event */
|
||||
} else if (on_queue) {
|
||||
/*
|
||||
* Actually waiting for an event, so add the request to
|
||||
* active_reqs so that it can be cancelled if needed.
|
||||
*/
|
||||
list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
|
||||
aiocb->ki_cancel = aio_poll_cancel;
|
||||
}
|
||||
spin_unlock(&req->head->lock);
|
||||
if (on_queue)
|
||||
poll_iocb_unlock_wq(req);
|
||||
}
|
||||
if (mask) { /* no async, we'd stolen it */
|
||||
aiocb->ki_res.res = mangle_poll(mask);
|
||||
|
@@ -2515,6 +2515,11 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
|
||||
{
|
||||
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
||||
|
||||
/* The below cases were checked when setting it. */
|
||||
if (f2fs_is_pinned_file(inode))
|
||||
return false;
|
||||
if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
|
||||
return true;
|
||||
if (f2fs_lfs_mode(sbi))
|
||||
return true;
|
||||
if (S_ISDIR(inode->i_mode))
|
||||
@@ -2523,8 +2528,6 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
|
||||
return true;
|
||||
if (f2fs_is_atomic_file(inode))
|
||||
return true;
|
||||
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
|
||||
return true;
|
||||
|
||||
/* swap file is migrating in aligned write mode */
|
||||
if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
|
||||
|
@@ -3250,17 +3250,17 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
|
||||
|
||||
inode_lock(inode);
|
||||
|
||||
if (f2fs_should_update_outplace(inode, NULL)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!pin) {
|
||||
clear_inode_flag(inode, FI_PIN_FILE);
|
||||
f2fs_i_gc_failures_write(inode, 0);
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (f2fs_should_update_outplace(inode, NULL)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (f2fs_pin_file_control(inode, false)) {
|
||||
ret = -EAGAIN;
|
||||
goto out;
|
||||
|
@@ -35,17 +35,7 @@
|
||||
|
||||
void signalfd_cleanup(struct sighand_struct *sighand)
|
||||
{
|
||||
wait_queue_head_t *wqh = &sighand->signalfd_wqh;
|
||||
/*
|
||||
* The lockless check can race with remove_wait_queue() in progress,
|
||||
* but in this case its caller should run under rcu_read_lock() and
|
||||
* sighand_cachep is SLAB_TYPESAFE_BY_RCU, we can safely return.
|
||||
*/
|
||||
if (likely(!waitqueue_active(wqh)))
|
||||
return;
|
||||
|
||||
/* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
|
||||
wake_up_poll(wqh, EPOLLHUP | POLLFREE);
|
||||
wake_up_pollfree(&sighand->signalfd_wqh);
|
||||
}
|
||||
|
||||
struct signalfd_ctx {
|
||||
|
@@ -3285,7 +3285,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
|
||||
{
|
||||
struct inode *inode;
|
||||
|
||||
if (!vma->vm_file)
|
||||
if (!IS_ENABLED(CONFIG_FS_DAX) || !vma->vm_file)
|
||||
return false;
|
||||
if (!vma_is_dax(vma))
|
||||
return false;
|
||||
|
@@ -833,6 +833,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
|
||||
return hdev->ll_driver == driver;
|
||||
}
|
||||
|
||||
static inline bool hid_is_usb(struct hid_device *hdev)
|
||||
{
|
||||
return hid_is_using_ll_driver(hdev, &usb_hid_driver);
|
||||
}
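
Note: a typical consumer of this new helper is a HID driver probe that must only bind to USB-backed devices before it touches USB-specific parent structures. The sketch below is illustrative only; the foo_hid_probe() name and body are hypothetical and not part of this change.

#include <linux/hid.h>
#include <linux/usb.h>

/* Hypothetical probe: bail out early when the transport is not USB. */
static int foo_hid_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
        struct usb_interface *intf;

        if (!hid_is_usb(hdev))
                return -EINVAL;

        /* Only now is it safe to assume a USB parent device. */
        intf = to_usb_interface(hdev->dev.parent);
        (void)intf;

        return hid_parse(hdev);
}
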
|
||||
|
||||
#define PM_HINT_FULLON 1<<5
|
||||
#define PM_HINT_NORMAL 1<<1
|
||||
|
||||
|
@@ -6,6 +6,8 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/mmap_lock.h>
|
||||
#include <linux/percpu-rwsem.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/srcu.h>
|
||||
#include <linux/interval_tree.h>
|
||||
#include <linux/android_kabi.h>
|
||||
@@ -15,6 +17,13 @@ struct mmu_notifier;
|
||||
struct mmu_notifier_range;
|
||||
struct mmu_interval_notifier;
|
||||
|
||||
struct mmu_notifier_subscriptions_hdr {
|
||||
bool valid;
|
||||
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
||||
struct percpu_rw_semaphore_atomic *mmu_notifier_lock;
|
||||
#endif
|
||||
};
|
||||
|
||||
/**
|
||||
* enum mmu_notifier_event - reason for the mmu notifier callback
|
||||
* @MMU_NOTIFY_UNMAP: either munmap() that unmap the range or a mremap() that
|
||||
@@ -281,9 +290,30 @@ struct mmu_notifier_range {
|
||||
void *migrate_pgmap_owner;
|
||||
};
|
||||
|
||||
static inline
|
||||
struct mmu_notifier_subscriptions_hdr *get_notifier_subscriptions_hdr(
|
||||
struct mm_struct *mm)
|
||||
{
|
||||
/*
|
||||
* container_of() can't be used here because mmu_notifier_subscriptions
|
||||
* struct should be kept invisible to mm_struct, otherwise it
|
||||
* introduces KMI CRC breakage. Therefore the callers don't know what
|
||||
* members struct mmu_notifier_subscriptions contains and can't call
|
||||
* container_of(), which requires a member name.
|
||||
*
|
||||
* WARNING: For this typecasting to work, mmu_notifier_subscriptions_hdr
|
||||
* should be the first member of struct mmu_notifier_subscriptions.
|
||||
*/
|
||||
return (struct mmu_notifier_subscriptions_hdr *)mm->notifier_subscriptions;
|
||||
}
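
Note: the comment above relies on a common C layout guarantee: a pointer to a structure may be cast to a pointer to its first member because both share the same address, so no container_of() (and no member name) is needed. A minimal, stand-alone illustration of that pattern, with hypothetical names:

#include <linux/types.h>

struct hdr {
        bool valid;
};

struct full {
        struct hdr hdr;         /* must remain the first member */
        int private_state;      /* invisible to callers that only know struct hdr */
};

static struct hdr *to_hdr(struct full *f)
{
        /* Same address as &f->hdr, so the cast is equivalent to container_of(). */
        return (struct hdr *)f;
}
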
|
||||
|
||||
static inline int mm_has_notifiers(struct mm_struct *mm)
|
||||
{
|
||||
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
||||
return unlikely(get_notifier_subscriptions_hdr(mm)->valid);
|
||||
#else
|
||||
return unlikely(mm->notifier_subscriptions);
|
||||
#endif
|
||||
}
|
||||
|
||||
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
|
||||
@@ -502,9 +532,29 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
|
||||
__mmu_notifier_invalidate_range(mm, start, end);
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
|
||||
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
||||
|
||||
extern bool mmu_notifier_subscriptions_init(struct mm_struct *mm);
|
||||
extern void mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
|
||||
|
||||
static inline bool mmu_notifier_trylock(struct mm_struct *mm)
|
||||
{
|
||||
return percpu_down_read_trylock(
|
||||
&get_notifier_subscriptions_hdr(mm)->mmu_notifier_lock->rw_sem);
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_unlock(struct mm_struct *mm)
|
||||
{
|
||||
percpu_up_read(
|
||||
&get_notifier_subscriptions_hdr(mm)->mmu_notifier_lock->rw_sem);
|
||||
}
|
||||
|
||||
#else /* CONFIG_SPECULATIVE_PAGE_FAULT */
|
||||
|
||||
static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
|
||||
{
|
||||
mm->notifier_subscriptions = NULL;
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
|
||||
@@ -513,6 +563,16 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
|
||||
__mmu_notifier_subscriptions_destroy(mm);
|
||||
}
|
||||
|
||||
static inline bool mmu_notifier_trylock(struct mm_struct *mm)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_unlock(struct mm_struct *mm)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
|
||||
|
||||
static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
|
||||
enum mmu_notifier_event event,
|
||||
@@ -727,14 +787,24 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
|
||||
{
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
|
||||
static inline bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
|
||||
{
|
||||
}
|
||||
|
||||
static inline bool mmu_notifier_trylock(struct mm_struct *mm)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_unlock(struct mm_struct *mm)
|
||||
{
|
||||
}
|
||||
|
||||
#define mmu_notifier_range_update_to_read_only(r) false
|
||||
|
||||
#define ptep_clear_flush_young_notify ptep_clear_flush_young
|
||||
|
@@ -20,6 +20,11 @@ struct percpu_rw_semaphore {
|
||||
#endif
|
||||
};
|
||||
|
||||
struct percpu_rw_semaphore_atomic {
|
||||
struct percpu_rw_semaphore rw_sem;
|
||||
struct list_head destroy_list_entry;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
||||
#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname },
|
||||
#else
|
||||
@@ -127,8 +132,12 @@ extern void percpu_up_write(struct percpu_rw_semaphore *);
|
||||
extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
|
||||
const char *, struct lock_class_key *);
|
||||
|
||||
/* Can't be called in atomic context. */
|
||||
extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
|
||||
|
||||
/* Invokes percpu_free_rwsem and frees the semaphore from a worker thread. */
|
||||
extern void percpu_rwsem_async_destroy(struct percpu_rw_semaphore_atomic *sem);
|
||||
|
||||
#define percpu_init_rwsem(sem) \
|
||||
({ \
|
||||
static struct lock_class_key rwsem_key; \
|
||||
|
@@ -207,6 +207,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void
|
||||
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
|
||||
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
|
||||
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
|
||||
void __wake_up_pollfree(struct wait_queue_head *wq_head);
|
||||
|
||||
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
|
||||
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
|
||||
@@ -235,6 +236,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
|
||||
#define wake_up_interruptible_sync_poll_locked(x, m) \
|
||||
__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
|
||||
|
||||
/**
 * wake_up_pollfree - signal that a polled waitqueue is going away
 * @wq_head: the wait queue head
 *
 * In the very rare cases where a ->poll() implementation uses a waitqueue whose
 * lifetime is tied to a task rather than to the 'struct file' being polled,
 * this function must be called before the waitqueue is freed so that
 * non-blocking polls (e.g. epoll) are notified that the queue is going away.
 *
 * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
 * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
 */
static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
{
        /*
         * For performance reasons, we don't always take the queue lock here.
         * Therefore, we might race with someone removing the last entry from
         * the queue, and proceed while they still hold the queue lock.
         * However, rcu_read_lock() is required to be held in such cases, so we
         * can safely proceed with an RCU-delayed free.
         */
        if (waitqueue_active(wq_head))
                __wake_up_pollfree(wq_head);
}
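
Note: as the kerneldoc above spells out, a provider whose waitqueue lives in a task-scoped object (rather than in the struct file) has to pair wake_up_pollfree() with an RCU-delayed free. A minimal sketch of that pairing, using hypothetical foo_* names:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/wait.h>

struct foo_ctx {
        wait_queue_head_t wqh;
        struct rcu_head rcu;
};

static void foo_ctx_free_rcu(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct foo_ctx, rcu));
}

static void foo_ctx_release(struct foo_ctx *ctx)
{
        /* Force any remaining pollers off the queue before it disappears. */
        wake_up_pollfree(&ctx->wqh);
        /* The memory itself must outlive concurrent RCU readers. */
        call_rcu(&ctx->rcu, foo_ctx_free_rcu);
}
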
|
||||
|
||||
#define ___wait_cond_timeout(condition) \
|
||||
({ \
|
||||
bool __cond = (condition); \
|
||||
|
@@ -18,6 +18,10 @@ DECLARE_HOOK(android_vh_alter_futex_plist_add,
|
||||
bool *already_on_hb),
|
||||
TP_ARGS(node, head, already_on_hb));
|
||||
|
||||
DECLARE_HOOK(android_vh_futex_sleep_start,
|
||||
TP_PROTO(struct task_struct *p),
|
||||
TP_ARGS(p));
|
||||
|
||||
/* macro versions of hooks are no longer required */
|
||||
|
||||
#endif /* _TRACE_HOOK_FUTEX_H */
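
Note: hooks declared with DECLARE_HOOK are attached from a vendor module through the generated register_trace_android_vh_*() helper. The handler below is a hypothetical example of how a module might consume the new sleep-start hook; the foo_* names and the handler body are assumptions, not part of this change.

#include <linux/sched.h>
#include <trace/hooks/futex.h>

/* Hypothetical vendor handler; the first argument is the unused priv pointer. */
static void foo_futex_sleep_start(void *unused, struct task_struct *p)
{
        /* e.g. feed a scheduler or power heuristic with the sleeping task */
}

static int __init foo_vendor_init(void)
{
        return register_trace_android_vh_futex_sleep_start(
                        foo_futex_sleep_start, NULL);
}
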
|
||||
|
@@ -126,6 +126,10 @@ DECLARE_HOOK(android_vh_mmap_region,
|
||||
DECLARE_HOOK(android_vh_try_to_unmap_one,
|
||||
TP_PROTO(struct vm_area_struct *vma, struct page *page, unsigned long addr, bool ret),
|
||||
TP_ARGS(vma, page, addr, ret));
|
||||
struct device;
|
||||
DECLARE_HOOK(android_vh_subpage_dma_contig_alloc,
|
||||
TP_PROTO(bool *allow_subpage_alloc, struct device *dev, size_t *size),
|
||||
TP_ARGS(allow_subpage_alloc, dev, size));
|
||||
/* macro versions of hooks are no longer required */
|
||||
|
||||
#endif /* _TRACE_HOOK_MM_H */
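
Note: the new android_vh_subpage_dma_contig_alloc hook lets a vendor module opt selected devices into sub-page CMA allocations (see the dma_alloc_contiguous() change later in this diff). The sketch below is hypothetical; foo_device_wants_subpage() stands in for whatever vendor-specific test applies.

#include <linux/device.h>
#include <trace/hooks/mm.h>

static bool foo_device_wants_subpage(struct device *dev)
{
        return false;   /* placeholder for a vendor-specific check */
}

static void foo_subpage_dma_contig_alloc(void *unused, bool *allow_subpage_alloc,
                                         struct device *dev, size_t *size)
{
        if (foo_device_wants_subpage(dev))
                *allow_subpage_alloc = true;
}

static int __init foo_mm_hooks_init(void)
{
        return register_trace_android_vh_subpage_dma_contig_alloc(
                        foo_subpage_dma_contig_alloc, NULL);
}
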
|
||||
|
@@ -29,7 +29,7 @@
|
||||
#define POLLRDHUP 0x2000
|
||||
#endif
|
||||
|
||||
#define POLLFREE (__force __poll_t)0x4000 /* currently only for epoll */
|
||||
#define POLLFREE (__force __poll_t)0x4000
|
||||
|
||||
#define POLL_BUSY_LOOP (__force __poll_t)0x8000
|
||||
|
||||
|
@@ -50,6 +50,7 @@
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/cma.h>
|
||||
#include <trace/hooks/mm.h>
|
||||
|
||||
#ifdef CONFIG_CMA_SIZE_MBYTES
|
||||
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
|
||||
@@ -309,14 +310,19 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
|
||||
#ifdef CONFIG_DMA_PERNUMA_CMA
|
||||
int nid = dev_to_node(dev);
|
||||
#endif
|
||||
bool allow_subpage_alloc = false;
|
||||
|
||||
/* CMA can be used only in the context which permits sleeping */
|
||||
if (!gfpflags_allow_blocking(gfp))
|
||||
return NULL;
|
||||
if (dev->cma_area)
|
||||
return cma_alloc_aligned(dev->cma_area, size, gfp);
|
||||
if (size <= PAGE_SIZE)
|
||||
return NULL;
|
||||
|
||||
if (size <= PAGE_SIZE) {
|
||||
trace_android_vh_subpage_dma_contig_alloc(&allow_subpage_alloc, dev, &size);
|
||||
if (!allow_subpage_alloc)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DMA_PERNUMA_CMA
|
||||
if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {
|
||||
|
@@ -1072,7 +1072,8 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
|
||||
mm_init_owner(mm, p);
|
||||
mm_init_pasid(mm);
|
||||
RCU_INIT_POINTER(mm->exe_file, NULL);
|
||||
mmu_notifier_subscriptions_init(mm);
|
||||
if (!mmu_notifier_subscriptions_init(mm))
|
||||
goto fail_nopgd;
|
||||
init_tlb_flush_pending(mm);
|
||||
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
|
||||
mm->pmd_huge_pte = NULL;
|
||||
|
@@ -2607,8 +2607,10 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
|
||||
* flagged for rescheduling. Only call schedule if there
|
||||
* is no timeout, or if it has yet to expire.
|
||||
*/
|
||||
if (!timeout || timeout->task)
|
||||
if (!timeout || timeout->task) {
|
||||
trace_android_vh_futex_sleep_start(current);
|
||||
freezable_schedule();
|
||||
}
|
||||
}
|
||||
__set_current_state(TASK_RUNNING);
|
||||
}
|
||||
|
@@ -7,6 +7,7 @@
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/sched/task.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/errno.h>
|
||||
|
||||
int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
|
||||
@@ -268,3 +269,34 @@ void percpu_up_write(struct percpu_rw_semaphore *sem)
|
||||
rcu_sync_exit(&sem->rss);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(percpu_up_write);
|
||||
|
||||
static LIST_HEAD(destroy_list);
|
||||
static DEFINE_SPINLOCK(destroy_list_lock);
|
||||
|
||||
static void destroy_list_workfn(struct work_struct *work)
|
||||
{
|
||||
struct percpu_rw_semaphore_atomic *sem, *sem2;
|
||||
LIST_HEAD(to_destroy);
|
||||
|
||||
spin_lock(&destroy_list_lock);
|
||||
list_splice_init(&destroy_list, &to_destroy);
|
||||
spin_unlock(&destroy_list_lock);
|
||||
|
||||
if (list_empty(&to_destroy))
|
||||
return;
|
||||
|
||||
list_for_each_entry_safe(sem, sem2, &to_destroy, destroy_list_entry) {
|
||||
percpu_free_rwsem(&sem->rw_sem);
|
||||
kfree(sem);
|
||||
}
|
||||
}
|
||||
|
||||
static DECLARE_WORK(destroy_list_work, destroy_list_workfn);
|
||||
|
||||
void percpu_rwsem_async_destroy(struct percpu_rw_semaphore_atomic *sem)
|
||||
{
|
||||
spin_lock(&destroy_list_lock);
|
||||
list_add_tail(&sem->destroy_list_entry, &destroy_list);
|
||||
spin_unlock(&destroy_list_lock);
|
||||
schedule_work(&destroy_list_work);
|
||||
}
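
Note: percpu_free_rwsem() may sleep, so a caller that has to drop its last reference from atomic context hands the semaphore to this worker instead; the worker then calls percpu_free_rwsem() and kfree()s the containing structure, as shown above. A sketch of the intended calling pattern, with hypothetical my_* names:

#include <linux/percpu-rwsem.h>
#include <linux/slab.h>

struct my_obj {
        struct percpu_rw_semaphore_atomic *sem;
};

static int my_obj_init(struct my_obj *obj)
{
        int ret;

        obj->sem = kzalloc(sizeof(*obj->sem), GFP_KERNEL);
        if (!obj->sem)
                return -ENOMEM;

        ret = percpu_init_rwsem(&obj->sem->rw_sem);
        if (ret)
                kfree(obj->sem);
        return ret;
}

/* May be called with spinlocks held / IRQs disabled. */
static void my_obj_teardown(struct my_obj *obj)
{
        percpu_rwsem_async_destroy(obj->sem);
        obj->sem = NULL;        /* the destroy worker now owns (and frees) it */
}
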
|
||||
|
@@ -227,6 +227,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
|
||||
|
||||
void __wake_up_pollfree(struct wait_queue_head *wq_head)
|
||||
{
|
||||
__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
|
||||
/* POLLFREE must have cleared the queue. */
|
||||
WARN_ON_ONCE(waitqueue_active(wq_head));
|
||||
}
|
||||
|
||||
/*
|
||||
* Note: we use "set_current_state()" _after_ the wait-queue add,
|
||||
* because we need a memory barrier there on SMP, so that any
|
||||
|
83	mm/gup.c
@@ -948,6 +948,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
|
||||
if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
|
||||
return -EFAULT;
|
||||
|
||||
if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (write) {
|
||||
if (!(vm_flags & VM_WRITE)) {
|
||||
if (!(gup_flags & FOLL_FORCE))
|
||||
@@ -1085,10 +1088,14 @@ static long __get_user_pages(struct mm_struct *mm,
|
||||
goto next_page;
|
||||
}
|
||||
|
||||
if (!vma || check_vma_flags(vma, gup_flags)) {
|
||||
if (!vma) {
|
||||
ret = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
ret = check_vma_flags(vma, gup_flags);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (is_vm_hugetlb_page(vma)) {
|
||||
i = follow_hugetlb_page(mm, vma, pages, vmas,
|
||||
&start, &nr_pages, i,
|
||||
@@ -1592,26 +1599,6 @@ struct page *get_dump_page(unsigned long addr)
|
||||
}
|
||||
#endif /* CONFIG_ELF_CORE */
|
||||
|
||||
#if defined(CONFIG_FS_DAX) || defined (CONFIG_CMA)
|
||||
static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
|
||||
{
|
||||
long i;
|
||||
struct vm_area_struct *vma_prev = NULL;
|
||||
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
struct vm_area_struct *vma = vmas[i];
|
||||
|
||||
if (vma == vma_prev)
|
||||
continue;
|
||||
|
||||
vma_prev = vma;
|
||||
|
||||
if (vma_is_fsdax(vma))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CMA
|
||||
static long check_and_migrate_cma_pages(struct mm_struct *mm,
|
||||
unsigned long start,
|
||||
@@ -1730,63 +1717,23 @@ static long __gup_longterm_locked(struct mm_struct *mm,
|
||||
struct vm_area_struct **vmas,
|
||||
unsigned int gup_flags)
|
||||
{
|
||||
struct vm_area_struct **vmas_tmp = vmas;
|
||||
unsigned long flags = 0;
|
||||
long rc, i;
|
||||
long rc;
|
||||
|
||||
if (gup_flags & FOLL_LONGTERM) {
|
||||
if (!pages)
|
||||
return -EINVAL;
|
||||
|
||||
if (!vmas_tmp) {
|
||||
vmas_tmp = kcalloc(nr_pages,
|
||||
sizeof(struct vm_area_struct *),
|
||||
GFP_KERNEL);
|
||||
if (!vmas_tmp)
|
||||
return -ENOMEM;
|
||||
}
|
||||
if (gup_flags & FOLL_LONGTERM)
|
||||
flags = memalloc_nocma_save();
|
||||
}
|
||||
|
||||
rc = __get_user_pages_locked(mm, start, nr_pages, pages,
|
||||
vmas_tmp, NULL, gup_flags);
|
||||
rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL,
|
||||
gup_flags);
|
||||
|
||||
if (gup_flags & FOLL_LONGTERM) {
|
||||
if (rc < 0)
|
||||
goto out;
|
||||
|
||||
if (check_dax_vmas(vmas_tmp, rc)) {
|
||||
if (gup_flags & FOLL_PIN)
|
||||
unpin_user_pages(pages, rc);
|
||||
else
|
||||
for (i = 0; i < rc; i++)
|
||||
put_page(pages[i]);
|
||||
rc = -EOPNOTSUPP;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = check_and_migrate_cma_pages(mm, start, rc, pages,
|
||||
vmas_tmp, gup_flags);
|
||||
out:
|
||||
if (rc > 0)
|
||||
rc = check_and_migrate_cma_pages(mm, start, rc, pages,
|
||||
vmas, gup_flags);
|
||||
memalloc_nocma_restore(flags);
|
||||
}
|
||||
|
||||
if (vmas_tmp != vmas)
|
||||
kfree(vmas_tmp);
|
||||
return rc;
|
||||
}
|
||||
#else /* !CONFIG_FS_DAX && !CONFIG_CMA */
|
||||
static __always_inline long __gup_longterm_locked(struct mm_struct *mm,
|
||||
unsigned long start,
|
||||
unsigned long nr_pages,
|
||||
struct page **pages,
|
||||
struct vm_area_struct **vmas,
|
||||
unsigned int flags)
|
||||
{
|
||||
return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
|
||||
NULL, flags);
|
||||
}
|
||||
#endif /* CONFIG_FS_DAX || CONFIG_CMA */
|
||||
|
||||
static bool is_valid_gup_flags(unsigned int gup_flags)
|
||||
{
|
||||
|
15	mm/memory.c
@@ -4717,8 +4717,19 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
|
||||
goto unlock;
|
||||
}
|
||||
if (vmf->flags & FAULT_FLAG_WRITE) {
|
||||
if (!pte_write(entry))
|
||||
return do_wp_page(vmf);
|
||||
if (!pte_write(entry)) {
|
||||
if (!(vmf->flags & FAULT_FLAG_SPECULATIVE))
|
||||
return do_wp_page(vmf);
|
||||
|
||||
if (!mmu_notifier_trylock(vmf->vma->vm_mm)) {
|
||||
ret = VM_FAULT_RETRY;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
ret = do_wp_page(vmf);
|
||||
mmu_notifier_unlock(vmf->vma->vm_mm);
|
||||
return ret;
|
||||
}
|
||||
entry = pte_mkdirty(entry);
|
||||
}
|
||||
entry = pte_mkyoung(entry);
|
||||
|
@@ -1136,9 +1136,6 @@ int add_memory_subsection(int nid, u64 start, u64 size)
|
||||
struct resource *res;
|
||||
int ret;
|
||||
|
||||
if (size == memory_block_size_bytes())
|
||||
return add_memory(nid, start, size, MHP_NONE);
|
||||
|
||||
if (!IS_ALIGNED(start, SUBSECTION_SIZE) ||
|
||||
!IS_ALIGNED(size, SUBSECTION_SIZE)) {
|
||||
pr_err("%s: start 0x%llx size 0x%llx not aligned to subsection size\n",
|
||||
@@ -1837,9 +1834,6 @@ EXPORT_SYMBOL_GPL(remove_memory);
|
||||
|
||||
int remove_memory_subsection(int nid, u64 start, u64 size)
|
||||
{
|
||||
if (size == memory_block_size_bytes())
|
||||
return remove_memory(nid, start, size);
|
||||
|
||||
if (!IS_ALIGNED(start, SUBSECTION_SIZE) ||
|
||||
!IS_ALIGNED(size, SUBSECTION_SIZE)) {
|
||||
pr_err("%s: start 0x%llx size 0x%llx not aligned to subsection size\n",
|
||||
|
@@ -35,6 +35,12 @@ struct lockdep_map __mmu_notifier_invalidate_range_start_map = {
|
||||
* in mmdrop().
|
||||
*/
|
||||
struct mmu_notifier_subscriptions {
|
||||
/*
|
||||
* WARNING: hdr should be the first member of this structure
|
||||
* so that it can be typecasted into mmu_notifier_subscriptions_hdr.
|
||||
* This is required to avoid KMI CRC breakage.
|
||||
*/
|
||||
struct mmu_notifier_subscriptions_hdr hdr;
|
||||
/* all mmu notifiers registered in this mm are queued in this list */
|
||||
struct hlist_head list;
|
||||
bool has_itree;
|
||||
@@ -621,6 +627,37 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
|
||||
srcu_read_unlock(&srcu, id);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
||||
|
||||
static inline void mmu_notifier_write_lock(struct mm_struct *mm)
|
||||
{
|
||||
percpu_down_write(
|
||||
&mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_write_unlock(struct mm_struct *mm)
|
||||
{
|
||||
percpu_up_write(
|
||||
&mm->notifier_subscriptions->hdr.mmu_notifier_lock->rw_sem);
|
||||
}
|
||||
|
||||
#else /* CONFIG_SPECULATIVE_PAGE_FAULT */
|
||||
|
||||
static inline void mmu_notifier_write_lock(struct mm_struct *mm) {}
|
||||
static inline void mmu_notifier_write_unlock(struct mm_struct *mm) {}
|
||||
|
||||
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
|
||||
|
||||
static void init_subscriptions(struct mmu_notifier_subscriptions *subscriptions)
|
||||
{
|
||||
INIT_HLIST_HEAD(&subscriptions->list);
|
||||
spin_lock_init(&subscriptions->lock);
|
||||
subscriptions->invalidate_seq = 2;
|
||||
subscriptions->itree = RB_ROOT_CACHED;
|
||||
init_waitqueue_head(&subscriptions->wq);
|
||||
INIT_HLIST_HEAD(&subscriptions->deferred_list);
|
||||
}
|
||||
|
||||
/*
|
||||
* Same as mmu_notifier_register but here the caller must hold the mmap_lock in
|
||||
* write mode. A NULL mn signals the notifier is being registered for itree
|
||||
@@ -653,17 +690,16 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
|
||||
if (!subscriptions)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_HLIST_HEAD(&subscriptions->list);
|
||||
spin_lock_init(&subscriptions->lock);
|
||||
subscriptions->invalidate_seq = 2;
|
||||
subscriptions->itree = RB_ROOT_CACHED;
|
||||
init_waitqueue_head(&subscriptions->wq);
|
||||
INIT_HLIST_HEAD(&subscriptions->deferred_list);
|
||||
init_subscriptions(subscriptions);
|
||||
}
|
||||
|
||||
mmu_notifier_write_lock(mm);
|
||||
|
||||
ret = mm_take_all_locks(mm);
|
||||
if (unlikely(ret))
|
||||
if (unlikely(ret)) {
|
||||
mmu_notifier_write_unlock(mm);
|
||||
goto out_clean;
|
||||
}
|
||||
|
||||
/*
|
||||
* Serialize the update against mmu_notifier_unregister. A
|
||||
@@ -683,6 +719,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
|
||||
*/
|
||||
if (subscriptions)
|
||||
smp_store_release(&mm->notifier_subscriptions, subscriptions);
|
||||
mm->notifier_subscriptions->hdr.valid = true;
|
||||
|
||||
if (subscription) {
|
||||
/* Pairs with the mmdrop in mmu_notifier_unregister_* */
|
||||
@@ -698,6 +735,7 @@ int __mmu_notifier_register(struct mmu_notifier *subscription,
|
||||
mm->notifier_subscriptions->has_itree = true;
|
||||
|
||||
mm_drop_all_locks(mm);
|
||||
mmu_notifier_write_unlock(mm);
|
||||
BUG_ON(atomic_read(&mm->mm_users) <= 0);
|
||||
return 0;
|
||||
|
||||
@@ -1125,3 +1163,41 @@ mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
|
||||
return range->vma->vm_flags & VM_READ;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);
|
||||
|
||||
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
||||
|
||||
bool mmu_notifier_subscriptions_init(struct mm_struct *mm)
|
||||
{
|
||||
struct mmu_notifier_subscriptions *subscriptions;
|
||||
struct percpu_rw_semaphore_atomic *sem;
|
||||
|
||||
subscriptions = kzalloc(
|
||||
sizeof(struct mmu_notifier_subscriptions), GFP_KERNEL);
|
||||
if (!subscriptions)
|
||||
return false;
|
||||
|
||||
sem = kzalloc(sizeof(struct percpu_rw_semaphore_atomic), GFP_KERNEL);
|
||||
if (!sem) {
|
||||
kfree(subscriptions);
|
||||
return false;
|
||||
}
|
||||
percpu_init_rwsem(&sem->rw_sem);
|
||||
|
||||
init_subscriptions(subscriptions);
|
||||
subscriptions->has_itree = true;
|
||||
subscriptions->hdr.valid = false;
|
||||
subscriptions->hdr.mmu_notifier_lock = sem;
|
||||
mm->notifier_subscriptions = subscriptions;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
|
||||
{
|
||||
percpu_rwsem_async_destroy(
|
||||
mm->notifier_subscriptions->hdr.mmu_notifier_lock);
|
||||
kfree(mm->notifier_subscriptions);
|
||||
mm->notifier_subscriptions = NULL;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
|
||||
|
73	mm/slub.c
@@ -433,6 +433,18 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
|
||||
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
|
||||
static DEFINE_SPINLOCK(object_map_lock);
|
||||
|
||||
static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
|
||||
struct page *page)
|
||||
{
|
||||
void *addr = page_address(page);
|
||||
void *p;
|
||||
|
||||
bitmap_zero(obj_map, page->objects);
|
||||
|
||||
for (p = page->freelist; p; p = get_freepointer(s, p))
|
||||
set_bit(__obj_to_index(s, addr, p), obj_map);
|
||||
}
|
||||
|
||||
/*
|
||||
* Determine a map of object in use on a page.
|
||||
*
|
||||
@@ -442,17 +454,11 @@ static DEFINE_SPINLOCK(object_map_lock);
|
||||
static unsigned long *get_map(struct kmem_cache *s, struct page *page)
|
||||
__acquires(&object_map_lock)
|
||||
{
|
||||
void *p;
|
||||
void *addr = page_address(page);
|
||||
|
||||
VM_BUG_ON(!irqs_disabled());
|
||||
|
||||
spin_lock(&object_map_lock);
|
||||
|
||||
bitmap_zero(object_map, page->objects);
|
||||
|
||||
for (p = page->freelist; p; p = get_freepointer(s, p))
|
||||
set_bit(__obj_to_index(s, addr, p), object_map);
|
||||
__fill_map(object_map, s, page);
|
||||
|
||||
return object_map;
|
||||
}
|
||||
@@ -1597,7 +1603,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
|
||||
}
|
||||
|
||||
static inline bool slab_free_freelist_hook(struct kmem_cache *s,
|
||||
void **head, void **tail)
|
||||
void **head, void **tail,
|
||||
int *cnt)
|
||||
{
|
||||
|
||||
void *object;
|
||||
@@ -1624,6 +1631,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
|
||||
*head = object;
|
||||
if (!*tail)
|
||||
*tail = object;
|
||||
} else {
|
||||
/*
|
||||
* Adjust the reconstructed freelist depth
|
||||
* accordingly if object's reuse is delayed.
|
||||
*/
|
||||
--(*cnt);
|
||||
}
|
||||
} while (object != old_tail);
|
||||
|
||||
@@ -3148,7 +3161,9 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
|
||||
struct kmem_cache_cpu *c;
|
||||
unsigned long tid;
|
||||
|
||||
memcg_slab_free_hook(s, &head, 1);
|
||||
/* memcg_slab_free_hook() is already called for bulk free. */
|
||||
if (!tail)
|
||||
memcg_slab_free_hook(s, &head, 1);
|
||||
redo:
|
||||
/*
|
||||
* Determine the currently cpus per cpu slab.
|
||||
@@ -3192,7 +3207,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
|
||||
* With KASAN enabled slab_free_freelist_hook modifies the freelist
|
||||
* to remove objects, whose reuse must be delayed.
|
||||
*/
|
||||
if (slab_free_freelist_hook(s, &head, &tail))
|
||||
if (slab_free_freelist_hook(s, &head, &tail, &cnt))
|
||||
do_slab_free(s, page, head, tail, cnt, addr);
|
||||
}
|
||||
|
||||
@@ -3888,8 +3903,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
|
||||
if (alloc_kmem_cache_cpus(s))
|
||||
return 0;
|
||||
|
||||
free_kmem_cache_nodes(s);
|
||||
error:
|
||||
__kmem_cache_release(s);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@@ -4501,13 +4516,15 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
|
||||
return 0;
|
||||
|
||||
err = sysfs_slab_add(s);
|
||||
if (err)
|
||||
if (err) {
|
||||
__kmem_cache_release(s);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (s->flags & SLAB_STORE_USER)
|
||||
debugfs_slab_add(s);
|
||||
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
|
||||
@@ -4778,17 +4795,17 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
|
||||
}
|
||||
|
||||
static void process_slab(struct loc_track *t, struct kmem_cache *s,
|
||||
struct page *page, enum track_item alloc)
|
||||
struct page *page, enum track_item alloc,
|
||||
unsigned long *obj_map)
|
||||
{
|
||||
void *addr = page_address(page);
|
||||
void *p;
|
||||
unsigned long *map;
|
||||
|
||||
map = get_map(s, page);
|
||||
__fill_map(obj_map, s, page);
|
||||
|
||||
for_each_object(p, s, addr, page->objects)
|
||||
if (!test_bit(__obj_to_index(s, addr, p), map))
|
||||
if (!test_bit(__obj_to_index(s, addr, p), obj_map))
|
||||
add_location(t, s, get_track(s, p, alloc));
|
||||
put_map(map);
|
||||
}
|
||||
#endif /* CONFIG_DEBUG_FS */
|
||||
#endif /* CONFIG_SLUB_DEBUG */
|
||||
@@ -5783,14 +5800,27 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
|
||||
struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
|
||||
sizeof(struct loc_track));
|
||||
struct kmem_cache *s = file_inode(filep)->i_private;
|
||||
unsigned long *obj_map;
|
||||
|
||||
if (!t)
|
||||
return -ENOMEM;
|
||||
|
||||
obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
|
||||
if (!obj_map) {
|
||||
seq_release_private(inode, filep);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
|
||||
alloc = TRACK_ALLOC;
|
||||
else
|
||||
alloc = TRACK_FREE;
|
||||
|
||||
if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL))
|
||||
if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
|
||||
bitmap_free(obj_map);
|
||||
seq_release_private(inode, filep);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Push back cpu slabs */
|
||||
flush_all(s);
|
||||
@@ -5804,12 +5834,13 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
|
||||
|
||||
spin_lock_irqsave(&n->list_lock, flags);
|
||||
list_for_each_entry(page, &n->partial, slab_list)
|
||||
process_slab(t, s, page, alloc);
|
||||
process_slab(t, s, page, alloc, obj_map);
|
||||
list_for_each_entry(page, &n->full, slab_list)
|
||||
process_slab(t, s, page, alloc);
|
||||
process_slab(t, s, page, alloc, obj_map);
|
||||
spin_unlock_irqrestore(&n->list_lock, flags);
|
||||
}
|
||||
|
||||
bitmap_free(obj_map);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@@ -133,6 +133,7 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
|
||||
if (WARN_ON(!dmab))
|
||||
return -ENXIO;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
dmab->dev.type = type;
|
||||
dmab->dev.dev = device;
|
||||
dmab->bytes = 0;
|