diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml
index 23ff6746bb82..efd5a33c372a 100644
--- a/android/abi_gki_aarch64.xml
+++ b/android/abi_gki_aarch64.xml
@@ -312,6 +312,11 @@
+
+
+
+
+
@@ -352,6 +357,7 @@
+
@@ -439,6 +445,7 @@
+
@@ -463,7 +470,19 @@
+
+
+
+
+
+
+
+
+
+
+
+
@@ -512,6 +531,7 @@
+
@@ -548,6 +568,7 @@
+
@@ -693,6 +714,7 @@
+
@@ -6394,6 +6416,8 @@
+
+
@@ -6422,6 +6446,11 @@
+
+
+
+
+
@@ -6462,6 +6491,7 @@
+
@@ -6549,6 +6579,7 @@
+
@@ -6573,7 +6604,19 @@
+
+
+
+
+
+
+
+
+
+
+
+
@@ -6622,6 +6665,7 @@
+
@@ -6658,6 +6702,7 @@
+
@@ -6803,6 +6848,7 @@
+
@@ -7521,6 +7567,7 @@
+
@@ -13314,7 +13361,6 @@
-
@@ -13561,6 +13607,11 @@
+
+
+
+
+
@@ -15192,6 +15243,10 @@
+
+
+
+
@@ -15531,7 +15586,6 @@
-
@@ -16874,6 +16928,7 @@
+
@@ -17833,9 +17888,11 @@
+
+
@@ -18743,6 +18800,7 @@
+
@@ -22031,6 +22089,10 @@
+
+
+
+
@@ -22498,11 +22560,6 @@
-
-
-
-
-
@@ -23493,6 +23550,10 @@
+
+
+
+
@@ -24476,17 +24537,6 @@
-
-
-
-
-
-
-
-
-
-
-
@@ -24702,6 +24752,7 @@
+
@@ -28507,6 +28558,7 @@
+
@@ -30696,6 +30748,14 @@
+
+
+
+
+
+
+
+
@@ -33252,6 +33312,11 @@
+
+
+
+
+
@@ -44069,6 +44134,7 @@
+
@@ -45242,11 +45308,57 @@
-
+
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -46506,6 +46618,7 @@
+
@@ -46532,6 +46645,13 @@
+
+
+
+
+
+
+
@@ -47178,6 +47298,7 @@
+
@@ -50585,7 +50706,6 @@
-
@@ -50655,6 +50775,20 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -51793,6 +51927,7 @@
+
@@ -52281,6 +52416,14 @@
+
+
+
+
+
+
+
+
@@ -53643,6 +53786,7 @@
+
@@ -54231,11 +54375,6 @@
-
-
-
-
-
@@ -55344,6 +55483,7 @@
+
@@ -55726,6 +55866,7 @@
+
@@ -56208,6 +56349,20 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -56610,6 +56765,7 @@
+
@@ -59838,6 +59994,7 @@
+
@@ -66313,14 +66470,6 @@
-
-
-
-
-
-
-
-
@@ -67271,6 +67420,56 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -68815,11 +69014,6 @@
-
-
-
-
-
@@ -72942,7 +73136,6 @@
-
@@ -73614,11 +73807,7 @@
-
-
-
-
-
+
@@ -78369,11 +78558,6 @@
-
-
-
-
-
@@ -80890,6 +81074,32 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -82888,7 +83098,6 @@
-
@@ -82971,6 +83180,7 @@
+
@@ -83034,6 +83244,7 @@
+
@@ -84837,12 +85048,6 @@
-
-
-
-
-
-
@@ -86663,7 +86868,6 @@
-
@@ -89225,53 +89429,6 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
@@ -90789,7 +90946,6 @@
-
@@ -92029,7 +92185,6 @@
-
@@ -94801,6 +94956,9 @@
+
+
+
@@ -95999,17 +96157,6 @@
-
-
-
-
-
-
-
-
-
-
-
@@ -103470,6 +103617,7 @@
+
@@ -103739,6 +103887,7 @@
+
@@ -109178,6 +109327,26 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -109649,6 +109818,10 @@
+
+
+
+
@@ -109692,6 +109865,7 @@
+
@@ -110188,6 +110362,7 @@
+
@@ -112407,6 +112582,7 @@
+
@@ -112504,6 +112680,7 @@
+
@@ -113877,6 +114054,32 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -116587,9 +116790,9 @@
-
+
-
+
@@ -116723,9 +116926,9 @@
-
-
-
+
+
+
@@ -118392,6 +118595,39 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -118665,6 +118901,13 @@
+
+
+
+
+
+
+
@@ -119233,6 +119476,14 @@
+
+
+
+
+
+
+
+
@@ -119405,19 +119656,97 @@
-
-
-
-
-
+
+
+
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -119724,6 +120053,13 @@
+
+
+
+
+
+
+
@@ -119949,6 +120285,13 @@
+
+
+
+
+
+
+
@@ -120824,6 +121167,14 @@
+
+
+
+
+
+
+
+
@@ -121553,6 +121904,11 @@
+
+
+
+
+
@@ -121593,6 +121949,7 @@
+
@@ -121680,6 +122037,7 @@
+
@@ -121704,8 +122062,20 @@
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -121753,6 +122123,7 @@
+
@@ -121789,6 +122160,7 @@
+
@@ -121934,6 +122306,7 @@
+
@@ -122495,12 +122868,12 @@
-
-
-
-
-
-
+
+
+
+
+
+
@@ -123014,25 +123387,25 @@
-
-
-
-
-
+
+
+
+
+
-
-
-
-
-
-
+
+
+
+
+
+
-
-
-
-
+
+
+
+
@@ -123044,9 +123417,9 @@
-
-
-
+
+
+
@@ -123061,40 +123434,40 @@
-
-
+
+
-
-
-
-
+
+
+
+
-
-
+
+
-
-
+
+
-
-
-
-
-
+
+
+
+
+
-
-
+
+
-
-
-
-
-
+
+
+
+
+
@@ -123187,8 +123560,8 @@
-
-
+
+
@@ -123198,14 +123571,14 @@
-
-
-
-
+
+
+
+
-
-
+
+
@@ -123223,27 +123596,27 @@
-
-
+
+
-
-
+
+
-
-
-
-
+
+
+
+
-
-
-
+
+
+
@@ -123268,29 +123641,29 @@
-
-
-
-
+
+
+
+
-
-
-
-
-
+
+
+
+
+
-
-
+
+
-
-
+
+
-
-
+
+
@@ -123298,22 +123671,22 @@
-
-
-
+
+
+
-
-
-
+
+
+
-
-
+
+
-
-
+
+
@@ -123329,15 +123702,15 @@
-
-
+
+
-
-
-
-
-
+
+
+
+
+
@@ -123350,31 +123723,31 @@
-
-
+
+
-
-
-
+
+
+
-
-
+
+
-
-
-
+
+
+
-
-
-
+
+
+
@@ -123397,49 +123770,49 @@
-
-
+
+
-
-
-
+
+
+
-
-
+
+
-
-
-
+
+
+
-
-
-
-
+
+
+
+
-
-
+
+
-
-
+
+
-
-
-
+
+
+
@@ -123448,22 +123821,22 @@
-
-
+
+
-
-
-
-
+
+
+
+
-
-
+
+
-
-
+
+
@@ -123491,19 +123864,19 @@
-
-
-
+
+
+
-
-
-
+
+
+
-
-
-
+
+
+
@@ -123634,28 +124007,28 @@
-
-
+
+
-
-
+
+
-
-
+
+
-
-
+
+
@@ -123663,10 +124036,10 @@
-
-
-
-
+
+
+
+
@@ -124809,9 +125182,9 @@
-
-
-
+
+
+
@@ -127865,10 +128238,10 @@
-
-
-
-
+
+
+
+
@@ -127877,10 +128250,10 @@
-
-
-
-
+
+
+
+
@@ -128472,8 +128845,8 @@
-
-
+
+
@@ -131282,16 +131655,16 @@
-
-
-
-
+
+
+
+
-
-
-
-
+
+
+
+
@@ -131924,10 +132297,10 @@
-
-
-
-
+
+
+
+
@@ -132049,7 +132422,7 @@
-
+
@@ -139055,16 +139428,16 @@
-
-
-
-
+
+
+
+
-
-
-
-
+
+
+
+
@@ -141471,8 +141844,8 @@
-
-
+
+
@@ -142657,8 +143030,8 @@
-
-
+
+
@@ -146175,16 +146548,16 @@
-
-
+
+
-
-
+
+
-
-
+
+
@@ -151045,6 +151418,14 @@
+
+
+
+
+
+
+
+
diff --git a/android/abi_gki_aarch64_oplus b/android/abi_gki_aarch64_oplus
index 4117c5405e2b..078753815e90 100644
--- a/android/abi_gki_aarch64_oplus
+++ b/android/abi_gki_aarch64_oplus
@@ -2976,6 +2976,7 @@
__traceiter_android_vh_free_oem_binder_struct
__traceiter_android_vh_binder_special_task
__traceiter_android_vh_binder_free_buf
+ __traceiter_android_vh_binder_buffer_release
__tracepoint_android_rvh_account_irq
__tracepoint_android_rvh_after_enqueue_task
__tracepoint_android_rvh_build_perf_domains
@@ -3257,6 +3258,7 @@
__tracepoint_android_vh_free_oem_binder_struct
__tracepoint_android_vh_binder_special_task
__tracepoint_android_vh_binder_free_buf
+ __tracepoint_android_vh_binder_buffer_release
trace_print_array_seq
trace_print_flags_seq
trace_print_hex_seq
@@ -3721,4 +3723,6 @@
xhci_ring_cmd_db
xhci_ring_free
xhci_trb_virt_to_dma
+ xt_register_match
+ xt_unregister_match
zero_pfn
diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom
index 32951b84a548..7a133e627fc2 100644
--- a/android/abi_gki_aarch64_qcom
+++ b/android/abi_gki_aarch64_qcom
@@ -2607,6 +2607,7 @@
__traceiter_android_vh_show_suspend_epoch_val
__traceiter_android_vh_subpage_dma_contig_alloc
__traceiter_android_vh_timer_calc_index
+ __traceiter_android_vh_try_fixup_sea
__traceiter_android_vh_ufs_check_int_errors
__traceiter_android_vh_ufs_clock_scaling
__traceiter_android_vh_ufs_compl_command
@@ -2735,6 +2736,7 @@
__tracepoint_android_vh_show_suspend_epoch_val
__tracepoint_android_vh_subpage_dma_contig_alloc
__tracepoint_android_vh_timer_calc_index
+ __tracepoint_android_vh_try_fixup_sea
__tracepoint_android_vh_ufs_check_int_errors
__tracepoint_android_vh_ufs_clock_scaling
__tracepoint_android_vh_ufs_compl_command
diff --git a/android/abi_gki_aarch64_transsion b/android/abi_gki_aarch64_transsion
index d238c721db9e..0cf3b5d164b9 100644
--- a/android/abi_gki_aarch64_transsion
+++ b/android/abi_gki_aarch64_transsion
@@ -46,6 +46,26 @@
__traceiter_android_vh_unuse_swap_page
__traceiter_android_vh_waiting_for_page_migration
__traceiter_android_vh_should_end_madvise
+ __traceiter_android_vh_exit_check
+ __traceiter_android_vh_bio_free
+ __traceiter_android_rvh_internal_blk_mq_alloc_request
+ __traceiter_android_vh_internal_blk_mq_free_request
+ __traceiter_android_vh_blk_mq_complete_request
+ __traceiter_android_vh_blk_mq_add_to_requeue_list
+ __traceiter_android_rvh_blk_mq_delay_run_hw_queue
+ __traceiter_android_vh_blk_mq_run_hw_queue
+ __traceiter_android_vh_blk_mq_insert_request
+ __traceiter_android_rvh_blk_mq_alloc_rq_map
+ __traceiter_android_rvh_blk_mq_init_allocated_queue
+ __traceiter_android_vh_blk_mq_exit_queue
+ __traceiter_android_vh_blk_mq_alloc_tag_set
+ __traceiter_android_rvh_blk_allocated_queue_init
+ __traceiter_android_rvh_blk_flush_plug_list
+ __traceiter_android_vh_blk_alloc_flush_queue
+ __traceiter_android_vh_blk_mq_all_tag_iter
+ __traceiter_android_vh_blk_mq_queue_tag_busy_iter
+ __traceiter_android_vh_blk_mq_free_tags
+ __traceiter_android_vh_blk_mq_sched_insert_request
__tracepoint_android_rvh_alloc_si
__tracepoint_android_rvh_alloc_swap_slot_cache
__tracepoint_android_rvh_drain_slots_cache_cpu
@@ -82,4 +102,24 @@
__tracepoint_android_vh_unuse_swap_page
__tracepoint_android_vh_waiting_for_page_migration
__tracepoint_android_vh_should_end_madvise
+ __tracepoint_android_vh_exit_check
+ __tracepoint_android_vh_bio_free
+ __tracepoint_android_rvh_internal_blk_mq_alloc_request
+ __tracepoint_android_vh_internal_blk_mq_free_request
+ __tracepoint_android_vh_blk_mq_complete_request
+ __tracepoint_android_vh_blk_mq_add_to_requeue_list
+ __tracepoint_android_rvh_blk_mq_delay_run_hw_queue
+ __tracepoint_android_vh_blk_mq_run_hw_queue
+ __tracepoint_android_vh_blk_mq_insert_request
+ __tracepoint_android_rvh_blk_mq_alloc_rq_map
+ __tracepoint_android_rvh_blk_mq_init_allocated_queue
+ __tracepoint_android_vh_blk_mq_exit_queue
+ __tracepoint_android_vh_blk_mq_alloc_tag_set
+ __tracepoint_android_rvh_blk_allocated_queue_init
+ __tracepoint_android_rvh_blk_flush_plug_list
+ __tracepoint_android_vh_blk_alloc_flush_queue
+ __tracepoint_android_vh_blk_mq_all_tag_iter
+ __tracepoint_android_vh_blk_mq_queue_tag_busy_iter
+ __tracepoint_android_vh_blk_mq_free_tags
+ __tracepoint_android_vh_blk_mq_sched_insert_request
zero_pfn
diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig
index 28136c6f6435..9688b5ec5372 100644
--- a/arch/arm64/configs/gki_defconfig
+++ b/arch/arm64/configs/gki_defconfig
@@ -478,6 +478,7 @@ CONFIG_USB_HIDDEV=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI_RENESAS=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index a070335cae20..058881de389e 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -728,6 +728,11 @@ static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs)
{
const struct fault_info *inf;
unsigned long siaddr;
+ bool can_fixup = false;
+
+ trace_android_vh_try_fixup_sea(far, esr, regs, &can_fixup);
+ if (can_fixup && fixup_exception(regs))
+ return 0;
inf = esr_to_fault_info(esr);
diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig
index 9c6d1a0e661d..986af3f86e1e 100644
--- a/arch/x86/configs/gki_defconfig
+++ b/arch/x86/configs/gki_defconfig
@@ -429,6 +429,7 @@ CONFIG_HID_WIIMOTE=y
CONFIG_USB_HIDDEV=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI_RENESAS=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 279ad47a8273..4bf351e6ba0c 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -19,6 +19,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS
# optimization flags.
KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
+# When LTO is enabled, llvm emits many text sections, which are not supported
+# by kexec. Remove -flto=* flags.
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS))
+
# When linking purgatory.ro with -r unresolved symbols are not checked,
# also link a purgatory.chk binary without -r to check for unresolved symbols.
PURGATORY_LDFLAGS := -e purgatory_start -nostdlib -z nodefaultlib
diff --git a/block/bio.c b/block/bio.c
index 6d6e7b96b002..d03952b9e76d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -21,6 +21,7 @@
#include
#include
+#include
#include "blk.h"
#include "blk-rq-qos.h"
@@ -252,6 +253,7 @@ static void bio_free(struct bio *bio)
struct bio_set *bs = bio->bi_pool;
void *p;
+ trace_android_vh_bio_free(bio);
bio_uninit(bio);
if (bs) {
diff --git a/block/blk-core.c b/block/blk-core.c
index d64ce14cbe6c..73adca697336 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -66,6 +66,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_merge);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_requeue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_complete);
+#undef CREATE_TRACE_POINTS
+#include
+
DEFINE_IDA(blk_queue_ida);
/*
@@ -522,6 +525,7 @@ struct request_queue *blk_alloc_queue(int node_id)
{
struct request_queue *q;
int ret;
+ bool skip = false;
q = kmem_cache_alloc_node(blk_requestq_cachep,
GFP_KERNEL | __GFP_ZERO, node_id);
@@ -585,6 +589,10 @@ struct request_queue *blk_alloc_queue(int node_id)
blk_set_default_limits(&q->limits);
q->nr_requests = BLKDEV_MAX_RQ;
+ trace_android_rvh_blk_allocated_queue_init(&skip, q);
+ if (skip)
+ goto fail_ref;
+
return q;
fail_ref:
@@ -1761,6 +1769,7 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
+ trace_android_rvh_blk_flush_plug_list(plug, from_schedule);
flush_plug_callbacks(plug, from_schedule);
if (!list_empty(&plug->mq_list))
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 33b487b5cbf7..af59a7e4684f 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -467,11 +467,13 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
}
EXPORT_SYMBOL(blkdev_issue_flush);
+#include
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
gfp_t flags)
{
struct blk_flush_queue *fq;
int rq_sz = sizeof(struct request);
+ bool skip = false;
fq = kzalloc_node(sizeof(*fq), flags, node);
if (!fq)
@@ -479,8 +481,12 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
spin_lock_init(&fq->mq_flush_lock);
- rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
- fq->flush_rq = kzalloc_node(rq_sz, flags, node);
+ trace_android_vh_blk_alloc_flush_queue(&skip, cmd_size, flags, node,
+ fq);
+ if (!skip) {
+ rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
+ fq->flush_rq = kzalloc_node(rq_sz, flags, node);
+ }
if (!fq->flush_rq)
goto fail_rq;
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 91b7626dbcd1..856fe30f1a3c 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -422,6 +422,7 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
return false;
}
+#include
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async)
{
@@ -429,10 +430,13 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
struct elevator_queue *e = q->elevator;
struct blk_mq_ctx *ctx = rq->mq_ctx;
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+ bool skip = false;
WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));
- if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+ trace_android_vh_blk_mq_sched_insert_request(&skip, rq);
+
+ if (!skip && blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
/*
* Firstly normal IO request is inserted to scheduler queue or
* sw queue, meantime we add flush request to dispatch queue(
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 16ad9e656610..a1125b9e34f1 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -15,6 +15,8 @@
#include "blk-mq.h"
#include "blk-mq-tag.h"
+#include
+
/*
* If a previously inactive queue goes active, bump the active user count.
* We need to do this before try to allocate driver tag, then even if fail
@@ -336,8 +338,13 @@ static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
+ bool skip = false;
+
WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);
+ trace_android_vh_blk_mq_all_tag_iter(&skip, tags, fn, priv);
+ if (skip)
+ return;
if (tags->nr_reserved_tags)
bt_tags_for_each(tags, tags->breserved_tags, fn, priv,
flags | BT_TAG_ITER_RESERVED);
@@ -438,6 +445,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
{
struct blk_mq_hw_ctx *hctx;
int i;
+ bool skip = false;
/*
* __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
@@ -457,6 +465,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
if (!blk_mq_hw_queue_mapped(hctx))
continue;
+ trace_android_vh_blk_mq_queue_tag_busy_iter(&skip, hctx, fn,
+ priv);
+ if (skip)
+ continue;
+
if (tags->nr_reserved_tags)
bt_for_each(hctx, tags->breserved_tags, fn, priv, true);
bt_for_each(hctx, tags->bitmap_tags, fn, priv, false);
@@ -556,6 +569,12 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags)
{
+ bool skip = false;
+
+ trace_android_vh_blk_mq_free_tags(&skip, tags);
+ if (skip)
+ return;
+
if (!(flags & BLK_MQ_F_TAG_HCTX_SHARED)) {
sbitmap_queue_free(tags->bitmap_tags);
sbitmap_queue_free(tags->breserved_tags);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 21544b119a7e..f5dc90f25d31 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -353,6 +353,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
struct elevator_queue *e = q->elevator;
u64 alloc_time_ns = 0;
unsigned int tag;
+ bool skip = false;
/* alloc_time includes depth and tag waits */
if (blk_queue_rq_alloc_time(q))
@@ -384,7 +385,9 @@ retry:
* case just retry the hctx assignment and tag allocation as CPU hotplug
* should have migrated us to an online CPU by now.
*/
- tag = blk_mq_get_tag(data);
+ trace_android_rvh_internal_blk_mq_alloc_request(&skip, &tag, data);
+ if (!skip)
+ tag = blk_mq_get_tag(data);
if (tag == BLK_MQ_NO_TAG) {
if (data->flags & BLK_MQ_REQ_NOWAIT)
return NULL;
@@ -496,12 +499,17 @@ static void __blk_mq_free_request(struct request *rq)
struct blk_mq_ctx *ctx = rq->mq_ctx;
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
const int sched_tag = rq->internal_tag;
+ bool skip = false;
blk_crypto_free_request(rq);
blk_pm_mark_last_busy(rq);
rq->mq_hctx = NULL;
- if (rq->tag != BLK_MQ_NO_TAG)
- blk_mq_put_tag(hctx->tags, ctx, rq->tag);
+
+ trace_android_vh_internal_blk_mq_free_request(&skip, rq, hctx);
+ if (!skip) {
+ if (rq->tag != BLK_MQ_NO_TAG)
+ blk_mq_put_tag(hctx->tags, ctx, rq->tag);
+ }
if (sched_tag != BLK_MQ_NO_TAG)
blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
blk_mq_sched_restart(hctx);
@@ -701,6 +709,11 @@ EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
**/
void blk_mq_complete_request(struct request *rq)
{
+ bool skip = false;
+
+ trace_android_vh_blk_mq_complete_request(&skip, rq);
+ if (skip)
+ return;
if (!blk_mq_complete_request_remote(rq))
rq->q->mq_ops->complete(rq);
}
@@ -827,7 +840,12 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
{
struct request_queue *q = rq->q;
unsigned long flags;
+ bool skip = false;
+ trace_android_vh_blk_mq_add_to_requeue_list(&skip, rq,
+ kick_requeue_list);
+ if (skip)
+ return;
/*
* We abuse this flag that is otherwise used by the I/O scheduler to
* request head insertion from the workqueue.
@@ -1593,9 +1611,15 @@ select_cpu:
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
unsigned long msecs)
{
+ bool skip = false;
+
if (unlikely(blk_mq_hctx_stopped(hctx)))
return;
+ trace_android_rvh_blk_mq_delay_run_hw_queue(&skip, hctx, async);
+ if (skip)
+ return;
+
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
int cpu = get_cpu();
if (cpumask_test_cpu(cpu, hctx->cpumask)) {
@@ -1651,6 +1675,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
blk_mq_hctx_has_pending(hctx);
hctx_unlock(hctx, srcu_idx);
+ trace_android_vh_blk_mq_run_hw_queue(&need_run, hctx);
if (need_run)
__blk_mq_delay_run_hw_queue(hctx, async, 0);
}
@@ -1877,9 +1902,14 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool at_head)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
+ bool skip = false;
lockdep_assert_held(&ctx->lock);
+ trace_android_vh_blk_mq_insert_request(&skip, hctx, rq);
+ if (skip)
+ return;
+
__blk_mq_insert_req_list(hctx, rq, at_head);
blk_mq_hctx_mark_pending(hctx, ctx);
}
@@ -2419,12 +2449,15 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
{
struct blk_mq_tags *tags;
int node;
+ bool skip = false;
node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
if (node == NUMA_NO_NODE)
node = set->numa_node;
- tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags);
+ trace_android_rvh_blk_mq_alloc_rq_map(&skip, &tags, set, node, flags);
+ if (!skip)
+ tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags);
if (!tags)
return NULL;
@@ -3362,6 +3395,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
blk_mq_add_queue_tag_set(set, q);
blk_mq_map_swqueue(q);
+ trace_android_rvh_blk_mq_init_allocated_queue(q);
+
if (elevator_init)
elevator_init_mq(q);
@@ -3385,6 +3420,7 @@ void blk_mq_exit_queue(struct request_queue *q)
{
struct blk_mq_tag_set *set = q->tag_set;
+ trace_android_vh_blk_mq_exit_queue(q);
/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
@@ -3575,6 +3611,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (ret)
goto out_free_mq_map;
+ trace_android_vh_blk_mq_alloc_tag_set(set);
+
ret = blk_mq_alloc_map_and_requests(set);
if (ret)
goto out_free_mq_map;
diff --git a/block/mq-deadline-main.c b/block/mq-deadline-main.c
index 560752ad8677..f59a904cfc2a 100644
--- a/block/mq-deadline-main.c
+++ b/block/mq-deadline-main.c
@@ -549,8 +549,9 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data;
struct blk_mq_tags *tags = hctx->sched_tags;
+ unsigned int shift = tags->bitmap_tags->sb.shift;
- dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
+ dd->async_depth = max(1U, 3 * (1U << shift) / 4);
sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index dabe96f0b848..9e128402a2e1 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1800,8 +1800,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (offset > buffer->data_size || read_size < sizeof(*hdr))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
return 0;
+
if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;
@@ -3830,12 +3832,14 @@ binder_free_buf(struct binder_proc *proc,
struct binder_buffer *buffer, bool is_failure)
{
bool enqueue_task = true;
+ bool has_transaction = false;
trace_android_vh_binder_free_buf(proc, thread, buffer);
binder_inner_proc_lock(proc);
if (buffer->transaction) {
buffer->transaction->buffer = NULL;
buffer->transaction = NULL;
+ has_transaction = true;
}
binder_inner_proc_unlock(proc);
if (buffer->async_transaction && buffer->target_node) {
@@ -3859,6 +3863,8 @@ binder_free_buf(struct binder_proc *proc,
}
binder_node_inner_unlock(buf_node);
}
+ trace_android_vh_binder_buffer_release(proc, thread, buffer,
+ has_transaction);
trace_binder_transaction_buffer_release(buffer);
binder_release_entire_buffer(proc, thread, buffer, is_failure);
binder_alloc_free_buf(&proc->alloc, buffer);
diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c
index a45a4e21c68d..8eaa9f02093e 100644
--- a/drivers/android/vendor_hooks.c
+++ b/drivers/android/vendor_hooks.c
@@ -45,6 +45,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -218,6 +219,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_die_kernel_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sea);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_mem_abort);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sp_pc_abort);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_fixup_sea);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_setup_dma_ops);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_setup_dma_ops);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_alloc_iova);
@@ -506,6 +508,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_received);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_oem_binder_struct);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_special_task);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_free_buf);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_buffer_release);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_perf_huristic_ctrl);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_command_post_change);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_abort_success_ctrl);
@@ -515,6 +518,26 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_err_check_ctrl);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_err_print_ctrl);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_vmscan_kswapd_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_swap_page_spf);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_check);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_bio_free);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_internal_blk_mq_alloc_request);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_internal_blk_mq_free_request);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_complete_request);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_add_to_requeue_list);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_blk_mq_delay_run_hw_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_run_hw_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_insert_request);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_blk_mq_alloc_rq_map);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_blk_mq_init_allocated_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_exit_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_alloc_tag_set);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_blk_allocated_queue_init);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_blk_flush_plug_list);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_alloc_flush_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_all_tag_iter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_queue_tag_busy_iter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_free_tags);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_sched_insert_request);
/*
* For type visibility
*/
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 6482fecadabc..02a9e7fa97bb 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -4579,6 +4579,7 @@ int devm_clk_notifier_register(struct device *dev, struct clk *clk,
if (!ret) {
devres->clk = clk;
devres->nb = nb;
+ devres_add(dev, devres);
} else {
devres_free(devres);
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 146c8934dcf7..3ddf95713555 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -2052,7 +2052,7 @@ static void clear_etmdrvdata(void *info)
etmdrvdata[cpu] = NULL;
}
-static void __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
+static void etm4_remove_dev(struct etmv4_drvdata *drvdata)
{
etm_perf_symlink(drvdata->csdev, false);
/*
@@ -2074,7 +2074,7 @@ static void __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
coresight_unregister(drvdata->csdev);
}
-static void __exit etm4_remove_amba(struct amba_device *adev)
+static void etm4_remove_amba(struct amba_device *adev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);
@@ -2082,7 +2082,7 @@ static void __exit etm4_remove_amba(struct amba_device *adev)
etm4_remove_dev(drvdata);
}
-static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
+static int etm4_remove_platform_dev(struct platform_device *pdev)
{
int ret = 0;
struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 2816e3b6c08c..5c12806ffa62 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1121,6 +1121,18 @@ static int dwc3_core_init(struct dwc3 *dwc)
}
}
+ /*
+ * Modify this for all supported Super Speed ports when
+ * multiport support is added.
+ */
+ if (hw_mode != DWC3_GHWPARAMS0_MODE_GADGET &&
+ (DWC3_IP_IS(DWC31)) &&
+ dwc->maximum_speed == USB_SPEED_SUPER) {
+ reg = dwc3_readl(dwc->regs, DWC3_LLUCTL);
+ reg |= DWC3_LLUCTL_FORCE_GEN1;
+ dwc3_writel(dwc->regs, DWC3_LLUCTL, reg);
+ }
+
return 0;
err4:
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 327a9faab7df..415d2e24dc44 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -170,6 +170,8 @@
#define DWC3_OEVTEN 0xcc0C
#define DWC3_OSTS 0xcc10
+#define DWC3_LLUCTL 0xd024
+
/* Bit fields */
/* Global SoC Bus Configuration INCRx Register 0 */
@@ -633,6 +635,9 @@
#define DWC3_OSTS_VBUSVLD BIT(1)
#define DWC3_OSTS_CONIDSTS BIT(0)
+/* Force Gen1 speed on Gen2 link */
+#define DWC3_LLUCTL_FORCE_GEN1 BIT(10)
+
/* Structures */
struct dwc3_trb;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 5388c6e6ba83..bb8888696867 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -139,6 +139,24 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
return -ETIMEDOUT;
}
+static void dwc3_ep0_reset_state(struct dwc3 *dwc)
+{
+ unsigned int dir;
+
+ if (dwc->ep0state != EP0_SETUP_PHASE) {
+ dir = !!dwc->ep0_expect_in;
+ if (dwc->ep0state == EP0_DATA_PHASE)
+ dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
+ else
+ dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
+
+ dwc->eps[0]->trb_enqueue = 0;
+ dwc->eps[1]->trb_enqueue = 0;
+
+ dwc3_ep0_stall_and_restart(dwc);
+ }
+}
+
/**
* dwc3_ep_inc_trb - increment a trb index.
* @index: Pointer to the TRB index to increment.
@@ -2068,7 +2086,17 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
list_for_each_entry(r, &dep->pending_list, list) {
if (r == req) {
- dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ /*
+ * Explicitly check for EP0/1 as dequeue for those
+ * EPs need to be handled differently. Control EP
+ * only deals with one USB req, and giveback will
+ * occur during dwc3_ep0_stall_and_restart(). EP0
+ * requests are never added to started_list.
+ */
+ if (dep->number > 1)
+ dwc3_gadget_giveback(dep, req, -ECONNRESET);
+ else
+ dwc3_ep0_reset_state(dwc);
goto out;
}
}
@@ -2547,16 +2575,9 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
if (ret == 0) {
- unsigned int dir;
-
dev_warn(dwc->dev, "wait for SETUP phase timed out\n");
spin_lock_irqsave(&dwc->lock, flags);
- dir = !!dwc->ep0_expect_in;
- if (dwc->ep0state == EP0_DATA_PHASE)
- dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
- else
- dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
- dwc3_ep0_stall_and_restart(dwc);
+ dwc3_ep0_reset_state(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
}
}
@@ -3849,16 +3870,7 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc->setup_packet_pending = false;
usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
- if (dwc->ep0state != EP0_SETUP_PHASE) {
- unsigned int dir;
-
- dir = !!dwc->ep0_expect_in;
- if (dwc->ep0state == EP0_DATA_PHASE)
- dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
- else
- dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
- dwc3_ep0_stall_and_restart(dwc);
- }
+ dwc3_ep0_reset_state(dwc);
}
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
@@ -3912,20 +3924,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
* phase. So ensure that EP0 is in setup phase by issuing a stall
* and restart if EP0 is not in setup phase.
*/
- if (dwc->ep0state != EP0_SETUP_PHASE) {
- unsigned int dir;
-
- dir = !!dwc->ep0_expect_in;
- if (dwc->ep0state == EP0_DATA_PHASE)
- dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
- else
- dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
-
- dwc->eps[0]->trb_enqueue = 0;
- dwc->eps[1]->trb_enqueue = 0;
-
- dwc3_ep0_stall_and_restart(dwc);
- }
+ dwc3_ep0_reset_state(dwc);
/*
* In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index ba13d8997db8..ce973fae4091 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -419,6 +419,7 @@ uvc_register_video(struct uvc_device *uvc)
/* TODO reference counting. */
uvc->vdev.v4l2_dev = &uvc->v4l2_dev;
+ uvc->vdev.v4l2_dev->dev = &cdev->gadget->dev;
uvc->vdev.fops = &uvc_v4l2_fops;
uvc->vdev.ioctl_ops = &uvc_v4l2_ioctl_ops;
uvc->vdev.release = video_device_release_empty;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 3714cb40f532..7b196ad570b9 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1624,6 +1624,9 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
if (PD_VDO_VID(p[0]) != USB_SID_PD)
break;
+ if (IS_ERR_OR_NULL(port->partner))
+ break;
+
if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
typec_partner_set_svdm_version(port->partner,
PD_VDO_SVDM_VER(p[0]));
@@ -2709,6 +2712,13 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
port->sink_cap_done = true;
tcpm_set_state(port, ready_state(port), 0);
break;
+ /*
+ * Some port partners do not support GET_STATUS; avoid soft resetting the link to
+ * prevent redundant power re-negotiation.
+ */
+ case GET_STATUS_SEND:
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
case SRC_READY:
case SNK_READY:
if (port->vdm_state > VDM_STATE_READY) {
@@ -5333,6 +5343,10 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
/* Do nothing, vbus drop expected */
break;
+ case SNK_HARD_RESET_WAIT_VBUS:
+ /* Do nothing, it's OK to receive vbus off events */
+ break;
+
default:
if (port->pwr_role == TYPEC_SINK && port->attached)
tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
@@ -5384,6 +5398,9 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
case SNK_DEBOUNCED:
/*Do nothing, still waiting for VSAFE5V for connect */
break;
+ case SNK_HARD_RESET_WAIT_VBUS:
+ /* Do nothing, it's OK to receive vsafe0v events */
+ break;
default:
if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
tcpm_set_state(port, SNK_UNATTACHED, 0);
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index e11d1a6732a6..90e51fa3696b 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -70,6 +70,7 @@ struct userfaultfd_ctx {
bool mmap_changing;
/* mm with one ore more vmas attached to this userfaultfd_ctx */
struct mm_struct *mm;
+ struct rcu_head rcu_head;
};
struct userfaultfd_fork_ctx {
@@ -155,6 +156,13 @@ static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
refcount_inc(&ctx->refcount);
}
+static void __free_userfaultfd_ctx(struct rcu_head *head)
+{
+ struct userfaultfd_ctx *ctx = container_of(head, struct userfaultfd_ctx,
+ rcu_head);
+ kmem_cache_free(userfaultfd_ctx_cachep, ctx);
+}
+
/**
* userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
* context.
@@ -175,7 +183,7 @@ static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
mmdrop(ctx->mm);
- kmem_cache_free(userfaultfd_ctx_cachep, ctx);
+ call_rcu(&ctx->rcu_head, __free_userfaultfd_ctx);
}
}
@@ -349,6 +357,24 @@ static inline long userfaultfd_get_blocking_state(unsigned int flags)
return TASK_UNINTERRUPTIBLE;
}
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+bool userfaultfd_using_sigbus(struct vm_area_struct *vma)
+{
+ struct userfaultfd_ctx *ctx;
+ bool ret;
+
+ /*
+ * Do it inside RCU section to ensure that the ctx doesn't
+ * disappear under us.
+ */
+ rcu_read_lock();
+ ctx = rcu_dereference(vma->vm_userfaultfd_ctx.ctx);
+ ret = ctx && (ctx->features & UFFD_FEATURE_SIGBUS);
+ rcu_read_unlock();
+ return ret;
+}
+#endif
+
/*
* The locking rules involved in returning VM_FAULT_RETRY depending on
* FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
@@ -393,7 +419,8 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
*/
mmap_assert_locked(mm);
- ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
+ ctx = rcu_dereference_protected(vmf->vma->vm_userfaultfd_ctx.ctx,
+ lockdep_is_held(&mm->mmap_lock));
if (!ctx)
goto out;
@@ -610,8 +637,10 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
/* the various vma->vm_userfaultfd_ctx still points to it */
mmap_write_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next)
- if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+ if (rcu_access_pointer(vma->vm_userfaultfd_ctx.ctx) ==
+ release_new_ctx) {
+ rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx,
+ NULL);
vma->vm_flags &= ~__VM_UFFD_FLAGS;
}
mmap_write_unlock(mm);
@@ -641,10 +670,13 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
struct userfaultfd_ctx *ctx = NULL, *octx;
struct userfaultfd_fork_ctx *fctx;
- octx = vma->vm_userfaultfd_ctx.ctx;
+ octx = rcu_dereference_protected(
+ vma->vm_userfaultfd_ctx.ctx,
+ lockdep_is_held(&vma->vm_mm->mmap_lock));
+
if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
vm_write_begin(vma);
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+ rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, NULL);
WRITE_ONCE(vma->vm_flags,
vma->vm_flags & ~__VM_UFFD_FLAGS);
vm_write_end(vma);
@@ -683,7 +715,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
list_add_tail(&fctx->list, fcs);
}
- vma->vm_userfaultfd_ctx.ctx = ctx;
+ rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, ctx);
return 0;
}
@@ -716,7 +748,8 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
{
struct userfaultfd_ctx *ctx;
- ctx = vma->vm_userfaultfd_ctx.ctx;
+ ctx = rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx,
+ lockdep_is_held(&vma->vm_mm->mmap_lock));
if (!ctx)
return;
@@ -727,7 +760,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
WRITE_ONCE(ctx->mmap_changing, true);
} else {
/* Drop uffd context if remap feature not enabled */
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+ rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, NULL);
vma->vm_flags &= ~__VM_UFFD_FLAGS;
}
}
@@ -764,7 +797,8 @@ bool userfaultfd_remove(struct vm_area_struct *vma,
struct userfaultfd_ctx *ctx;
struct userfaultfd_wait_queue ewq;
- ctx = vma->vm_userfaultfd_ctx.ctx;
+ ctx = rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx,
+ lockdep_is_held(&mm->mmap_lock));
if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
return true;
@@ -802,7 +836,9 @@ int userfaultfd_unmap_prep(struct vm_area_struct *vma,
{
for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
struct userfaultfd_unmap_ctx *unmap_ctx;
- struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
+ struct userfaultfd_ctx *ctx =
+ rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx,
+ lockdep_is_held(&vma->vm_mm->mmap_lock));
if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
has_unmap_ctx(ctx, unmaps, start, end))
@@ -867,10 +903,13 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
mmap_write_lock(mm);
prev = NULL;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ struct userfaultfd_ctx *cur_uffd_ctx =
+ rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx,
+ lockdep_is_held(&mm->mmap_lock));
cond_resched();
- BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
+ BUG_ON(!!cur_uffd_ctx ^
!!(vma->vm_flags & __VM_UFFD_FLAGS));
- if (vma->vm_userfaultfd_ctx.ctx != ctx) {
+ if (cur_uffd_ctx != ctx) {
prev = vma;
continue;
}
@@ -887,7 +926,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
prev = vma;
vm_write_begin(vma);
WRITE_ONCE(vma->vm_flags, new_flags);
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+ rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, NULL);
vm_write_end(vma);
}
mmap_write_unlock(mm);
@@ -1350,9 +1389,12 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
found = false;
basic_ioctls = false;
for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
+ struct userfaultfd_ctx *cur_uffd_ctx =
+ rcu_dereference_protected(cur->vm_userfaultfd_ctx.ctx,
+ lockdep_is_held(&mm->mmap_lock));
cond_resched();
- BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
+ BUG_ON(!!cur_uffd_ctx ^
!!(cur->vm_flags & __VM_UFFD_FLAGS));
/* check not compatible vmas */
@@ -1395,8 +1437,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
* wouldn't know which one to deliver the userfaults to.
*/
ret = -EBUSY;
- if (cur->vm_userfaultfd_ctx.ctx &&
- cur->vm_userfaultfd_ctx.ctx != ctx)
+ if (cur_uffd_ctx && cur_uffd_ctx != ctx)
goto out_unlock;
/*
@@ -1414,18 +1455,20 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
ret = 0;
do {
+ struct userfaultfd_ctx *cur_uffd_ctx =
+ rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx,
+ lockdep_is_held(&mm->mmap_lock));
cond_resched();
BUG_ON(!vma_can_userfault(vma, vm_flags));
- BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
- vma->vm_userfaultfd_ctx.ctx != ctx);
+ BUG_ON(cur_uffd_ctx && cur_uffd_ctx != ctx);
WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
/*
* Nothing to do: this vma is already registered into this
* userfaultfd and with the right tracking mode too.
*/
- if (vma->vm_userfaultfd_ctx.ctx == ctx &&
+ if (cur_uffd_ctx == ctx &&
(vma->vm_flags & vm_flags) == vm_flags)
goto skip;
@@ -1461,7 +1504,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
*/
vm_write_begin(vma);
WRITE_ONCE(vma->vm_flags, new_flags);
- vma->vm_userfaultfd_ctx.ctx = ctx;
+ rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, ctx);
vm_write_end(vma);
if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
@@ -1561,7 +1604,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
cond_resched();
- BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
+ BUG_ON(!!rcu_access_pointer(cur->vm_userfaultfd_ctx.ctx) ^
!!(cur->vm_flags & __VM_UFFD_FLAGS));
/*
@@ -1583,6 +1626,9 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
ret = 0;
do {
+ struct userfaultfd_ctx *cur_uffd_ctx =
+ rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx,
+ lockdep_is_held(&mm->mmap_lock));
cond_resched();
BUG_ON(!vma_can_userfault(vma, vma->vm_flags));
@@ -1591,7 +1637,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
* Nothing to do: this vma is already registered into this
* userfaultfd and with the right tracking mode too.
*/
- if (!vma->vm_userfaultfd_ctx.ctx)
+ if (!cur_uffd_ctx)
goto skip;
WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
@@ -1610,7 +1656,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
struct userfaultfd_wake_range range;
range.start = start;
range.len = vma_end - start;
- wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
+ wake_userfault(cur_uffd_ctx, &range);
}
new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
@@ -1641,7 +1687,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
*/
vm_write_begin(vma);
WRITE_ONCE(vma->vm_flags, new_flags);
- vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+ rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, NULL);
vm_write_end(vma);
skip:
@@ -1723,7 +1769,9 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
ret = -EINVAL;
if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
goto out;
- if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
+ if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|
+ UFFDIO_COPY_MODE_WP|
+ UFFDIO_COPY_MODE_MMAP_TRYLOCK))
goto out;
if (mmget_not_zero(ctx->mm)) {
ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
@@ -1774,13 +1822,14 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
if (ret)
goto out;
ret = -EINVAL;
- if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
+ if (uffdio_zeropage.mode & ~(UFFDIO_ZEROPAGE_MODE_DONTWAKE|
+ UFFDIO_ZEROPAGE_MODE_MMAP_TRYLOCK))
goto out;
if (mmget_not_zero(ctx->mm)) {
ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
uffdio_zeropage.range.len,
- &ctx->mmap_changing);
+ &ctx->mmap_changing, uffdio_zeropage.mode);
mmput(ctx->mm);
} else {
return -ESRCH;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index c853f612a815..bd4eb4547da3 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -292,7 +292,7 @@ struct vm_region {
#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
- struct userfaultfd_ctx *ctx;
+ struct userfaultfd_ctx __rcu *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 331d2ccf0bcc..c8d776bee7e7 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -33,9 +33,15 @@
#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
+static_assert(UFFDIO_ZEROPAGE_MODE_MMAP_TRYLOCK == UFFDIO_COPY_MODE_MMAP_TRYLOCK);
+#define UFFDIO_MODE_MMAP_TRYLOCK UFFDIO_COPY_MODE_MMAP_TRYLOCK
+
extern int sysctl_unprivileged_userfaultfd;
extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+extern bool userfaultfd_using_sigbus(struct vm_area_struct *vma);
+#endif
/*
* The mode of operation for __mcopy_atomic and its helpers.
@@ -62,9 +68,8 @@ extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long src_start, unsigned long len,
bool *mmap_changing, __u64 mode);
extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
- unsigned long dst_start,
- unsigned long len,
- bool *mmap_changing);
+ unsigned long dst_start, unsigned long len,
+ bool *mmap_changing, __u64 mode);
extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long len, bool *mmap_changing);
extern int mwriteprotect_range(struct mm_struct *dst_mm,
@@ -75,7 +80,7 @@ extern int mwriteprotect_range(struct mm_struct *dst_mm,
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
struct vm_userfaultfd_ctx vm_ctx)
{
- return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
+ return rcu_access_pointer(vma->vm_userfaultfd_ctx.ctx) == vm_ctx.ctx;
}
/*
@@ -154,6 +159,13 @@ static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
return VM_FAULT_SIGBUS;
}
+#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+static inline bool userfaultfd_using_sigbus(struct vm_area_struct *vma)
+{
+ return false;
+}
+#endif
+
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
struct vm_userfaultfd_ctx vm_ctx)
{
diff --git a/include/trace/hooks/binder.h b/include/trace/hooks/binder.h
index c12926edc615..234917699ee1 100644
--- a/include/trace/hooks/binder.h
+++ b/include/trace/hooks/binder.h
@@ -140,6 +140,10 @@ DECLARE_HOOK(android_vh_binder_free_buf,
TP_PROTO(struct binder_proc *proc, struct binder_thread *thread,
struct binder_buffer *buffer),
TP_ARGS(proc, thread, buffer));
+DECLARE_HOOK(android_vh_binder_buffer_release,
+ TP_PROTO(struct binder_proc *proc, struct binder_thread *thread,
+ struct binder_buffer *buffer, bool has_transaction),
+ TP_ARGS(proc, thread, buffer, has_transaction));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_BINDER_H */
diff --git a/include/trace/hooks/blk_mq.h b/include/trace/hooks/blk_mq.h
new file mode 100644
index 000000000000..75d36a48ebad
--- /dev/null
+++ b/include/trace/hooks/blk_mq.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM blk_mq
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_BLK_MQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_BLK_MQ_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+struct blk_mq_tag_set;
+struct blk_mq_hw_ctx;
+
+
+DECLARE_HOOK(android_vh_blk_mq_all_tag_iter,
+ TP_PROTO(bool *skip, struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
+ void *priv),
+ TP_ARGS(skip, tags, fn, priv));
+
+DECLARE_HOOK(android_vh_blk_mq_queue_tag_busy_iter,
+ TP_PROTO(bool *skip, struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+ void *priv),
+ TP_ARGS(skip, hctx, fn, priv));
+
+DECLARE_HOOK(android_vh_blk_mq_free_tags,
+ TP_PROTO(bool *skip, struct blk_mq_tags *tags),
+ TP_ARGS(skip, tags));
+
+#endif /* _TRACE_HOOK_BLK_MQ_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/block.h b/include/trace/hooks/block.h
index a5a7ac70a2ee..9126e6c0e3ff 100644
--- a/include/trace/hooks/block.h
+++ b/include/trace/hooks/block.h
@@ -14,14 +14,20 @@
struct blk_mq_tags;
struct blk_mq_alloc_data;
struct blk_mq_tag_set;
+struct blk_mq_hw_ctx;
#else
/* struct blk_mq_tags */
#include <../block/blk-mq-tag.h>
/* struct blk_mq_alloc_data */
#include <../block/blk-mq.h>
-/* struct blk_mq_tag_set */
+/* struct blk_mq_tag_set, struct blk_mq_hw_ctx */
#include <linux/blk-mq.h>
#endif /* __GENKSYMS__ */
+struct bio;
+struct request_queue;
+struct request;
+struct blk_plug;
+struct blk_flush_queue;
DECLARE_HOOK(android_vh_blk_alloc_rqs,
TP_PROTO(size_t *rq_size, struct blk_mq_tag_set *set,
@@ -33,6 +39,84 @@ DECLARE_HOOK(android_vh_blk_rq_ctx_init,
struct blk_mq_alloc_data *data, u64 alloc_time_ns),
TP_ARGS(rq, tags, data, alloc_time_ns));
+DECLARE_HOOK(android_vh_bio_free,
+ TP_PROTO(struct bio *bio),
+ TP_ARGS(bio));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_internal_blk_mq_alloc_request,
+ TP_PROTO(bool *skip, int *tag, struct blk_mq_alloc_data *data),
+ TP_ARGS(skip, tag, data), 1);
+
+DECLARE_HOOK(android_vh_internal_blk_mq_free_request,
+ TP_PROTO(bool *skip, struct request *rq, struct blk_mq_hw_ctx *hctx),
+ TP_ARGS(skip, rq, hctx));
+
+DECLARE_HOOK(android_vh_blk_mq_complete_request,
+ TP_PROTO(bool *skip, struct request *rq),
+ TP_ARGS(skip, rq));
+
+DECLARE_HOOK(android_vh_blk_mq_add_to_requeue_list,
+ TP_PROTO(bool *skip, struct request *rq, bool kick_requeue_list),
+ TP_ARGS(skip, rq, kick_requeue_list));
+
+DECLARE_HOOK(android_vh_blk_mq_get_driver_tag,
+ TP_PROTO(struct request *rq),
+ TP_ARGS(rq));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_blk_mq_delay_run_hw_queue,
+ TP_PROTO(bool *skip, struct blk_mq_hw_ctx *hctx, bool async),
+ TP_ARGS(skip, hctx, async), 1);
+
+DECLARE_HOOK(android_vh_blk_mq_run_hw_queue,
+ TP_PROTO(bool *need_run, struct blk_mq_hw_ctx *hctx),
+ TP_ARGS(need_run, hctx));
+
+DECLARE_HOOK(android_vh_blk_mq_insert_request,
+ TP_PROTO(bool *skip, struct blk_mq_hw_ctx *hctx, struct request *rq),
+ TP_ARGS(skip, hctx, rq));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_blk_mq_alloc_rq_map,
+ TP_PROTO(bool *skip, struct blk_mq_tags **tags,
+ struct blk_mq_tag_set *set, int node, unsigned int flags),
+ TP_ARGS(skip, tags, set, node, flags), 1);
+
+DECLARE_HOOK(android_vh_blk_mq_hctx_notify_dead,
+ TP_PROTO(bool *skip, struct blk_mq_hw_ctx *hctx),
+ TP_ARGS(skip, hctx));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_blk_mq_init_allocated_queue,
+ TP_PROTO(struct request_queue *q),
+ TP_ARGS(q), 1);
+
+DECLARE_HOOK(android_vh_blk_mq_exit_queue,
+ TP_PROTO(struct request_queue *q),
+ TP_ARGS(q));
+
+DECLARE_HOOK(android_vh_blk_mq_alloc_tag_set,
+ TP_PROTO(struct blk_mq_tag_set *set),
+ TP_ARGS(set));
+
+DECLARE_HOOK(android_vh_blk_mq_update_nr_requests,
+ TP_PROTO(bool *skip, struct request_queue *q),
+ TP_ARGS(skip, q));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_blk_allocated_queue_init,
+ TP_PROTO(bool *skip, struct request_queue *q),
+ TP_ARGS(skip, q), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_blk_flush_plug_list,
+ TP_PROTO(struct blk_plug *plug, bool from_schedule),
+ TP_ARGS(plug, from_schedule), 1);
+
+DECLARE_HOOK(android_vh_blk_alloc_flush_queue,
+ TP_PROTO(bool *skip, int cmd_size, int flags, int node,
+ struct blk_flush_queue *fq),
+ TP_ARGS(skip, cmd_size, flags, node, fq));
+
+DECLARE_HOOK(android_vh_blk_mq_sched_insert_request,
+ TP_PROTO(bool *skip, struct request *rq),
+ TP_ARGS(skip, rq));
+
#endif /* _TRACE_HOOK_BLOCK_H */
/* This part must be outside protection */
diff --git a/include/trace/hooks/dtask.h b/include/trace/hooks/dtask.h
index d8fa440ea6e9..1e77e78b8c3d 100644
--- a/include/trace/hooks/dtask.h
+++ b/include/trace/hooks/dtask.h
@@ -104,7 +104,9 @@ DECLARE_HOOK(android_vh_percpu_rwsem_wq_add,
TP_PROTO(struct percpu_rw_semaphore *sem, bool reader),
TP_ARGS(sem, reader));
-
+DECLARE_HOOK(android_vh_exit_check,
+ TP_PROTO(struct task_struct *tsk, long code, int group_dead),
+ TP_ARGS(tsk, code, group_dead));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_DTASK_H */
diff --git a/include/trace/hooks/fault.h b/include/trace/hooks/fault.h
index de66a1ca33c7..0d99b07a3a66 100644
--- a/include/trace/hooks/fault.h
+++ b/include/trace/hooks/fault.h
@@ -40,6 +40,11 @@ DECLARE_HOOK(android_vh_handle_tlb_conf,
/* macro versions of hooks are no longer required */
+DECLARE_HOOK(android_vh_try_fixup_sea,
+ TP_PROTO(unsigned long addr, unsigned long esr, struct pt_regs *regs,
+ bool *can_fixup),
+ TP_ARGS(addr, esr, regs, can_fixup));
+
#endif /* _TRACE_HOOK_FAULT_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 05b31d60acf6..a13fa043c092 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -237,6 +237,7 @@ struct uffdio_copy {
* according to the uffdio_register.ioctls.
*/
#define UFFDIO_COPY_MODE_WP ((__u64)1<<1)
+#define UFFDIO_COPY_MODE_MMAP_TRYLOCK ((__u64)1<<63)
__u64 mode;
/*
@@ -249,6 +250,7 @@ struct uffdio_copy {
struct uffdio_zeropage {
struct uffdio_range range;
#define UFFDIO_ZEROPAGE_MODE_DONTWAKE ((__u64)1<<0)
+#define UFFDIO_ZEROPAGE_MODE_MMAP_TRYLOCK ((__u64)1<<63)
__u64 mode;
/*
diff --git a/kernel/exit.c b/kernel/exit.c
index 7231d6afbdd8..10d801577b65 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -70,6 +70,7 @@
#include
#include
#include
+#include <trace/hooks/dtask.h>
/*
* The default value should be high enough to not crash a system that randomly
@@ -820,6 +821,7 @@ void __noreturn do_exit(long code)
sync_mm_rss(tsk->mm);
acct_update_integrals(tsk);
group_dead = atomic_dec_and_test(&tsk->signal->live);
+ trace_android_vh_exit_check(current, code, group_dead);
if (group_dead) {
/*
* If the last thread of global init has exited, panic
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 1698fbe6f0e1..5b3e199d7276 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -65,6 +65,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
* task_work_cancel_match - cancel a pending work added by task_work_add()
* @task: the task which should execute the work
* @match: match function to call
+ * @data: data to be passed in to match function
*
* RETURNS:
* The found work or NULL if not found.
diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
index 6a1b9272ea12..6e82210ac380 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/vaddr-test.h
@@ -152,6 +152,8 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
}
+
+ damon_destroy_target(t);
}
/*
diff --git a/mm/memory.c b/mm/memory.c
index 7b07dc7bcd1f..94c5e331e49e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5040,6 +5040,7 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
pud_t pudval;
int seq;
vm_fault_t ret;
+ bool uffd_missing_sigbus = false;
/* Clear flags that may lead to release the mmap_sem to retry */
flags &= ~(FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_KILLABLE);
@@ -5052,20 +5053,31 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
return VM_FAULT_RETRY;
}
- if (!vmf_allows_speculation(&vmf))
- return VM_FAULT_RETRY;
-
vmf.vma_flags = READ_ONCE(vmf.vma->vm_flags);
vmf.vma_page_prot = READ_ONCE(vmf.vma->vm_page_prot);
#ifdef CONFIG_USERFAULTFD
- /* Can't call userland page fault handler in the speculative path */
+ /*
+ * Only support SPF for SIGBUS+MISSING userfaults in private anonymous
+ * VMAs. Rest all should be retried with mmap_lock.
+ */
if (unlikely(vmf.vma_flags & __VM_UFFD_FLAGS)) {
- trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
- return VM_FAULT_RETRY;
+ uffd_missing_sigbus = vma_is_anonymous(vmf.vma) &&
+ (vmf.vma_flags & VM_UFFD_MISSING) &&
+ userfaultfd_using_sigbus(vmf.vma);
+ if (!uffd_missing_sigbus) {
+ trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
+ return VM_FAULT_RETRY;
+ }
+ /* Not having anon_vma implies that the PTE is missing */
+ if (!vmf.vma->anon_vma)
+ return VM_FAULT_SIGBUS;
}
#endif
+ if (!vmf_allows_speculation(&vmf))
+ return VM_FAULT_RETRY;
+
if (vmf.vma_flags & VM_GROWSDOWN || vmf.vma_flags & VM_GROWSUP) {
/*
* This could be detected by the check address against VMA's
@@ -5183,6 +5195,9 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
local_irq_enable();
+ if (!vmf.pte && uffd_missing_sigbus)
+ return VM_FAULT_SIGBUS;
+
/*
* We need to re-validate the VMA after checking the bounds, otherwise
* we might have a false positive on the bounds.
@@ -5216,7 +5231,12 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
out_walk:
trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
local_irq_enable();
- return VM_FAULT_RETRY;
+ /*
+ * Failing page-table walk is similar to page-missing so give an
+ * opportunity to SIGBUS+MISSING userfault to handle it before retrying
+ * with mmap_lock
+ */
+ return uffd_missing_sigbus ? VM_FAULT_SIGBUS : VM_FAULT_RETRY;
out_segv:
trace_spf_vma_access(_RET_IP_, vmf.vma, address);
diff --git a/mm/mremap.c b/mm/mremap.c
index b1263e9a16af..57b73944bfbc 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -217,7 +217,7 @@ static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
* If we have the only reference, swap the refcount to -1. This
* will prevent other concurrent references by get_vma() for SPFs.
*/
- return atomic_cmpxchg(&vma->vm_ref_count, 1, -1) == 1;
+ return atomic_cmpxchg_acquire(&vma->vm_ref_count, 1, -1) == 1;
}
/*
@@ -225,12 +225,13 @@ static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
*/
static inline void unlock_vma_ref_count(struct vm_area_struct *vma)
{
+ int old = atomic_xchg_release(&vma->vm_ref_count, 1);
+
/*
* This should only be called after a corresponding,
* successful trylock_vma_ref_count().
*/
- VM_BUG_ON_VMA(atomic_cmpxchg(&vma->vm_ref_count, -1, 1) != -1,
- vma);
+ VM_BUG_ON_VMA(old != -1, vma);
}
#else /* !CONFIG_SPECULATIVE_PAGE_FAULT */
static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index fa707e50b102..e8e8a7951c4a 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -42,7 +42,7 @@ struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
* enforce the VM_MAYWRITE check done at uffd registration
* time.
*/
- if (!dst_vma->vm_userfaultfd_ctx.ctx)
+ if (!rcu_access_pointer(dst_vma->vm_userfaultfd_ctx.ctx))
return NULL;
return dst_vma;
@@ -559,14 +559,19 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
copied = 0;
page = NULL;
retry:
- mmap_read_lock(dst_mm);
+ err = -EAGAIN;
+ if (mode & UFFDIO_MODE_MMAP_TRYLOCK) {
+ if (!mmap_read_trylock(dst_mm))
+ goto out;
+ } else {
+ mmap_read_lock(dst_mm);
+ }
/*
* If memory mappings are changing because of non-cooperative
* operation (e.g. mremap) running in parallel, bail out and
* request the user to retry later
*/
- err = -EAGAIN;
if (mmap_changing && READ_ONCE(*mmap_changing))
goto out_unlock;
@@ -659,6 +664,15 @@ retry:
if (unlikely(err == -ENOENT)) {
void *page_kaddr;
+ /*
+ * Return early due to mmap_lock contention only after
+ * some pages are copied to ensure that jank sensitive
+ * threads don't keep retrying for progress-critical
+ * pages.
+ */
+ if (copied && mmap_lock_is_contended(dst_mm))
+ break;
+
mmap_read_unlock(dst_mm);
BUG_ON(!page);
@@ -683,6 +697,9 @@ retry:
if (fatal_signal_pending(current))
err = -EINTR;
+
+ if (mmap_lock_is_contended(dst_mm))
+ err = -EAGAIN;
}
if (err)
break;
@@ -708,10 +725,10 @@ ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
}
ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
- unsigned long len, bool *mmap_changing)
+ unsigned long len, bool *mmap_changing, __u64 mode)
{
return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
- mmap_changing, 0);
+ mmap_changing, mode);
}
ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 12d9d0d0c602..18c0d163dc76 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -237,7 +237,7 @@ static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
static const struct nft_rbtree_elem *
nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
- struct nft_rbtree_elem *rbe, u8 genmask)
+ struct nft_rbtree_elem *rbe)
{
struct nft_set *set = (struct nft_set *)__set;
struct rb_node *prev = rb_prev(&rbe->node);
@@ -256,7 +256,7 @@ nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
while (prev) {
rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
if (nft_rbtree_interval_end(rbe_prev) &&
- nft_set_elem_active(&rbe_prev->ext, genmask))
+ nft_set_elem_active(&rbe_prev->ext, NFT_GENMASK_ANY))
break;
prev = rb_prev(prev);
@@ -367,7 +367,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
nft_set_elem_active(&rbe->ext, cur_genmask)) {
const struct nft_rbtree_elem *removed_end;
- removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
+ removed_end = nft_rbtree_gc_elem(set, priv, rbe);
if (IS_ERR(removed_end))
return PTR_ERR(removed_end);
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index d0fb04ae372c..ab8686908f78 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -849,18 +849,13 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
void *data)
{
struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
- int ret = -ENOTSUPP;
if (hcp->hcd.ops->hook_plugged_cb) {
hcp->jack = jack;
- ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
- hcp->hcd.data,
- plugged_cb,
- component->dev);
- if (ret)
- hcp->jack = NULL;
+ return 0;
}
- return ret;
+
+ return -ENOTSUPP;
}
static int hdmi_dai_spdif_probe(struct snd_soc_dai *dai)
@@ -944,6 +939,21 @@ static int hdmi_of_xlate_dai_id(struct snd_soc_component *component,
return ret;
}
+static int hdmi_probe(struct snd_soc_component *component)
+{
+ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+ int ret = 0;
+
+ if (hcp->hcd.ops->hook_plugged_cb) {
+ ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
+ hcp->hcd.data,
+ plugged_cb,
+ component->dev);
+ }
+
+ return ret;
+}
+
static void hdmi_remove(struct snd_soc_component *component)
{
struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
@@ -954,6 +964,7 @@ static void hdmi_remove(struct snd_soc_component *component)
}
static const struct snd_soc_component_driver hdmi_driver = {
+ .probe = hdmi_probe,
.remove = hdmi_remove,
.dapm_widgets = hdmi_widgets,
.num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
diff --git a/tools/testing/selftests/damon/config b/tools/testing/selftests/damon/config
new file mode 100644
index 000000000000..0daf38974eb0
--- /dev/null
+++ b/tools/testing/selftests/damon/config
@@ -0,0 +1,7 @@
+CONFIG_DAMON=y
+CONFIG_DAMON_SYSFS=y
+CONFIG_DAMON_DBGFS=y
+CONFIG_DAMON_PADDR=y
+CONFIG_DAMON_VADDR=y
+CONFIG_DAMON_RECLAIM=y
+CONFIG_DAMON_LRU_SORT=y