From aebdd1ae36280d4fbbac80fa8591d542cc49dba0 Mon Sep 17 00:00:00 2001
From: Prakash Gupta
Date: Thu, 1 Jul 2021 16:13:01 +0530
Subject: [PATCH 01/62] ANDROID: GKI: Update abi_gki_aarch64_qcom for
 oom_check_panic symbol

Whitelist the symbol __tracepoint_android_vh_oom_check_panic

Leaf changes summary: 1 artifact changed
Changed leaf types summary: 0 leaf type changed
Removed/Changed/Added functions summary: 0 Removed, 0 Changed, 0 Added function
Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 1 Added variable

1 Added variable:

  [A] 'tracepoint __tracepoint_android_vh_oom_check_panic'

Bug: 186875166
Change-Id: Ie54977a3fad4645a6d55c87f2c36ae4a0347c59b
Signed-off-by: Prakash Gupta
---
 android/abi_gki_aarch64.xml  | 78 ++++++++++++++++++------------------
 android/abi_gki_aarch64_qcom |  1 +
 2 files changed, 41 insertions(+), 38 deletions(-)

diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml
index 72cb03c89a4f..1f8692f9bc05 100755
--- a/android/abi_gki_aarch64.xml
+++ b/android/abi_gki_aarch64.xml

[hunks omitted: the XML element bodies of this ABI diff were lost in extraction, leaving only bare +/- markers]

diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom
index 0b00b1b2e4a9..9ca7f943d6b9 100644
--- a/android/abi_gki_aarch64_qcom
+++ b/android/abi_gki_aarch64_qcom
@@ -2604,6 +2604,7 @@
   __tracepoint_android_vh_jiffies_update
   __tracepoint_android_vh_logbuf
   __tracepoint_android_vh_logbuf_pr_cont
+  __tracepoint_android_vh_oom_check_panic
   __tracepoint_android_vh_printk_hotplug
   __tracepoint_android_vh_process_killed
   __tracepoint_android_vh_psi_event

From 8d2e1c8a3ede29806348c32acde1afafda9344f8 Mon Sep 17 00:00:00 2001
From: Rick Yiu
Date: Thu, 1 Jul 2021 18:05:59 +0800
Subject: [PATCH 02/62] ANDROID: Update the ABI representation

Leaf changes summary: 2 artifacts changed
Changed leaf types summary: 0 leaf type changed
Removed/Changed/Added functions summary: 0 Removed, 0 Changed, 1 Added function
Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 1 Added variable

1 Added function:

  [A] 'function int __traceiter_android_vh_setscheduler_uclamp(void*, task_struct*, int, unsigned int)'

1 Added variable:

  [A] 'tracepoint __tracepoint_android_vh_setscheduler_uclamp'

Bug: 191973176
Signed-off-by: Rick Yiu
Change-Id: Id8c2dd8a1201b1adaa84e9635331b5c703c742bd
---
 android/abi_gki_aarch64.xml     | 194 +++++++++++++++++---------------
 android/abi_gki_aarch64_generic |   2 +
 2 files changed, 104 insertions(+), 92 deletions(-)

diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml
index 1f8692f9bc05..f1b0315ecb69 100755
--- a/android/abi_gki_aarch64.xml
+++ b/android/abi_gki_aarch64.xml

[hunks omitted: the XML element bodies of this ABI diff were lost in extraction, leaving only bare +/- markers]

diff --git a/android/abi_gki_aarch64_generic b/android/abi_gki_aarch64_generic
index edfb40d1cdbb..98995368def0 100644
--- a/android/abi_gki_aarch64_generic
+++ b/android/abi_gki_aarch64_generic
@@ -1871,6 +1871,7 @@
   __traceiter_android_vh_of_i2c_get_board_info
   __traceiter_android_vh_pagecache_get_page
   __traceiter_android_vh_rmqueue
+  __traceiter_android_vh_setscheduler_uclamp
   __traceiter_android_vh_thermal_pm_notify_suspend
   __traceiter_android_vh_timerfd_create
   __traceiter_android_vh_typec_store_partner_src_caps
@@ -1940,6 +1941,7 @@
   __tracepoint_android_vh_of_i2c_get_board_info
   __tracepoint_android_vh_pagecache_get_page
   __tracepoint_android_vh_rmqueue
+  __tracepoint_android_vh_setscheduler_uclamp
   __tracepoint_android_vh_thermal_pm_notify_suspend
   __tracepoint_android_vh_timerfd_create
   __tracepoint_android_vh_typec_store_partner_src_caps

From 3b3bec886689de128a2556759e856f1a9d27f64a Mon Sep 17 00:00:00 2001
From: Jaegeuk Kim
Date: Mon, 28 Jun 2021 00:05:01 -0700
Subject: [PATCH 03/62] Revert "Revert "KMI: BACKPORT: FROMGIT: scsi: ufs:
 Optimize host lock on transfer requests send/compl paths""

This reverts commit 850f11aa85f055dd914928b7aea2e99319800459.

We need to go back to the upstream version with the right fix.

Bug: 192095860
Signed-off-by: Jaegeuk Kim
Change-Id: I26bf924125f06e97c1262578c99a2dbb58394235
---
 drivers/scsi/ufs/ufshcd.c | 15 +++++++++------
 drivers/scsi/ufs/ufshcd.h |  2 ++
 2 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 6656c98c7cd2..c2b862f81d2b 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2665,7 +2665,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		(hba->clk_gating.state != CLKS_ON));
 
 	lrbp = &hba->lrb[tag];
-	if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
+	if (unlikely(lrbp->in_use)) {
 		if (hba->pm_op_in_progress)
 			set_host_byte(cmd, DID_BAD_TARGET);
 		else
@@ -2925,7 +2925,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 	init_completion(&wait);
 	lrbp = &hba->lrb[tag];
-	if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
+	if (unlikely(lrbp->in_use)) {
 		err = -EBUSY;
 		goto out;
 	}
@@ -5134,9 +5134,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 	bool update_scaling = false;
 
 	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
-		if (!test_and_clear_bit(index, &hba->outstanding_reqs))
-			continue;
 		lrbp = &hba->lrb[index];
+		lrbp->in_use = false;
 		lrbp->compl_time_stamp = ktime_get();
 		cmd = lrbp->cmd;
 		if (cmd) {
@@ -5168,6 +5167,9 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
 			hba->clk_scaling.active_reqs--;
 		}
+	/* clear corresponding bits of completed commands */
+	hba->outstanding_reqs ^= completed_reqs;
+
 	ufshcd_clk_scaling_update_busy(hba);
 }
@@ -6601,11 +6603,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 	WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
 
 	init_completion(&wait);
-	if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
+	lrbp = &hba->lrb[tag];
+	if (unlikely(lrbp->in_use)) {
 		err = -EBUSY;
 		goto out;
 	}
-	lrbp = &hba->lrb[tag];
 
 	WARN_ON(lrbp->cmd);
 	lrbp->cmd = NULL;
@@ -6974,6 +6976,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
 	if (lrbp->cmd) {
 		__ufshcd_transfer_req_compl(hba, (1UL << tag));
 		__set_bit(tag, &hba->outstanding_reqs);
+		lrbp->in_use = true;
 		hba->force_reset = true;
 		ufshcd_schedule_eh_work(hba);
 	}
diff --git a/drivers/scsi/ufs/ufshcd.h
b/drivers/scsi/ufs/ufshcd.h index 0c70f8a8c158..9ce98969446c 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -188,6 +188,7 @@ struct ufs_pm_lvl_states { * @crypto_key_slot: the key slot to use for inline crypto (-1 if none) * @data_unit_num: the data unit number for the first block for inline crypto * @req_abort_skip: skip request abort task flag + * @in_use: indicates that this lrb is still in use */ struct ufshcd_lrb { struct utp_transfer_req_desc *utr_descriptor_ptr; @@ -217,6 +218,7 @@ struct ufshcd_lrb { #endif bool req_abort_skip; + bool in_use; }; /** From 9c0d749a4b3e55d7945dc3921b5f7de817282eee Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Mon, 28 Jun 2021 00:05:31 -0700 Subject: [PATCH 04/62] Revert "Revert "BACKPORT: FROMGIT: scsi: ufs: Optimize host lock on transfer requests send/compl paths"" This reverts commit 46575badbb5237f6db49d61f4d644c88d8a3b62b. We need to go back upstream version with right fix. Bug: 192095860 Signed-off-by: Jaegeuk Kim Change-Id: I3dd1eb638bb3a95b3c8d40673f0821afdeb74f96 --- drivers/scsi/ufs/ufshcd.c | 263 +++++++++++++++++++------------------- drivers/scsi/ufs/ufshcd.h | 2 - 2 files changed, 128 insertions(+), 137 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index c2b862f81d2b..99fb33f8acb3 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -730,7 +730,7 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos) */ static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag) { - __clear_bit(tag, &hba->outstanding_reqs); + clear_bit(tag, &hba->outstanding_reqs); } /** @@ -1956,15 +1956,19 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) { bool queue_resume_work = false; ktime_t curr_t = ktime_get(); + unsigned long flags; if (!ufshcd_is_clkscaling_supported(hba)) return; + spin_lock_irqsave(hba->host->host_lock, flags); if (!hba->clk_scaling.active_reqs++) queue_resume_work = true; - if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) + if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) { + spin_unlock_irqrestore(hba->host->host_lock, flags); return; + } if (queue_resume_work) queue_work(hba->clk_scaling.workq, @@ -1980,21 +1984,26 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba) hba->clk_scaling.busy_start_t = curr_t; hba->clk_scaling.is_busy_started = true; } + spin_unlock_irqrestore(hba->host->host_lock, flags); } static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) { struct ufs_clk_scaling *scaling = &hba->clk_scaling; + unsigned long flags; if (!ufshcd_is_clkscaling_supported(hba)) return; + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_scaling.active_reqs--; if (!hba->outstanding_reqs && scaling->is_busy_started) { scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), scaling->busy_start_t)); scaling->busy_start_t = 0; scaling->is_busy_started = false; } + spin_unlock_irqrestore(hba->host->host_lock, flags); } static inline int ufshcd_monitor_opcode2dir(u8 opcode) @@ -2020,15 +2029,20 @@ static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba, static void ufshcd_start_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); + unsigned long flags; + spin_lock_irqsave(hba->host->host_lock, flags); if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0) hba->monitor.busy_start_ts[dir] = ktime_get(); + spin_unlock_irqrestore(hba->host->host_lock, flags); } static void ufshcd_update_monitor(struct 
ufs_hba *hba, struct ufshcd_lrb *lrbp) { int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd); + unsigned long flags; + spin_lock_irqsave(hba->host->host_lock, flags); if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) { struct request *req = lrbp->cmd->request; struct ufs_hba_monitor *m = &hba->monitor; @@ -2052,6 +2066,7 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) /* Push forward the busy start of monitor */ m->busy_start_ts[dir] = now; } + spin_unlock_irqrestore(hba->host->host_lock, flags); } /** @@ -2063,6 +2078,7 @@ static inline void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; + unsigned long flags; lrbp->issue_time_stamp = ktime_get(); lrbp->compl_time_stamp = ktime_set(0, 0); @@ -2070,10 +2086,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) trace_android_vh_ufs_send_command(hba, lrbp); ufshcd_add_command_trace(hba, task_tag, "send"); ufshcd_clk_scaling_start_busy(hba); - __set_bit(task_tag, &hba->outstanding_reqs); if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) ufshcd_start_monitor(hba, lrbp); + spin_lock_irqsave(hba->host->host_lock, flags); + set_bit(task_tag, &hba->outstanding_reqs); ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); + spin_unlock_irqrestore(hba->host->host_lock, flags); /* Make sure that doorbell is committed immediately */ wmb(); } @@ -2637,7 +2655,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) { struct ufshcd_lrb *lrbp; struct ufs_hba *hba; - unsigned long flags; int tag; int err = 0; @@ -2654,6 +2671,43 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) if (!down_read_trylock(&hba->clk_scaling_lock)) return SCSI_MLQUEUE_HOST_BUSY; + switch (hba->ufshcd_state) { + case UFSHCD_STATE_OPERATIONAL: + case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: + break; + case UFSHCD_STATE_EH_SCHEDULED_FATAL: + /* + * pm_runtime_get_sync() is used at error handling preparation + * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's + * PM ops, it can never be finished if we let SCSI layer keep + * retrying it, which gets err handler stuck forever. Neither + * can we let the scsi cmd pass through, because UFS is in bad + * state, the scsi cmd may eventually time out, which will get + * err handler blocked for too long. So, just fail the scsi cmd + * sent from PM ops, err handler can recover PM error anyways. 
+ */ + if (hba->pm_op_in_progress) { + hba->force_reset = true; + set_host_byte(cmd, DID_BAD_TARGET); + cmd->scsi_done(cmd); + goto out; + } + fallthrough; + case UFSHCD_STATE_RESET: + err = SCSI_MLQUEUE_HOST_BUSY; + goto out; + case UFSHCD_STATE_ERROR: + set_host_byte(cmd, DID_ERROR); + cmd->scsi_done(cmd); + goto out; + default: + dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n", + __func__, hba->ufshcd_state); + set_host_byte(cmd, DID_BAD_TARGET); + cmd->scsi_done(cmd); + goto out; + } + hba->req_abort_count = 0; err = ufshcd_hold(hba, true); @@ -2664,8 +2718,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) WARN_ON(ufshcd_is_clkgating_allowed(hba) && (hba->clk_gating.state != CLKS_ON)); - lrbp = &hba->lrb[tag]; - if (unlikely(lrbp->in_use)) { + if (unlikely(test_bit(tag, &hba->outstanding_reqs))) { if (hba->pm_op_in_progress) set_host_byte(cmd, DID_BAD_TARGET); else @@ -2674,6 +2727,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto out; } + lrbp = &hba->lrb[tag]; WARN_ON(lrbp->cmd); lrbp->cmd = cmd; lrbp->sense_bufflen = UFS_SENSE_SIZE; @@ -2704,51 +2758,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) /* Make sure descriptors are ready before ringing the doorbell */ wmb(); - spin_lock_irqsave(hba->host->host_lock, flags); - switch (hba->ufshcd_state) { - case UFSHCD_STATE_OPERATIONAL: - case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: - break; - case UFSHCD_STATE_EH_SCHEDULED_FATAL: - /* - * pm_runtime_get_sync() is used at error handling preparation - * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's - * PM ops, it can never be finished if we let SCSI layer keep - * retrying it, which gets err handler stuck forever. Neither - * can we let the scsi cmd pass through, because UFS is in bad - * state, the scsi cmd may eventually time out, which will get - * err handler blocked for too long. So, just fail the scsi cmd - * sent from PM ops, err handler can recover PM error anyways. 
- */ - if (hba->pm_op_in_progress) { - hba->force_reset = true; - set_host_byte(cmd, DID_BAD_TARGET); - goto out_compl_cmd; - } - fallthrough; - case UFSHCD_STATE_RESET: - err = SCSI_MLQUEUE_HOST_BUSY; - goto out_compl_cmd; - case UFSHCD_STATE_ERROR: - set_host_byte(cmd, DID_ERROR); - goto out_compl_cmd; - default: - dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n", - __func__, hba->ufshcd_state); - set_host_byte(cmd, DID_BAD_TARGET); - goto out_compl_cmd; - } ufshcd_send_command(hba, tag); - spin_unlock_irqrestore(hba->host->host_lock, flags); - goto out; - -out_compl_cmd: - scsi_dma_unmap(lrbp->cmd); - lrbp->cmd = NULL; - spin_unlock_irqrestore(hba->host->host_lock, flags); - ufshcd_release(hba); - if (!err) - cmd->scsi_done(cmd); out: up_read(&hba->clk_scaling_lock); return err; @@ -2903,7 +2913,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, int err; int tag; struct completion wait; - unsigned long flags; down_read(&hba->clk_scaling_lock); @@ -2923,34 +2932,30 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, req->timeout = msecs_to_jiffies(2 * timeout); blk_mq_start_request(req); - init_completion(&wait); - lrbp = &hba->lrb[tag]; - if (unlikely(lrbp->in_use)) { + if (unlikely(test_bit(tag, &hba->outstanding_reqs))) { err = -EBUSY; goto out; } + init_completion(&wait); + lrbp = &hba->lrb[tag]; WARN_ON(lrbp->cmd); err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); if (unlikely(err)) - goto out_put_tag; + goto out; hba->dev_cmd.complete = &wait; ufshcd_add_query_upiu_trace(hba, tag, "query_send"); /* Make sure descriptors are ready before ringing the doorbell */ wmb(); - spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_send_command(hba, tag); - spin_unlock_irqrestore(hba->host->host_lock, flags); - err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); - -out: ufshcd_add_query_upiu_trace(hba, tag, err ? 
"query_complete_err" : "query_complete"); -out_put_tag: +out: blk_put_request(req); out_unlock: up_read(&hba->clk_scaling_lock); @@ -5083,6 +5088,24 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) return result; } +static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, + u32 intr_mask) +{ + if (!ufshcd_is_auto_hibern8_supported(hba) || + !ufshcd_is_auto_hibern8_enabled(hba)) + return false; + + if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK)) + return false; + + if (hba->active_uic_cmd && + (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER || + hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT)) + return false; + + return true; +} + /** * ufshcd_uic_cmd_compl - handle completion of uic command * @hba: per adapter instance @@ -5096,6 +5119,10 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) { irqreturn_t retval = IRQ_NONE; + spin_lock(hba->host->host_lock); + if (ufshcd_is_auto_hibern8_error(hba, intr_status)) + hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); + if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { hba->active_uic_cmd->argument2 |= ufshcd_get_uic_cmd_result(hba); @@ -5116,6 +5143,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) if (retval == IRQ_HANDLED) ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd, "complete"); + spin_unlock(hba->host->host_lock); return retval; } @@ -5134,8 +5162,9 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, bool update_scaling = false; for_each_set_bit(index, &completed_reqs, hba->nutrs) { + if (!test_and_clear_bit(index, &hba->outstanding_reqs)) + continue; lrbp = &hba->lrb[index]; - lrbp->in_use = false; lrbp->compl_time_stamp = ktime_get(); cmd = lrbp->cmd; if (cmd) { @@ -5151,7 +5180,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, lrbp->cmd = NULL; /* Do not touch lrbp after scsi done */ cmd->scsi_done(cmd); - __ufshcd_release(hba); + ufshcd_release(hba); update_scaling = true; } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { @@ -5163,14 +5192,9 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, update_scaling = true; } } - if (ufshcd_is_clkscaling_supported(hba) && update_scaling) - hba->clk_scaling.active_reqs--; + if (update_scaling) + ufshcd_clk_scaling_update_busy(hba); } - - /* clear corresponding bits of completed commands */ - hba->outstanding_reqs ^= completed_reqs; - - ufshcd_clk_scaling_update_busy(hba); } /** @@ -5183,7 +5207,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, */ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) { - unsigned long completed_reqs; + unsigned long completed_reqs, flags; u32 tr_doorbell; /* Resetting interrupt aggregation counters first and reading the @@ -5197,8 +5221,10 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) ufshcd_reset_intr_aggr(hba); + spin_lock_irqsave(hba->host->host_lock, flags); tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); completed_reqs = tr_doorbell ^ hba->outstanding_reqs; + spin_unlock_irqrestore(hba->host->host_lock, flags); if (completed_reqs) { __ufshcd_transfer_req_compl(hba, completed_reqs); @@ -5956,13 +5982,11 @@ static void ufshcd_err_handler(struct work_struct *work) ufshcd_set_eh_in_progress(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_err_handling_prepare(hba); + /* Complete requests that have 
door-bell cleared by h/w */ + ufshcd_complete_requests(hba); spin_lock_irqsave(hba->host->host_lock, flags); if (hba->ufshcd_state != UFSHCD_STATE_ERROR) hba->ufshcd_state = UFSHCD_STATE_RESET; - - /* Complete requests that have door-bell cleared by h/w */ - ufshcd_complete_requests(hba); - /* * A full reset and restore might have happened after preparation * is finished, double check whether we should stop. @@ -6045,12 +6069,11 @@ static void ufshcd_err_handler(struct work_struct *work) } lock_skip_pending_xfer_clear: - spin_lock_irqsave(hba->host->host_lock, flags); - /* Complete the requests that are cleared by s/w */ ufshcd_complete_requests(hba); - hba->silence_err_logs = false; + spin_lock_irqsave(hba->host->host_lock, flags); + hba->silence_err_logs = false; if (err_xfer || err_tm) { needs_reset = true; goto do_reset; @@ -6200,37 +6223,23 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) return retval; } -static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, - u32 intr_mask) -{ - if (!ufshcd_is_auto_hibern8_supported(hba) || - !ufshcd_is_auto_hibern8_enabled(hba)) - return false; - - if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK)) - return false; - - if (hba->active_uic_cmd && - (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER || - hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT)) - return false; - - return true; -} - /** * ufshcd_check_errors - Check for errors that need s/w attention * @hba: per-adapter instance + * @intr_status: interrupt status generated by the controller * * Returns * IRQ_HANDLED - If interrupt is valid * IRQ_NONE - If invalid interrupt */ -static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba) +static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) { bool queue_eh_work = false; irqreturn_t retval = IRQ_NONE; + spin_lock(hba->host->host_lock); + hba->errors |= UFSHCD_ERROR_MASK & intr_status; + if (hba->errors & INT_FATAL_ERRORS) { ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR, hba->errors); @@ -6287,6 +6296,9 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba) * itself without s/w intervention or errors that will be * handled by the SCSI core layer. */ + hba->errors = 0; + hba->uic_error = 0; + spin_unlock(hba->host->host_lock); return retval; } @@ -6321,13 +6333,17 @@ static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved) */ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) { + unsigned long flags; struct request_queue *q = hba->tmf_queue; struct ctm_info ci = { .hba = hba, - .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL), }; + spin_lock_irqsave(hba->host->host_lock, flags); + ci.pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci); + spin_unlock_irqrestore(hba->host->host_lock, flags); + return ci.ncpl ? 
IRQ_HANDLED : IRQ_NONE; } @@ -6344,17 +6360,12 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) { irqreturn_t retval = IRQ_NONE; - hba->errors = UFSHCD_ERROR_MASK & intr_status; - - if (ufshcd_is_auto_hibern8_error(hba, intr_status)) - hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); - - if (hba->errors) - retval |= ufshcd_check_errors(hba); - if (intr_status & UFSHCD_UIC_MASK) retval |= ufshcd_uic_cmd_compl(hba, intr_status); + if (intr_status & UFSHCD_ERROR_MASK || hba->errors) + retval |= ufshcd_check_errors(hba, intr_status); + if (intr_status & UTP_TASK_REQ_COMPL) retval |= ufshcd_tmc_handler(hba); @@ -6380,7 +6391,6 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) struct ufs_hba *hba = __hba; int retries = hba->nutrs; - spin_lock(hba->host->host_lock); intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); hba->ufs_stats.last_intr_status = intr_status; hba->ufs_stats.last_intr_ts = ktime_get(); @@ -6412,7 +6422,6 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); } - spin_unlock(hba->host->host_lock); return retval; } @@ -6589,7 +6598,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, int err = 0; int tag; struct completion wait; - unsigned long flags; u8 upiu_flags; down_read(&hba->clk_scaling_lock); @@ -6602,13 +6610,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, tag = req->tag; WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag)); - init_completion(&wait); - lrbp = &hba->lrb[tag]; - if (unlikely(lrbp->in_use)) { + if (unlikely(test_bit(tag, &hba->outstanding_reqs))) { err = -EBUSY; goto out; } + init_completion(&wait); + lrbp = &hba->lrb[tag]; WARN_ON(lrbp->cmd); lrbp->cmd = NULL; lrbp->sense_bufflen = 0; @@ -6646,10 +6654,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, /* Make sure descriptors are ready before ringing the doorbell */ wmb(); - spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_send_command(hba, tag); - spin_unlock_irqrestore(hba->host->host_lock, flags); + ufshcd_send_command(hba, tag); /* * ignore the returning value here - ufshcd_check_query_response is * bound to fail since dev_cmd.query and dev_cmd.type were left empty. @@ -6768,7 +6774,6 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) u32 pos; int err; u8 resp = 0xF, lun; - unsigned long flags; host = cmd->device->host; hba = shost_priv(host); @@ -6787,11 +6792,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) err = ufshcd_clear_cmd(hba, pos); if (err) break; + __ufshcd_transfer_req_compl(hba, pos); } } - spin_lock_irqsave(host->host_lock, flags); - ufshcd_transfer_req_compl(hba); - spin_unlock_irqrestore(host->host_lock, flags); out: hba->req_abort_count = 0; @@ -6967,20 +6970,16 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) * will fail, due to spec violation, scsi err handling next step * will be to send LU reset which, again, is a spec violation. * To avoid these unnecessary/illegal steps, first we clean up - * the lrb taken by this cmd and mark the lrb as in_use, then - * queue the eh_work and bail. + * the lrb taken by this cmd and re-set it in outstanding_reqs, + * then queue the eh_work and bail. 
*/ if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) { ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); + __ufshcd_transfer_req_compl(hba, (1UL << tag)); + set_bit(tag, &hba->outstanding_reqs); spin_lock_irqsave(host->host_lock, flags); - if (lrbp->cmd) { - __ufshcd_transfer_req_compl(hba, (1UL << tag)); - __set_bit(tag, &hba->outstanding_reqs); - lrbp->in_use = true; - hba->force_reset = true; - ufshcd_schedule_eh_work(hba); - } - + hba->force_reset = true; + ufshcd_schedule_eh_work(hba); spin_unlock_irqrestore(host->host_lock, flags); goto out; } @@ -6993,9 +6992,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) if (!err) { cleanup: - spin_lock_irqsave(host->host_lock, flags); __ufshcd_transfer_req_compl(hba, (1UL << tag)); - spin_unlock_irqrestore(host->host_lock, flags); out: err = SUCCESS; } else { @@ -7025,19 +7022,15 @@ out: static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) { int err; - unsigned long flags; /* * Stop the host controller and complete the requests * cleared by h/w */ ufshcd_hba_stop(hba); - - spin_lock_irqsave(hba->host->host_lock, flags); hba->silence_err_logs = true; ufshcd_complete_requests(hba); hba->silence_err_logs = false; - spin_unlock_irqrestore(hba->host->host_lock, flags); /* scale up clocks to max frequency before full reinitialization */ ufshcd_set_clk_freq(hba, true); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 9ce98969446c..0c70f8a8c158 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -188,7 +188,6 @@ struct ufs_pm_lvl_states { * @crypto_key_slot: the key slot to use for inline crypto (-1 if none) * @data_unit_num: the data unit number for the first block for inline crypto * @req_abort_skip: skip request abort task flag - * @in_use: indicates that this lrb is still in use */ struct ufshcd_lrb { struct utp_transfer_req_desc *utr_descriptor_ptr; @@ -218,7 +217,6 @@ struct ufshcd_lrb { #endif bool req_abort_skip; - bool in_use; }; /** From e0288fd77906a94ef6b3e1c67d94f954d87fd40c Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Mon, 28 Jun 2021 00:05:55 -0700 Subject: [PATCH 05/62] Revert "Revert "FROMGIT: scsi: ufs: Utilize Transfer Request List Completion Notification Register"" This reverts commit 83d653257a55b8aa8ec3b1f49eaf6a507701a0a3. We need to go back upstream version with right fix. 
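For context, a minimal sketch of the two completion-detection schemes this
revert switches between, simplified from the diff below:

    /* Doorbell-based detection: needs host_lock to pair the doorbell
     * snapshot with outstanding_reqs. */
    spin_lock_irqsave(hba->host->host_lock, flags);
    tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
    completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
    spin_unlock_irqrestore(hba->host->host_lock, flags);

    /* UTRLCNR-based detection: the completion notification register is
     * read and cleared directly, so no host_lock is required. */
    utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL);
    if (utrlcnr) {
        ufshcd_writel(hba, utrlcnr, REG_UTP_TRANSFER_REQ_LIST_COMPL);
        completed_reqs = utrlcnr;
    }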
Bug: 192088222 Signed-off-by: Jaegeuk Kim Change-Id: I7a52e161e5c82a13304fb5ba96bb6a5c6dacd06a --- drivers/scsi/ufs/ufshcd.c | 52 ++++++++++++++++++++++++++++----------- drivers/scsi/ufs/ufshcd.h | 5 ++++ drivers/scsi/ufs/ufshci.h | 1 + 3 files changed, 43 insertions(+), 15 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 99fb33f8acb3..12060ccc15de 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -2078,7 +2078,6 @@ static inline void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; - unsigned long flags; lrbp->issue_time_stamp = ktime_get(); lrbp->compl_time_stamp = ktime_set(0, 0); @@ -2088,10 +2087,19 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) ufshcd_clk_scaling_start_busy(hba); if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) ufshcd_start_monitor(hba, lrbp); - spin_lock_irqsave(hba->host->host_lock, flags); - set_bit(task_tag, &hba->outstanding_reqs); - ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); - spin_unlock_irqrestore(hba->host->host_lock, flags); + if (ufshcd_has_utrlcnr(hba)) { + set_bit(task_tag, &hba->outstanding_reqs); + ufshcd_writel(hba, 1 << task_tag, + REG_UTP_TRANSFER_REQ_DOOR_BELL); + } else { + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + set_bit(task_tag, &hba->outstanding_reqs); + ufshcd_writel(hba, 1 << task_tag, + REG_UTP_TRANSFER_REQ_DOOR_BELL); + spin_unlock_irqrestore(hba->host->host_lock, flags); + } /* Make sure that doorbell is committed immediately */ wmb(); } @@ -5198,17 +5206,17 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, } /** - * ufshcd_transfer_req_compl - handle SCSI and query command completion + * ufshcd_trc_handler - handle transfer requests completion * @hba: per adapter instance + * @use_utrlcnr: get completed requests from UTRLCNR * * Returns * IRQ_HANDLED - If interrupt is valid * IRQ_NONE - If invalid interrupt */ -static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) +static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr) { - unsigned long completed_reqs, flags; - u32 tr_doorbell; + unsigned long completed_reqs = 0; /* Resetting interrupt aggregation counters first and reading the * DOOR_BELL afterward allows us to handle all the completed requests. 
@@ -5221,10 +5229,24 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) ufshcd_reset_intr_aggr(hba); - spin_lock_irqsave(hba->host->host_lock, flags); - tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - completed_reqs = tr_doorbell ^ hba->outstanding_reqs; - spin_unlock_irqrestore(hba->host->host_lock, flags); + if (use_utrlcnr) { + u32 utrlcnr; + + utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL); + if (utrlcnr) { + ufshcd_writel(hba, utrlcnr, + REG_UTP_TRANSFER_REQ_LIST_COMPL); + completed_reqs = utrlcnr; + } + } else { + unsigned long flags; + u32 tr_doorbell; + + spin_lock_irqsave(hba->host->host_lock, flags); + tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + completed_reqs = tr_doorbell ^ hba->outstanding_reqs; + spin_unlock_irqrestore(hba->host->host_lock, flags); + } if (completed_reqs) { __ufshcd_transfer_req_compl(hba, completed_reqs); @@ -5733,7 +5755,7 @@ out: /* Complete requests that have door-bell cleared */ static void ufshcd_complete_requests(struct ufs_hba *hba) { - ufshcd_transfer_req_compl(hba); + ufshcd_trc_handler(hba, false); ufshcd_tmc_handler(hba); } @@ -6370,7 +6392,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) retval |= ufshcd_tmc_handler(hba); if (intr_status & UTP_TRANSFER_REQ_COMPL) - retval |= ufshcd_transfer_req_compl(hba); + retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba)); return retval; } diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 0c70f8a8c158..8714b62682a0 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -1166,6 +1166,11 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba) return ufshcd_readl(hba, REG_UFS_VERSION); } +static inline bool ufshcd_has_utrlcnr(struct ufs_hba *hba) +{ + return (hba->ufs_version >= ufshci_version(3, 0)); +} + static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, bool up, enum ufs_notify_change_status status) { diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 79ba211c1e59..757727dbab81 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -39,6 +39,7 @@ enum { REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58, REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C, REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60, + REG_UTP_TRANSFER_REQ_LIST_COMPL = 0x64, REG_UTP_TASK_REQ_LIST_BASE_L = 0x70, REG_UTP_TASK_REQ_LIST_BASE_H = 0x74, REG_UTP_TASK_REQ_DOOR_BELL = 0x78, From 72fa98b5148caec586f3c32e202fcc1471543935 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 30 Jun 2021 17:45:34 -0700 Subject: [PATCH 06/62] FROMLIST: scsi: ufs: add missing host_lock in setup_xfer_req This patch adds a host_lock which existed before on ufshcd_vops_setup_xfer_req. 
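A sketch of the restored locking, matching the ufshcd.h diff below: the
vendor hook is once again called with host_lock held:

    if (hba->vops && hba->vops->setup_xfer_req) {
        unsigned long flags;

        spin_lock_irqsave(hba->host->host_lock, flags);
        hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
    }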
Bug: 190637035 Cc: Stanley Chu Cc: Can Guo Cc: Bean Huo Cc: Bart Van Assche Cc: Asutosh Das Link: https://lore.kernel.org/linux-scsi/20210701005117.3846179-1-jaegeuk@kernel.org/T/#u Fixes: 7613068f95fb ("BACKPORT: FROMGIT: scsi: ufs: Optimize host lock on transfer requests send/compl paths") Reviewed-by: Bart Van Assche Signed-off-by: Jaegeuk Kim Signed-off-by: Jaegeuk Kim Change-Id: I0e5f9ec11fa62a074bca5feb5638e8d04cf858ee --- drivers/scsi/ufs/ufshcd.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 8714b62682a0..6056ea2ccfb6 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -1235,8 +1235,13 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba, static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag, bool is_scsi_cmd) { - if (hba->vops && hba->vops->setup_xfer_req) - return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd); + if (hba->vops && hba->vops->setup_xfer_req) { + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd); + spin_unlock_irqrestore(hba->host->host_lock, flags); + } } static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba, From 7445a59eed931678be335b92cb183af57fa42772 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 11 Jan 2021 17:47:16 +0100 Subject: [PATCH 07/62] FROMGIT: Revert "blk-mq, elevator: Count requests per hctx to improve performance" This reverts commit b445547ec1bbd3e7bf4b1c142550942f70527d95. Since both mq-deadline and BFQ completely ignore hctx they are passed to their dispatch function and dispatch whatever request they deem fit checking whether any request for a particular hctx is queued is just pointless since we'll very likely get a request from a different hctx anyway. In the following commit we'll deal with lock contention in these IO schedulers in presence of multiple HW queues in a different way. 
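For illustration, after this revert mq-deadline's "has work" check goes back
to consulting the queue-wide lists directly instead of a per-hctx counter,
as the diff below shows:

    static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
    {
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;

        return !list_empty_careful(&dd->dispatch) ||
               !list_empty_careful(&dd->fifo_list[0]) ||
               !list_empty_careful(&dd->fifo_list[1]);
    }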
Signed-off-by: Jan Kara Reviewed-by: Ming Lei Signed-off-by: Jens Axboe Change-Id: Ibd7dbe69ae1799f2efce5788986e2f1aad88f66d BUG: 187357408 (cherry picked from commit 2490aeca0081bb168e96fb7b1746d676be84369f git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/bfq-iosched.c | 5 ----- block/blk-mq.c | 1 - block/mq-deadline.c | 6 ------ include/linux/blk-mq.h | 4 ---- 4 files changed, 16 deletions(-) diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index c91dca641eb4..51b39774267a 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -4640,9 +4640,6 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) { struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; - if (!atomic_read(&hctx->elevator_queued)) - return false; - /* * Avoiding lock: a race on bfqd->busy_queues should cause at * most a call to dispatch for nothing @@ -5557,7 +5554,6 @@ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); bfq_insert_request(hctx, rq, at_head); - atomic_inc(&hctx->elevator_queued); } } @@ -5925,7 +5921,6 @@ static void bfq_finish_requeue_request(struct request *rq) bfq_completed_request(bfqq, bfqd); bfq_finish_requeue_request_body(bfqq); - atomic_dec(&rq->mq_hctx->elevator_queued); spin_unlock_irqrestore(&bfqd->lock, flags); } else { diff --git a/block/blk-mq.c b/block/blk-mq.c index 06b45114eaab..22a4cd8b015d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2740,7 +2740,6 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, goto free_hctx; atomic_set(&hctx->nr_active, 0); - atomic_set(&hctx->elevator_queued, 0); if (node == NUMA_NO_NODE) node = set->numa_node; hctx->numa_node = node; diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 2b9635d0dcba..0bedb81a5af6 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -386,8 +386,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) spin_lock(&dd->lock); rq = __dd_dispatch_request(dd); spin_unlock(&dd->lock); - if (rq) - atomic_dec(&rq->mq_hctx->elevator_queued); return rq; } @@ -534,7 +532,6 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, rq = list_first_entry(list, struct request, queuelist); list_del_init(&rq->queuelist); dd_insert_request(hctx, rq, at_head); - atomic_inc(&hctx->elevator_queued); } spin_unlock(&dd->lock); } @@ -581,9 +578,6 @@ static bool dd_has_work(struct blk_mq_hw_ctx *hctx) { struct deadline_data *dd = hctx->queue->elevator->elevator_data; - if (!atomic_read(&hctx->elevator_queued)) - return false; - return !list_empty_careful(&dd->dispatch) || !list_empty_careful(&dd->fifo_list[0]) || !list_empty_careful(&dd->fifo_list[1]); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index f8ea27423d1d..1626eb27be24 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -139,10 +139,6 @@ struct blk_mq_hw_ctx { * shared across request queues. */ atomic_t nr_active; - /** - * @elevator_queued: Number of queued requests on hctx. 
-	 */
-	atomic_t elevator_queued;
 
 	/** @cpuhp_online: List to store request if CPU is going to die */
 	struct hlist_node cpuhp_online;

From d7d0098f244d85cb0c59a8507390aa82bdd95ddb Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Mon, 11 Jan 2021 17:47:17 +0100
Subject: [PATCH 08/62] FROMGIT: blk-mq: Improve performance of non-mq IO
 schedulers with multiple HW queues

Currently when a non-mq aware IO scheduler (BFQ, mq-deadline) is used for
a queue with multiple HW queues, the performance is rather bad. The
problem is that these IO schedulers use queue-wide locking and their
dispatch function does not respect the hctx it is passed in and returns
any request it finds appropriate. Thus locality of request access is
broken and dispatch from multiple CPUs just contends on IO scheduler
locks. For these IO schedulers there's little point in dispatching from
multiple CPUs. Instead dispatch always only from a single CPU to limit
contention.

Below is a comparison of dbench runs on XFS filesystem where the storage
is a raid card with 64 HW queues and to it attached a single rotating
disk. BFQ is used as IO scheduler:

      clients           MQ                     SQ             MQ-Patched
Amean 1        39.12 (0.00%)      43.29 * -10.67%*      36.09 *   7.74%*
Amean 2       128.58 (0.00%)     101.30 *  21.22%*      96.14 *  25.23%*
Amean 4       577.42 (0.00%)     494.47 *  14.37%*     508.49 *  11.94%*
Amean 8       610.95 (0.00%)     363.86 *  40.44%*     362.12 *  40.73%*
Amean 16      391.78 (0.00%)     261.49 *  33.25%*     282.94 *  27.78%*
Amean 32      324.64 (0.00%)     267.71 *  17.54%*     233.00 *  28.23%*
Amean 64      295.04 (0.00%)     253.02 *  14.24%*     242.37 *  17.85%*
Amean 512   10281.61 (0.00%)   10211.16 *   0.69%*   10447.53 *  -1.61%*

Numbers are times so lower is better. MQ is stock 5.10-rc6 kernel. SQ is
the same kernel with megaraid_sas.host_tagset_enable=0 so that the card
advertises just a single HW queue. MQ-Patched is a kernel with this patch
applied.

You can see multiple hardware queues heavily hurt performance in
combination with BFQ. The patch restores the performance.

Signed-off-by: Jan Kara
Reviewed-by: Ming Lei
Signed-off-by: Jens Axboe

BUG: 187357408
Change-Id: I53645eb48cb308cd3af81a1c5e718a6abec6a1f9
(cherry picked from commit fa56cac78af68bd93734c290a0ffd0716e871dba
 git://git.kernel.dk/linux-block/ for-5.14/block)
Signed-off-by: Bart Van Assche
---
 block/blk-mq.c           | 66 ++++++++++++++++++++++++++++++++++++----
 block/kyber-iosched.c    |  1 +
 include/linux/elevator.h |  2 ++
 3 files changed, 63 insertions(+), 6 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 22a4cd8b015d..08d5571d95e0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1668,6 +1668,42 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 }
 EXPORT_SYMBOL(blk_mq_run_hw_queue);
 
+/*
+ * Is the request queue handled by an IO scheduler that does not respect
+ * hardware queues when dispatching?
+ */
+static bool blk_mq_has_sqsched(struct request_queue *q)
+{
+	struct elevator_queue *e = q->elevator;
+
+	if (e && e->type->ops.dispatch_request &&
+	    !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
+		return true;
+	return false;
+}
+
+/*
+ * Return preferred queue to dispatch from (if any) for non-mq aware IO
+ * scheduler.
+ */
+static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	/*
+	 * If the IO scheduler does not respect hardware queues when
+	 * dispatching, we just don't bother with multiple HW queues and
+	 * dispatch from hctx for the current CPU since running multiple queues
+	 * just causes lock contention inside the scheduler and pointless cache
+	 * bouncing.
+ */ + hctx = blk_mq_map_queue_type(q, HCTX_TYPE_DEFAULT, + raw_smp_processor_id()); + if (!blk_mq_hctx_stopped(hctx)) + return hctx; + return NULL; +} + /** * blk_mq_run_hw_queues - Run all hardware queues in a request queue. * @q: Pointer to the request queue to run. @@ -1675,14 +1711,23 @@ EXPORT_SYMBOL(blk_mq_run_hw_queue); */ void blk_mq_run_hw_queues(struct request_queue *q, bool async) { - struct blk_mq_hw_ctx *hctx; + struct blk_mq_hw_ctx *hctx, *sq_hctx; int i; + sq_hctx = NULL; + if (blk_mq_has_sqsched(q)) + sq_hctx = blk_mq_get_sq_hctx(q); queue_for_each_hw_ctx(q, hctx, i) { if (blk_mq_hctx_stopped(hctx)) continue; - - blk_mq_run_hw_queue(hctx, async); + /* + * Dispatch from this hctx either if there's no hctx preferred + * by IO scheduler or if it has requests that bypass the + * scheduler. + */ + if (!sq_hctx || sq_hctx == hctx || + !list_empty_careful(&hctx->dispatch)) + blk_mq_run_hw_queue(hctx, async); } } EXPORT_SYMBOL(blk_mq_run_hw_queues); @@ -1694,14 +1739,23 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues); */ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) { - struct blk_mq_hw_ctx *hctx; + struct blk_mq_hw_ctx *hctx, *sq_hctx; int i; + sq_hctx = NULL; + if (blk_mq_has_sqsched(q)) + sq_hctx = blk_mq_get_sq_hctx(q); queue_for_each_hw_ctx(q, hctx, i) { if (blk_mq_hctx_stopped(hctx)) continue; - - blk_mq_delay_run_hw_queue(hctx, msecs); + /* + * Dispatch from this hctx either if there's no hctx preferred + * by IO scheduler or if it has requests that bypass the + * scheduler. + */ + if (!sq_hctx || sq_hctx == hctx || + !list_empty_careful(&hctx->dispatch)) + blk_mq_delay_run_hw_queue(hctx, msecs); } } EXPORT_SYMBOL(blk_mq_delay_run_hw_queues); diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index 7f9ef773bf44..54e6de6bdfd9 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -1030,6 +1030,7 @@ static struct elevator_type kyber_sched = { #endif .elevator_attrs = kyber_sched_attrs, .elevator_name = "kyber", + .elevator_features = ELEVATOR_F_MQ_AWARE, .elevator_owner = THIS_MODULE, }; diff --git a/include/linux/elevator.h b/include/linux/elevator.h index bc26b4e11f62..dcb2f9022c1d 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -172,6 +172,8 @@ extern struct request *elv_rb_find(struct rb_root *, sector_t); /* Supports zoned block devices sequential write constraint */ #define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0) +/* Supports scheduling on multiple hardware queues */ +#define ELEVATOR_F_MQ_AWARE (1U << 1) #endif /* CONFIG_BLOCK */ #endif From a749efa728d3bbc2d884d798c2c2a090610210df Mon Sep 17 00:00:00 2001 From: Lin Feng Date: Thu, 15 Apr 2021 11:43:26 +0800 Subject: [PATCH 09/62] FROMGIT: bfq/mq-deadline: remove redundant check for passthrough request Since commit 01e99aeca39796003 'blk-mq: insert passthrough request into hctx->dispatch directly', passthrough request should not appear in IO-scheduler any more, so blk_rq_is_passthrough checking in addon IO schedulers is redundant. (Notes: this patch passes generic IO load test with hdds under SAS controller and hdds under AHCI controller but obviously not covers all. Not sure if passthrough request can still escape into IO scheduler from blk_mq_sched_insert_requests, which is used by blk_mq_flush_plug_list and has lots of indirect callers.) 
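A sketch of the resulting insertion path in mq-deadline, matching the diff
below: with passthrough requests no longer reaching the scheduler, only the
at_head test remains:

    if (at_head) {
        list_add(&rq->queuelist, &dd->dispatch);
    } else {
        deadline_add_rq_rb(dd, rq);
        /* ... merge/fifo bookkeeping unchanged ... */
    }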
Signed-off-by: Lin Feng Reviewed-by: Ming Lei Signed-off-by: Jens Axboe BUG: 187357408 Change-Id: I97d85c38e584add44399295f3839994b694bc9ca (cherry picked from commit 0856faaa220759a4fe4334f5c57a8661c94c14ce git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/mq-deadline.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 0bedb81a5af6..b0773f03978c 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -497,11 +497,8 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_mq_sched_request_inserted(rq); - if (at_head || blk_rq_is_passthrough(rq)) { - if (at_head) - list_add(&rq->queuelist, &dd->dispatch); - else - list_add_tail(&rq->queuelist, &dd->dispatch); + if (at_head) { + list_add(&rq->queuelist, &dd->dispatch); } else { deadline_add_rq_rb(dd, rq); From 3271c9261a001f09c6c5a8b3e25092f0e8594adb Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 3 Jun 2021 14:44:30 -0700 Subject: [PATCH 10/62] FROMGIT: block/Kconfig: Make the BLK_WBT and BLK_WBT_MQ entries consecutive These entries were consecutive at the time of their introduction but are no longer consecutive. Make these again consecutive. Additionally, modify the help text since it refers to blk-mq and since the legacy block layer has been removed. Reviewed-by: Damien Le Moal Reviewed-by: Johannes Thumshirn Reviewed-by: Hannes Reinecke Cc: Christoph Hellwig Cc: Ming Lei Cc: Himanshu Madhani Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: I568383377a3244efba9748adf0a2e90bd7660bb2 (cherry picked from commit fdc250ea26e44066d690bbe65a03fab512af0699 git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/Kconfig | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/block/Kconfig b/block/Kconfig index e657e2bd8059..ef2e9daa5ee6 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -133,6 +133,13 @@ config BLK_WBT dynamically on an algorithm loosely based on CoDel, factoring in the realtime performance of the disk. +config BLK_WBT_MQ + bool "Enable writeback throttling by default" + default y + depends on BLK_WBT + help + Enable writeback throttling by default for request-based block devices. + config BLK_CGROUP_IOLATENCY bool "Enable support for latency based cgroup IO protection" depends on BLK_CGROUP=y @@ -155,13 +162,6 @@ config BLK_CGROUP_IOCOST distributes IO capacity between different groups based on their share of the overall weight distribution. -config BLK_WBT_MQ - bool "Multiqueue writeback throttling" - default y - depends on BLK_WBT - help - Enable writeback throttling by default on multiqueue devices. - config BLK_DEBUG_FS bool "Block layer debugging information in debugfs" default y From 87d38ebe74592d9cd955e584f3e72be6955bfede Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 3 Jun 2021 14:47:04 -0700 Subject: [PATCH 11/62] FROMGIT: block/blk-cgroup: Swap the blk_throtl_init() and blk_iolatency_init() calls Before adding more calls in this function, simplify the error path. 
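A sketch of the simplified error path, matching the diff below and assuming
the existing err_destroy_all label still unwinds via blkg_destroy_all() as
in the upstream blkcg_init_queue():

    ret = blk_iolatency_init(q);
    if (ret)
        goto err_destroy_all;

    ret = blk_throtl_init(q);
    if (ret)
        goto err_destroy_all;

    return 0;

err_destroy_all:
    blkg_destroy_all(q);
    return ret;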
Reviewed-by: Damien Le Moal Reviewed-by: Johannes Thumshirn Reviewed-by: Hannes Reinecke Cc: Tejun Heo Cc: Christoph Hellwig Cc: Ming Lei Cc: Himanshu Madhani Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: I8568b87d1bebbd3841e42a79b7efe2d0a1bff2bc (cherry picked from commit f1a7f539c2720906fb10be0af3514b034e1a9fee git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/blk-cgroup.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index f13688c4b931..44be26e62ca4 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1181,15 +1181,14 @@ int blkcg_init_queue(struct request_queue *q) if (preloaded) radix_tree_preload_end(); + ret = blk_iolatency_init(q); + if (ret) + goto err_destroy_all; + ret = blk_throtl_init(q); if (ret) goto err_destroy_all; - ret = blk_iolatency_init(q); - if (ret) { - blk_throtl_exit(q); - goto err_destroy_all; - } return 0; err_destroy_all: From 16b9fe8a3a75327825c3be93e02639af177dbbc4 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 3 Jun 2021 14:52:44 -0700 Subject: [PATCH 12/62] FROMGIT: block/blk-rq-qos: Move a function from a header file into a C file rq_qos_id_to_name() is only used in blk-mq-debugfs.c so move that function into in blk-mq-debugfs.c. Cc: Damien Le Moal Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Ming Lei Cc: Johannes Thumshirn Cc: Himanshu Madhani Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: If03083a13917bc2f88b6df7151e033a11ab1bc50 (cherry picked from commit f1a7f539c2720906fb10be0af3514b034e1a9fee git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/blk-mq-debugfs.c | 13 +++++++++++++ block/blk-rq-qos.h | 13 ------------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 4de03da9a624..7811bd70b711 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -939,6 +939,19 @@ void blk_mq_debugfs_unregister_sched(struct request_queue *q) q->sched_debugfs_dir = NULL; } +static const char *rq_qos_id_to_name(enum rq_qos_id id) +{ + switch (id) { + case RQ_QOS_WBT: + return "wbt"; + case RQ_QOS_LATENCY: + return "latency"; + case RQ_QOS_COST: + return "cost"; + } + return "unknown"; +} + void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos) { debugfs_remove_recursive(rqos->debugfs_dir); diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h index 2bc43e94f4c4..dd1727164fec 100644 --- a/block/blk-rq-qos.h +++ b/block/blk-rq-qos.h @@ -78,19 +78,6 @@ static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q) return rq_qos_id(q, RQ_QOS_LATENCY); } -static inline const char *rq_qos_id_to_name(enum rq_qos_id id) -{ - switch (id) { - case RQ_QOS_WBT: - return "wbt"; - case RQ_QOS_LATENCY: - return "latency"; - case RQ_QOS_COST: - return "cost"; - } - return "unknown"; -} - static inline void rq_wait_init(struct rq_wait *rq_wait) { atomic_set(&rq_wait->inflight, 0); From 46d6ae07a7facac95b07163c6180a64fd613159a Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 3 Jun 2021 14:49:29 -0700 Subject: [PATCH 13/62] FROMGIT: block: Introduce the ioprio rq-qos policy Introduce an rq-qos policy that assigns an I/O priority to requests based on blk-cgroup configuration settings. This policy has the following advantages over the ioprio_set() system call: - This policy is cgroup based so it has all the advantages of cgroups. 
- While ioprio_set() does not affect page cache writeback I/O, this rq-qos
  controller affects page cache writeback I/O for filesystems that support
  associating a cgroup with writeback I/O. See also
  Documentation/admin-guide/cgroup-v2.rst.

Cc: Damien Le Moal
Cc: Hannes Reinecke
Cc: Christoph Hellwig
Cc: Ming Lei
Cc: Johannes Thumshirn
Cc: Himanshu Madhani
Signed-off-by: Bart Van Assche

BUG: 187357408
Change-Id: If51e608ad37ee7a3f57b507bb17900dcfcb263ed
(cherry picked from commit ee9d2a55c960f152b5710078bbe399a4c51eb0a9
 git://git.kernel.dk/linux-block/ for-5.14/block)
Signed-off-by: Bart Van Assche
---
 Documentation/admin-guide/cgroup-v2.rst |  55 +++++
 block/Kconfig                           |   9 +
 block/Makefile                          |   1 +
 block/blk-cgroup.c                      |   5 +
 block/blk-ioprio.c                      | 262 ++++++++++++++++++++++++
 block/blk-ioprio.h                      |  19 ++
 block/blk-mq-debugfs.c                  |   2 +
 block/blk-rq-qos.h                      |   1 +
 8 files changed, 354 insertions(+)
 create mode 100644 block/blk-ioprio.c
 create mode 100644 block/blk-ioprio.h

diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 608d7c279396..540ee6db5c48 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -54,6 +54,7 @@ v1 is available under :ref:`Documentation/admin-guide/cgroup-v1/index.rst

[the body of this documentation hunk, plus the block/Kconfig and block/Makefile diffs and the block/blk-cgroup.c diff header, were lost in extraction; the surviving blk-cgroup.c hunks follow]

--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
 #include <linux/psi.h>
 #include "blk.h"
+#include "blk-ioprio.h"
 
 #define MAX_KEY_LEN 100
@@ -1185,6 +1186,10 @@ int blkcg_init_queue(struct request_queue *q)
 	if (ret)
 		goto err_destroy_all;
 
+	ret = blk_ioprio_init(q);
+	if (ret)
+		goto err_destroy_all;
+
 	ret = blk_throtl_init(q);
 	if (ret)
 		goto err_destroy_all;
 
diff --git a/block/blk-ioprio.c b/block/blk-ioprio.c
new file mode 100644
index 000000000000..332a07761bf8
--- /dev/null
+++ b/block/blk-ioprio.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Block rq-qos policy for assigning an I/O priority class to requests.
+ *
+ * Using an rq-qos policy for assigning I/O priority class has two advantages
+ * over using the ioprio_set() system call:
+ *
+ * - This policy is cgroup based so it has all the advantages of cgroups.
+ * - While ioprio_set() does not affect page cache writeback I/O, this rq-qos
+ *   controller affects page cache writeback I/O for filesystems that support
+ *   associating a cgroup with writeback I/O. See also
+ *   Documentation/admin-guide/cgroup-v2.rst.
+ */
+
+#include <linux/blk-cgroup.h>
+#include <linux/blk-mq.h>
+#include <linux/blk_types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "blk-ioprio.h"
+#include "blk-rq-qos.h"
+
+/**
+ * enum prio_policy - I/O priority class policy.
+ * @POLICY_NO_CHANGE: (default) do not modify the I/O priority class.
+ * @POLICY_NONE_TO_RT: modify IOPRIO_CLASS_NONE into IOPRIO_CLASS_RT.
+ * @POLICY_RESTRICT_TO_BE: modify IOPRIO_CLASS_NONE and IOPRIO_CLASS_RT into
+ *		IOPRIO_CLASS_BE.
+ * @POLICY_ALL_TO_IDLE: change the I/O priority class into IOPRIO_CLASS_IDLE.
+ *
+ * See also <linux/ioprio.h>.
+ */
+enum prio_policy {
+	POLICY_NO_CHANGE	= 0,
+	POLICY_NONE_TO_RT	= 1,
+	POLICY_RESTRICT_TO_BE	= 2,
+	POLICY_ALL_TO_IDLE	= 3,
+};
+
+static const char *policy_name[] = {
+	[POLICY_NO_CHANGE]	= "no-change",
+	[POLICY_NONE_TO_RT]	= "none-to-rt",
+	[POLICY_RESTRICT_TO_BE]	= "restrict-to-be",
+	[POLICY_ALL_TO_IDLE]	= "idle",
+};
+
+static struct blkcg_policy ioprio_policy;
+
+/**
+ * struct ioprio_blkg - Per (cgroup, request queue) data.
+ * @pd: blkg_policy_data structure.
+ */
+struct ioprio_blkg {
+	struct blkg_policy_data pd;
+};
+
+/**
+ * struct ioprio_blkcg - Per cgroup data.
+ * @cpd: blkcg_policy_data structure.
+ * @prio_policy: One of the IOPRIO_CLASS_* values. See also <linux/ioprio.h>.
+ */ +struct ioprio_blkcg { + struct blkcg_policy_data cpd; + enum prio_policy prio_policy; +}; + +static inline struct ioprio_blkg *pd_to_ioprio(struct blkg_policy_data *pd) +{ + return pd ? container_of(pd, struct ioprio_blkg, pd) : NULL; +} + +static struct ioprio_blkcg *blkcg_to_ioprio_blkcg(struct blkcg *blkcg) +{ + return container_of(blkcg_to_cpd(blkcg, &ioprio_policy), + struct ioprio_blkcg, cpd); +} + +static struct ioprio_blkcg * +ioprio_blkcg_from_css(struct cgroup_subsys_state *css) +{ + return blkcg_to_ioprio_blkcg(css_to_blkcg(css)); +} + +static struct ioprio_blkcg *ioprio_blkcg_from_bio(struct bio *bio) +{ + struct blkg_policy_data *pd = blkg_to_pd(bio->bi_blkg, &ioprio_policy); + + if (!pd) + return NULL; + + return blkcg_to_ioprio_blkcg(pd->blkg->blkcg); +} + +static int ioprio_show_prio_policy(struct seq_file *sf, void *v) +{ + struct ioprio_blkcg *blkcg = ioprio_blkcg_from_css(seq_css(sf)); + + seq_printf(sf, "%s\n", policy_name[blkcg->prio_policy]); + return 0; +} + +static ssize_t ioprio_set_prio_policy(struct kernfs_open_file *of, char *buf, + size_t nbytes, loff_t off) +{ + struct ioprio_blkcg *blkcg = ioprio_blkcg_from_css(of_css(of)); + int ret; + + if (off != 0) + return -EIO; + /* kernfs_fop_write_iter() terminates 'buf' with '\0'. */ + ret = sysfs_match_string(policy_name, buf); + if (ret < 0) + return ret; + blkcg->prio_policy = ret; + + return nbytes; +} + +static struct blkg_policy_data * +ioprio_alloc_pd(gfp_t gfp, struct request_queue *q, struct blkcg *blkcg) +{ + struct ioprio_blkg *ioprio_blkg; + + ioprio_blkg = kzalloc(sizeof(*ioprio_blkg), gfp); + if (!ioprio_blkg) + return NULL; + + return &ioprio_blkg->pd; +} + +static void ioprio_free_pd(struct blkg_policy_data *pd) +{ + struct ioprio_blkg *ioprio_blkg = pd_to_ioprio(pd); + + kfree(ioprio_blkg); +} + +static struct blkcg_policy_data *ioprio_alloc_cpd(gfp_t gfp) +{ + struct ioprio_blkcg *blkcg; + + blkcg = kzalloc(sizeof(*blkcg), gfp); + if (!blkcg) + return NULL; + blkcg->prio_policy = POLICY_NO_CHANGE; + return &blkcg->cpd; +} + +static void ioprio_free_cpd(struct blkcg_policy_data *cpd) +{ + struct ioprio_blkcg *blkcg = container_of(cpd, typeof(*blkcg), cpd); + + kfree(blkcg); +} + +#define IOPRIO_ATTRS \ + { \ + .name = "prio.class", \ + .seq_show = ioprio_show_prio_policy, \ + .write = ioprio_set_prio_policy, \ + }, \ + { } /* sentinel */ + +/* cgroup v2 attributes */ +static struct cftype ioprio_files[] = { + IOPRIO_ATTRS +}; + +/* cgroup v1 attributes */ +static struct cftype ioprio_legacy_files[] = { + IOPRIO_ATTRS +}; + +static struct blkcg_policy ioprio_policy = { + .dfl_cftypes = ioprio_files, + .legacy_cftypes = ioprio_legacy_files, + + .cpd_alloc_fn = ioprio_alloc_cpd, + .cpd_free_fn = ioprio_free_cpd, + + .pd_alloc_fn = ioprio_alloc_pd, + .pd_free_fn = ioprio_free_pd, +}; + +struct blk_ioprio { + struct rq_qos rqos; +}; + +static void blkcg_ioprio_track(struct rq_qos *rqos, struct request *rq, + struct bio *bio) +{ + struct ioprio_blkcg *blkcg = ioprio_blkcg_from_bio(bio); + + /* + * Except for IOPRIO_CLASS_NONE, higher I/O priority numbers + * correspond to a lower priority. Hence, the max_t() below selects + * the lower priority of bi_ioprio and the cgroup I/O priority class. + * If the cgroup policy has been set to POLICY_NO_CHANGE == 0, the + * bio I/O priority is not modified. If the bio I/O priority equals + * IOPRIO_CLASS_NONE, the cgroup I/O priority is assigned to the bio. 
+ */ + bio->bi_ioprio = max_t(u16, bio->bi_ioprio, + IOPRIO_PRIO_VALUE(blkcg->prio_policy, 0)); +} + +static void blkcg_ioprio_exit(struct rq_qos *rqos) +{ + struct blk_ioprio *blkioprio_blkg = + container_of(rqos, typeof(*blkioprio_blkg), rqos); + + blkcg_deactivate_policy(rqos->q, &ioprio_policy); + kfree(blkioprio_blkg); +} + +static struct rq_qos_ops blkcg_ioprio_ops = { + .track = blkcg_ioprio_track, + .exit = blkcg_ioprio_exit, +}; + +int blk_ioprio_init(struct request_queue *q) +{ + struct blk_ioprio *blkioprio_blkg; + struct rq_qos *rqos; + int ret; + + blkioprio_blkg = kzalloc(sizeof(*blkioprio_blkg), GFP_KERNEL); + if (!blkioprio_blkg) + return -ENOMEM; + + ret = blkcg_activate_policy(q, &ioprio_policy); + if (ret) { + kfree(blkioprio_blkg); + return ret; + } + + rqos = &blkioprio_blkg->rqos; + rqos->id = RQ_QOS_IOPRIO; + rqos->ops = &blkcg_ioprio_ops; + rqos->q = q; + + /* + * Registering the rq-qos policy after activating the blk-cgroup + * policy guarantees that ioprio_blkcg_from_bio(bio) != NULL in the + * rq-qos callbacks. + */ + rq_qos_add(q, rqos); + + return 0; +} + +static int __init ioprio_init(void) +{ + return blkcg_policy_register(&ioprio_policy); +} + +static void __exit ioprio_exit(void) +{ + blkcg_policy_unregister(&ioprio_policy); +} + +module_init(ioprio_init); +module_exit(ioprio_exit); diff --git a/block/blk-ioprio.h b/block/blk-ioprio.h new file mode 100644 index 000000000000..a7785c2f1aea --- /dev/null +++ b/block/blk-ioprio.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _BLK_IOPRIO_H_ +#define _BLK_IOPRIO_H_ + +#include + +struct request_queue; + +#ifdef CONFIG_BLK_CGROUP_IOPRIO +int blk_ioprio_init(struct request_queue *q); +#else +static inline int blk_ioprio_init(struct request_queue *q) +{ + return 0; +} +#endif + +#endif /* _BLK_IOPRIO_H_ */ diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 7811bd70b711..34a5eff632bf 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -948,6 +948,8 @@ static const char *rq_qos_id_to_name(enum rq_qos_id id) return "latency"; case RQ_QOS_COST: return "cost"; + case RQ_QOS_IOPRIO: + return "ioprio"; } return "unknown"; } diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h index dd1727164fec..52728614fb27 100644 --- a/block/blk-rq-qos.h +++ b/block/blk-rq-qos.h @@ -16,6 +16,7 @@ enum rq_qos_id { RQ_QOS_WBT, RQ_QOS_LATENCY, RQ_QOS_COST, + RQ_QOS_IOPRIO, }; struct rq_wait { From dfc5e14a3653f1b5655eb836f096919e8179fb85 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 21 May 2021 11:00:46 -0700 Subject: [PATCH 14/62] FROMGIT: block/mq-deadline: Add several comments Make the code easier to read by adding more comments. Reviewed-by: Damien Le Moal Reviewed-by: Johannes Thumshirn Reviewed-by: Himanshu Madhani Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: If62eb600614d2883d72ee3bd7e7859ae66b24512 (cherry picked from commit 16c3afdb127bbff7d3552e076e568281765674b7 git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/mq-deadline.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index b0773f03978c..a3b489390feb 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -137,6 +137,9 @@ static void dd_request_merged(struct request_queue *q, struct request *req, } } +/* + * Callback function that is invoked after @next has been merged into @req. 
+ */ static void dd_merged_requests(struct request_queue *q, struct request *req, struct request *next) { @@ -373,6 +376,8 @@ done: } /* + * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests(). + * * One confusing aspect here is that we get called for a specific * hardware queue, but we may return a request that is for a * different hardware queue. This is because mq-deadline has shared @@ -436,6 +441,10 @@ static int dd_init_queue(struct request_queue *q, struct elevator_type *e) return 0; } +/* + * Try to merge @bio into an existing request. If @bio has been merged into + * an existing request, store the pointer to that request into *@rq. + */ static int dd_request_merge(struct request_queue *q, struct request **rq, struct bio *bio) { @@ -459,6 +468,10 @@ static int dd_request_merge(struct request_queue *q, struct request **rq, return ELEVATOR_NO_MERGE; } +/* + * Attempt to merge a bio into an existing request. This function is called + * before @bio is associated with a request. + */ static bool dd_bio_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) { @@ -516,6 +529,9 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, } } +/* + * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests(). + */ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, struct list_head *list, bool at_head) { @@ -542,6 +558,8 @@ static void dd_prepare_request(struct request *rq) } /* + * Callback from inside blk_mq_free_request(). + * * For zoned block devices, write unlock the target zone of * completed write requests. Do this while holding the zone lock * spinlock so that the zone is never unlocked while deadline_fifo_request() From d0d50fe4b4ded33f5d1e7912a63f39870cff456c Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 19 May 2021 16:16:06 -0700 Subject: [PATCH 15/62] FROMGIT: block/mq-deadline: Add two lockdep_assert_held() statements Document the locking strategy by adding two lockdep_assert_held() statements. Reviewed-by: Chaitanya Kulkarni Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Reviewed-by: Himanshu Madhani Cc: Damien Le Moal Cc: Christoph Hellwig Cc: Ming Lei Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: Ie8cf0b0ae208c9cc87731a9c6d7df5e5e59332d5 (cherry picked from commit 91831ddfd7c6e3df9857526a76cfa88673ec0637 git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/mq-deadline.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index a3b489390feb..a63b8303c0bf 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -277,6 +277,8 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) bool reads, writes; int data_dir; + lockdep_assert_held(&dd->lock); + if (!list_empty(&dd->dispatch)) { rq = list_first_entry(&dd->dispatch, struct request, queuelist); list_del_init(&rq->queuelist); @@ -499,6 +501,8 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, struct deadline_data *dd = q->elevator->elevator_data; const int data_dir = rq_data_dir(rq); + lockdep_assert_held(&dd->lock); + /* * This may be a requeue of a write request that has locked its * target zone. If it is the case, this releases the zone lock. 
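The value of the pattern is easiest to see in a minimal sketch (hypothetical helper name, not part of the patch): on lockdep-enabled kernels the annotation verifies the caller's locking at runtime, and on production builds it compiles away entirely, so the locking contract stays documented at zero cost.

	static void dd_requires_lock_example(struct deadline_data *dd)
	{
		/*
		 * Splats on lockdep-enabled kernels (CONFIG_PROVE_LOCKING)
		 * if the caller does not hold dd->lock; a no-op otherwise.
		 */
		lockdep_assert_held(&dd->lock);

		/* ... manipulate state protected by dd->lock ... */
	}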
From 4c5a1f31b3d6ac007523c143fd83fe27c3f030e2 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 25 May 2021 14:34:55 -0700 Subject: [PATCH 16/62] FROMGIT: block/mq-deadline: Remove two local variables Make __dd_dispatch_request() easier to read by removing two local variables. Reviewed-by: Chaitanya Kulkarni Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Reviewed-by: Himanshu Madhani Cc: Damien Le Moal Cc: Christoph Hellwig Cc: Ming Lei Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: I5567f7d02a2c628efb437058a1c103c7b123747a (cherry picked from commit f005b6ff19d2a961a2c3ae9c5f49d48fda143469 git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/mq-deadline.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index a63b8303c0bf..fe23abf8c988 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -274,7 +274,6 @@ deadline_next_request(struct deadline_data *dd, int data_dir) static struct request *__dd_dispatch_request(struct deadline_data *dd) { struct request *rq, *next_rq; - bool reads, writes; int data_dir; lockdep_assert_held(&dd->lock); @@ -285,9 +284,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) goto done; } - reads = !list_empty(&dd->fifo_list[READ]); - writes = !list_empty(&dd->fifo_list[WRITE]); - /* * batches are currently reads XOR writes */ @@ -304,7 +300,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) * data direction (read / write) */ - if (reads) { + if (!list_empty(&dd->fifo_list[READ])) { BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ])); if (deadline_fifo_request(dd, WRITE) && @@ -320,7 +316,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) * there are either no reads or writes have been starved */ - if (writes) { + if (!list_empty(&dd->fifo_list[WRITE])) { dispatch_writes: BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE])); From d3cebf1cf0051411ebfe06518f5bb0bd7cb3a330 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 20 May 2021 14:06:56 -0700 Subject: [PATCH 17/62] FROMGIT: block/mq-deadline: Rename dd_init_queue() and dd_exit_queue() Change "queue" into "sched" to make the function names reflect better the purpose of these functions. Reviewed-by: Chaitanya Kulkarni Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Reviewed-by: Himanshu Madhani Cc: Damien Le Moal Cc: Christoph Hellwig Cc: Ming Lei Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: I30825b379146dbaef4ff3f85148b2e788667a77c (cherry picked from commit a6e57fe5ab09c250fc741294e6321270a4364fec git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/mq-deadline.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index fe23abf8c988..b4bcfc6546eb 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -393,7 +393,7 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) return rq; } -static void dd_exit_queue(struct elevator_queue *e) +static void dd_exit_sched(struct elevator_queue *e) { struct deadline_data *dd = e->elevator_data; @@ -406,7 +406,7 @@ static void dd_exit_queue(struct elevator_queue *e) /* * initialize elevator private data (deadline_data). 
*/ -static int dd_init_queue(struct request_queue *q, struct elevator_type *e) +static int dd_init_sched(struct request_queue *q, struct elevator_type *e) { struct deadline_data *dd; struct elevator_queue *eq; @@ -798,8 +798,8 @@ static struct elevator_type mq_deadline = { .requests_merged = dd_merged_requests, .request_merged = dd_request_merged, .has_work = dd_has_work, - .init_sched = dd_init_queue, - .exit_sched = dd_exit_queue, + .init_sched = dd_init_sched, + .exit_sched = dd_exit_sched, }, #ifdef CONFIG_BLK_DEBUG_FS From af7003062c8a8db0b6b29145d53de587508918e0 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 19 May 2021 14:52:22 -0700 Subject: [PATCH 18/62] FROMGIT: block/mq-deadline: Improve compile-time argument checking Modern compilers complain if an out-of-range value is passed to a function argument that has an enumeration type. Let the compiler detect out-of-range data direction arguments instead of verifying the data_dir argument at runtime. Reviewed-by: Chaitanya Kulkarni Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Reviewed-by: Himanshu Madhani Cc: Damien Le Moal Cc: Christoph Hellwig Cc: Ming Lei Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: I4ad8c106a86d17f3010e12e172702e77eca61e80 (cherry picked from commit d9baee13f8cf66a8fac9ec67fdb85ce419fcce3a git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/mq-deadline.c | 96 +++++++++++++++++++++++---------------------- 1 file changed, 49 insertions(+), 47 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index b4bcfc6546eb..758f3a905762 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -33,6 +33,13 @@ static const int writes_starved = 2; /* max times reads can starve a write */ static const int fifo_batch = 16; /* # of sequential requests treated as one by the above parameters. For throughput. */ +enum dd_data_dir { + DD_READ = READ, + DD_WRITE = WRITE, +}; + +enum { DD_DIR_COUNT = 2 }; + struct deadline_data { /* * run time data @@ -41,20 +48,20 @@ struct deadline_data { /* * requests (deadline_rq s) are present on both sort_list and fifo_list */ - struct rb_root sort_list[2]; - struct list_head fifo_list[2]; + struct rb_root sort_list[DD_DIR_COUNT]; + struct list_head fifo_list[DD_DIR_COUNT]; /* * next in sort order. 
read, write or both are NULL */ - struct request *next_rq[2]; + struct request *next_rq[DD_DIR_COUNT]; unsigned int batching; /* number of sequential requests made */ unsigned int starved; /* times reads have starved writes */ /* * settings that change how the i/o scheduler behaves */ - int fifo_expire[2]; + int fifo_expire[DD_DIR_COUNT]; int fifo_batch; int writes_starved; int front_merges; @@ -95,7 +102,7 @@ deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) static inline void deadline_del_rq_rb(struct deadline_data *dd, struct request *rq) { - const int data_dir = rq_data_dir(rq); + const enum dd_data_dir data_dir = rq_data_dir(rq); if (dd->next_rq[data_dir] == rq) dd->next_rq[data_dir] = deadline_latter_request(rq); @@ -167,10 +174,10 @@ static void dd_merged_requests(struct request_queue *q, struct request *req, static void deadline_move_request(struct deadline_data *dd, struct request *rq) { - const int data_dir = rq_data_dir(rq); + const enum dd_data_dir data_dir = rq_data_dir(rq); - dd->next_rq[READ] = NULL; - dd->next_rq[WRITE] = NULL; + dd->next_rq[DD_READ] = NULL; + dd->next_rq[DD_WRITE] = NULL; dd->next_rq[data_dir] = deadline_latter_request(rq); /* @@ -183,9 +190,10 @@ deadline_move_request(struct deadline_data *dd, struct request *rq) * deadline_check_fifo returns 0 if there are no expired requests on the fifo, * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir]) */ -static inline int deadline_check_fifo(struct deadline_data *dd, int ddir) +static inline int deadline_check_fifo(struct deadline_data *dd, + enum dd_data_dir data_dir) { - struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next); + struct request *rq = rq_entry_fifo(dd->fifo_list[data_dir].next); /* * rq is expired! @@ -201,19 +209,16 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir) * dispatch using arrival ordered lists. */ static struct request * -deadline_fifo_request(struct deadline_data *dd, int data_dir) +deadline_fifo_request(struct deadline_data *dd, enum dd_data_dir data_dir) { struct request *rq; unsigned long flags; - if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE)) - return NULL; - if (list_empty(&dd->fifo_list[data_dir])) return NULL; rq = rq_entry_fifo(dd->fifo_list[data_dir].next); - if (data_dir == READ || !blk_queue_is_zoned(rq->q)) + if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q)) return rq; /* @@ -221,7 +226,7 @@ deadline_fifo_request(struct deadline_data *dd, int data_dir) * an unlocked target zone. */ spin_lock_irqsave(&dd->zone_lock, flags); - list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) { + list_for_each_entry(rq, &dd->fifo_list[DD_WRITE], queuelist) { if (blk_req_can_dispatch_to_zone(rq)) goto out; } @@ -237,19 +242,16 @@ out: * dispatch using sector position sorted lists. 
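 *
 * Unlike deadline_fifo_request() above, which serves requests in arrival
 * order to bound latency once a deadline expires, this path follows
 * next_rq[] in ascending sector order to keep the device streaming.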
*/ static struct request * -deadline_next_request(struct deadline_data *dd, int data_dir) +deadline_next_request(struct deadline_data *dd, enum dd_data_dir data_dir) { struct request *rq; unsigned long flags; - if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE)) - return NULL; - rq = dd->next_rq[data_dir]; if (!rq) return NULL; - if (data_dir == READ || !blk_queue_is_zoned(rq->q)) + if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q)) return rq; /* @@ -274,7 +276,7 @@ deadline_next_request(struct deadline_data *dd, int data_dir) static struct request *__dd_dispatch_request(struct deadline_data *dd) { struct request *rq, *next_rq; - int data_dir; + enum dd_data_dir data_dir; lockdep_assert_held(&dd->lock); @@ -287,9 +289,9 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) /* * batches are currently reads XOR writes */ - rq = deadline_next_request(dd, WRITE); + rq = deadline_next_request(dd, DD_WRITE); if (!rq) - rq = deadline_next_request(dd, READ); + rq = deadline_next_request(dd, DD_READ); if (rq && dd->batching < dd->fifo_batch) /* we have a next request are still entitled to batch */ @@ -300,14 +302,14 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) * data direction (read / write) */ - if (!list_empty(&dd->fifo_list[READ])) { - BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ])); + if (!list_empty(&dd->fifo_list[DD_READ])) { + BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[DD_READ])); - if (deadline_fifo_request(dd, WRITE) && + if (deadline_fifo_request(dd, DD_WRITE) && (dd->starved++ >= dd->writes_starved)) goto dispatch_writes; - data_dir = READ; + data_dir = DD_READ; goto dispatch_find_request; } @@ -316,13 +318,13 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) * there are either no reads or writes have been starved */ - if (!list_empty(&dd->fifo_list[WRITE])) { + if (!list_empty(&dd->fifo_list[DD_WRITE])) { dispatch_writes: - BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE])); + BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[DD_WRITE])); dd->starved = 0; - data_dir = WRITE; + data_dir = DD_WRITE; goto dispatch_find_request; } @@ -397,8 +399,8 @@ static void dd_exit_sched(struct elevator_queue *e) { struct deadline_data *dd = e->elevator_data; - BUG_ON(!list_empty(&dd->fifo_list[READ])); - BUG_ON(!list_empty(&dd->fifo_list[WRITE])); + BUG_ON(!list_empty(&dd->fifo_list[DD_READ])); + BUG_ON(!list_empty(&dd->fifo_list[DD_WRITE])); kfree(dd); } @@ -422,12 +424,12 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) } eq->elevator_data = dd; - INIT_LIST_HEAD(&dd->fifo_list[READ]); - INIT_LIST_HEAD(&dd->fifo_list[WRITE]); - dd->sort_list[READ] = RB_ROOT; - dd->sort_list[WRITE] = RB_ROOT; - dd->fifo_expire[READ] = read_expire; - dd->fifo_expire[WRITE] = write_expire; + INIT_LIST_HEAD(&dd->fifo_list[DD_READ]); + INIT_LIST_HEAD(&dd->fifo_list[DD_WRITE]); + dd->sort_list[DD_READ] = RB_ROOT; + dd->sort_list[DD_WRITE] = RB_ROOT; + dd->fifo_expire[DD_READ] = read_expire; + dd->fifo_expire[DD_WRITE] = write_expire; dd->writes_starved = writes_starved; dd->front_merges = 1; dd->fifo_batch = fifo_batch; @@ -495,7 +497,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, { struct request_queue *q = hctx->queue; struct deadline_data *dd = q->elevator->elevator_data; - const int data_dir = rq_data_dir(rq); + const enum dd_data_dir data_dir = rq_data_dir(rq); lockdep_assert_held(&dd->lock); @@ -583,7 +585,7 @@ static void dd_finish_request(struct request *rq) spin_lock_irqsave(&dd->zone_lock, flags); 
blk_req_zone_write_unlock(rq); - if (!list_empty(&dd->fifo_list[WRITE])) + if (!list_empty(&dd->fifo_list[DD_WRITE])) blk_mq_sched_mark_restart_hctx(rq->mq_hctx); spin_unlock_irqrestore(&dd->zone_lock, flags); } @@ -624,8 +626,8 @@ static ssize_t __FUNC(struct elevator_queue *e, char *page) \ __data = jiffies_to_msecs(__data); \ return deadline_var_show(__data, (page)); \ } -SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1); -SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1); +SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[DD_READ], 1); +SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[DD_WRITE], 1); SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0); SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0); SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0); @@ -647,8 +649,8 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) *(__PTR) = __data; \ return count; \ } -STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1); -STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX, 1); +STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX, 1); STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0); STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0); STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0); @@ -715,8 +717,8 @@ static int deadline_##name##_next_rq_show(void *data, \ __blk_mq_debugfs_rq_show(m, rq); \ return 0; \ } -DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read) -DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write) +DEADLINE_DEBUGFS_DDIR_ATTRS(DD_READ, read) +DEADLINE_DEBUGFS_DDIR_ATTRS(DD_WRITE, write) #undef DEADLINE_DEBUGFS_DDIR_ATTRS static int deadline_batching_show(void *data, struct seq_file *m) From f3daa8d50a399ec64692da6861ec634ec6351cfa Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Mon, 7 Jun 2021 16:52:20 -0700 Subject: [PATCH 19/62] FROMGIT: block/mq-deadline: Improve the sysfs show and store macros Define separate macros for integers and jiffies to improve readability. Use sysfs_emit() and kstrtoint() instead of sprintf() and simple_strtol(). The former functions are the recommended functions. 
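A minimal sketch of a show/store pair built on the recommended helpers (hypothetical attribute names, not taken from the patch): sysfs_emit() refuses to write beyond the PAGE_SIZE buffer that sysfs hands in, and kstrtoint() rejects malformed or out-of-range input instead of silently parsing a numeric prefix the way simple_strtol() does.

	static ssize_t example_show(struct elevator_queue *e, char *page)
	{
		struct deadline_data *dd = e->elevator_data;

		/* Bounded, NUL-terminated output into the sysfs page. */
		return sysfs_emit(page, "%d\n", dd->front_merges);
	}

	static ssize_t example_store(struct elevator_queue *e, const char *page,
				     size_t count)
	{
		struct deadline_data *dd = e->elevator_data;
		int val, ret;

		ret = kstrtoint(page, 0, &val);	/* -EINVAL/-ERANGE on bad input */
		if (ret < 0)
			return ret;
		dd->front_merges = clamp(val, 0, 1);
		return count;
	}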
Cc: Damien Le Moal Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Ming Lei Cc: Johannes Thumshirn Cc: Himanshu Madhani Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: I4e0fd35124cd0319fcace0d1d5e3c113b60a213c (cherry picked from commit d9baee13f8cf66a8fac9ec67fdb85ce419fcce3a git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/mq-deadline.c | 64 ++++++++++++++++++++------------------------- 1 file changed, 28 insertions(+), 36 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 758f3a905762..3ac8e15d724c 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -603,58 +603,50 @@ static bool dd_has_work(struct blk_mq_hw_ctx *hctx) /* * sysfs parts below */ -static ssize_t -deadline_var_show(int var, char *page) -{ - return sprintf(page, "%d\n", var); -} - -static void -deadline_var_store(int *var, const char *page) -{ - char *p = (char *) page; - - *var = simple_strtol(p, &p, 10); -} - -#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +#define SHOW_INT(__FUNC, __VAR) \ static ssize_t __FUNC(struct elevator_queue *e, char *page) \ { \ struct deadline_data *dd = e->elevator_data; \ - int __data = __VAR; \ - if (__CONV) \ - __data = jiffies_to_msecs(__data); \ - return deadline_var_show(__data, (page)); \ + \ + return sysfs_emit(page, "%d\n", __VAR); \ } -SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[DD_READ], 1); -SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[DD_WRITE], 1); -SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0); -SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0); -SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0); -#undef SHOW_FUNCTION +#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR)) +SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]); +SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]); +SHOW_INT(deadline_writes_starved_show, dd->writes_starved); +SHOW_INT(deadline_front_merges_show, dd->front_merges); +SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch); +#undef SHOW_INT +#undef SHOW_JIFFIES #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ { \ struct deadline_data *dd = e->elevator_data; \ - int __data; \ - deadline_var_store(&__data, (page)); \ + int __data, __ret; \ + \ + __ret = kstrtoint(page, 0, &__data); \ + if (__ret < 0) \ + return __ret; \ if (__data < (MIN)) \ __data = (MIN); \ else if (__data > (MAX)) \ __data = (MAX); \ - if (__CONV) \ - *(__PTR) = msecs_to_jiffies(__data); \ - else \ - *(__PTR) = __data; \ + *(__PTR) = __CONV(__data); \ return count; \ } -STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX, 1); -STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX, 1); -STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0); -STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0); -STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0); +#define STORE_INT(__FUNC, __PTR, MIN, MAX) \ + STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, ) +#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX) \ + STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies) +STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX); +STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX); +STORE_INT(deadline_writes_starved_store, &dd->writes_starved, 
INT_MIN, INT_MAX); +STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1); +STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX); #undef STORE_FUNCTION +#undef STORE_INT +#undef STORE_JIFFIES #define DD_ATTR(name) \ __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store) From 179aecb2b24705bb927391e8a0837a90cc4a37f0 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Tue, 18 May 2021 15:48:36 -0700 Subject: [PATCH 20/62] FROMGIT: block/mq-deadline: Reserve 25% of scheduler tags for synchronous requests For interactive workloads it is important that synchronous requests are not delayed. Hence reserve 25% of scheduler tags for synchronous requests. This patch still allows asynchronous requests to fill the hardware queues since blk_mq_init_sched() makes sure that the number of scheduler requests is the double of the hardware queue depth. From blk_mq_init_sched(): q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth, BLKDEV_MAX_RQ); Cc: Damien Le Moal Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Ming Lei Cc: Johannes Thumshirn Cc: Himanshu Madhani Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: Ib9cd753a39c8e5f5c45908001d69334130ef2067 (cherry picked from commit c970bc8292aaaf6f2d333d612e657df3a99f417c git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/mq-deadline.c | 55 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 3ac8e15d724c..62db85f2c3d6 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -65,6 +65,7 @@ struct deadline_data { int fifo_batch; int writes_starved; int front_merges; + u32 async_depth; spinlock_t lock; spinlock_t zone_lock; @@ -395,6 +396,44 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) return rq; } +/* + * Called by __blk_mq_alloc_request(). The shallow_depth value set by this + * function is used by __blk_mq_get_tag(). + */ +static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) +{ + struct deadline_data *dd = data->q->elevator->elevator_data; + + /* Do not throttle synchronous reads. */ + if (op_is_sync(op) && !op_is_write(op)) + return; + + /* + * Throttle asynchronous requests and writes such that these requests + * do not block the allocation of synchronous requests. + */ + data->shallow_depth = dd->async_depth; +} + +/* Called by blk_mq_update_nr_requests(). */ +static void dd_depth_updated(struct blk_mq_hw_ctx *hctx) +{ + struct request_queue *q = hctx->queue; + struct deadline_data *dd = q->elevator->elevator_data; + struct blk_mq_tags *tags = hctx->sched_tags; + + dd->async_depth = max(1UL, 3 * q->nr_requests / 4); + + sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth); +} + +/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). 
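+ *
+ * Worked example with an assumed hardware queue depth of 64:
+ * blk_mq_init_sched() sets q->nr_requests = 2 * 64 = 128, so
+ * dd_depth_updated() computes async_depth = 3 * 128 / 4 = 96, leaving
+ * 128 - 96 = 32 scheduler tags (25%) reserved for synchronous requests.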
 */
+static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
+	dd_depth_updated(hctx);
+	return 0;
+}
+
 static void dd_exit_sched(struct elevator_queue *e)
 {
 	struct deadline_data *dd = e->elevator_data;
@@ -615,6 +654,7 @@ SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
 SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
 SHOW_INT(deadline_front_merges_show, dd->front_merges);
+SHOW_INT(deadline_async_depth_show, dd->async_depth);
 SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
 #undef SHOW_INT
 #undef SHOW_JIFFIES
@@ -643,6 +683,7 @@ STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX)
 STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
+STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
 STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
 #undef STORE_FUNCTION
 #undef STORE_INT
@@ -656,6 +697,7 @@ static struct elv_fs_entry deadline_attrs[] = {
 	DD_ATTR(write_expire),
 	DD_ATTR(writes_starved),
 	DD_ATTR(front_merges),
+	DD_ATTR(async_depth),
 	DD_ATTR(fifo_batch),
 	__ATTR_NULL
 };
@@ -731,6 +773,15 @@ static int deadline_starved_show(void *data, struct seq_file *m)
 	return 0;
 }

+static int dd_async_depth_show(void *data, struct seq_file *m)
+{
+	struct request_queue *q = data;
+	struct deadline_data *dd = q->elevator->elevator_data;
+
+	seq_printf(m, "%u\n", dd->async_depth);
+	return 0;
+}
+
 static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
 	__acquires(&dd->lock)
 {
@@ -773,6 +824,7 @@ static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
 	DEADLINE_QUEUE_DDIR_ATTRS(write),
 	{"batching", 0400, deadline_batching_show},
 	{"starved", 0400, deadline_starved_show},
+	{"async_depth", 0400, dd_async_depth_show},
 	{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
 	{},
 };
@@ -781,6 +833,8 @@
 static struct elevator_type mq_deadline = {
 	.ops = {
+		.depth_updated = dd_depth_updated,
+		.limit_depth = dd_limit_depth,
 		.insert_requests = dd_insert_requests,
 		.dispatch_request = dd_dispatch_request,
 		.prepare_request = dd_prepare_request,
@@ -794,6 +848,7 @@ static struct elevator_type mq_deadline = {
 		.has_work = dd_has_work,
 		.init_sched = dd_init_sched,
 		.exit_sched = dd_exit_sched,
+		.init_hctx = dd_init_hctx,
 	},

 #ifdef CONFIG_BLK_DEBUG_FS

From 63544e140bcc10d825e89d9cf86216d8a6146c35 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Thu, 10 Jun 2021 16:51:14 -0700
Subject: [PATCH 21/62] FROMGIT: block/mq-deadline: Micro-optimize the batching algorithm

When dispatching the first request of a batch, the deadline_move_request()
call clears .next_rq[] for the opposite data direction. .next_rq[] is not
restored when changing data direction. Fix this by not clearing .next_rq[]
and by keeping track of the data direction of a batch in a variable instead.

This patch is a micro-optimization because:
- The number of deadline_next_request() calls for the read direction is
  halved.
- The number of times that deadline_next_request() returns NULL is reduced.
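Concretely, the dispatch state after this change can be sketched as follows (hypothetical trace, assuming fifo_batch > 1 and a mixed read/write workload):

	/*
	 * 1. A write is dispatched: dd->last_dir = DD_WRITE, dd->batching = 1,
	 *    and next_rq[DD_WRITE] points at the next sector-ordered write.
	 * 2. While the batch lasts, deadline_next_request(dd, dd->last_dir)
	 *    resolves with a single lookup; next_rq[DD_READ] is no longer
	 *    cleared as a side effect.
	 * 3. When the batch ends and reads are selected, next_rq[DD_READ]
	 *    still points at the next sector-ordered read, so the read batch
	 *    starts without consulting the other direction first.
	 */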
Cc: Damien Le Moal Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Ming Lei Cc: Johannes Thumshirn Cc: Himanshu Madhani Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: I582e99603a5443d75cf2b18a5daa2c93b5c66de3 (cherry picked from commit ea0fd2a525436ab5b9ada0f1953b0c0a29357311 git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/mq-deadline.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 62db85f2c3d6..a3fcae74518a 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -51,6 +51,8 @@ struct deadline_data { struct rb_root sort_list[DD_DIR_COUNT]; struct list_head fifo_list[DD_DIR_COUNT]; + /* Data direction of latest dispatched request. */ + enum dd_data_dir last_dir; /* * next in sort order. read, write or both are NULL */ @@ -177,8 +179,6 @@ deadline_move_request(struct deadline_data *dd, struct request *rq) { const enum dd_data_dir data_dir = rq_data_dir(rq); - dd->next_rq[DD_READ] = NULL; - dd->next_rq[DD_WRITE] = NULL; dd->next_rq[data_dir] = deadline_latter_request(rq); /* @@ -290,10 +290,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) /* * batches are currently reads XOR writes */ - rq = deadline_next_request(dd, DD_WRITE); - if (!rq) - rq = deadline_next_request(dd, DD_READ); - + rq = deadline_next_request(dd, dd->last_dir); if (rq && dd->batching < dd->fifo_batch) /* we have a next request are still entitled to batch */ goto dispatch_request; @@ -359,6 +356,7 @@ dispatch_find_request: if (!rq) return NULL; + dd->last_dir = data_dir; dd->batching = 0; dispatch_request: @@ -471,6 +469,7 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) dd->fifo_expire[DD_WRITE] = write_expire; dd->writes_starved = writes_starved; dd->front_merges = 1; + dd->last_dir = DD_WRITE; dd->fifo_batch = fifo_batch; spin_lock_init(&dd->lock); spin_lock_init(&dd->zone_lock); From e3880a66fa08a6bd25b947c9d0addaa91ed6f151 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 19 May 2021 14:47:00 -0700 Subject: [PATCH 22/62] FROMGIT: block/mq-deadline: Add I/O priority support Maintain one dispatch list and one FIFO list per I/O priority class: RT, BE and IDLE. Maintain statistics for each priority level. Split the debugfs attributes per priority level as follows: $ ls /sys/kernel/debug/block/.../sched/ async_depth dispatch2 read_next_rq write2_fifo_list batching read0_fifo_list starved write_next_rq dispatch0 read1_fifo_list write0_fifo_list dispatch1 read2_fifo_list write1_fifo_list Cc: Damien Le Moal Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Ming Lei Cc: Johannes Thumshirn Cc: Himanshu Madhani Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: I60451cfdb416ad27601dc3ffb4eb307fa6ff783f (cherry picked from commit 5b701a6e040ff8626ecf29ac06de9689efc00754 git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/mq-deadline.c | 343 +++++++++++++++++++++++++++++--------------- 1 file changed, 229 insertions(+), 114 deletions(-) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index a3fcae74518a..50b9fdfd8f0a 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -40,23 +40,36 @@ enum dd_data_dir { enum { DD_DIR_COUNT = 2 }; +enum dd_prio { + DD_RT_PRIO = 0, + DD_BE_PRIO = 1, + DD_IDLE_PRIO = 2, + DD_PRIO_MAX = 2, +}; + +enum { DD_PRIO_COUNT = 3 }; + +/* + * Deadline scheduler data per I/O priority (enum dd_prio). Requests are + * present on both sort_list[] and fifo_list[]. 
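+ *
+ * Lower dd_prio values are dispatched first: dd_dispatch_request() scans
+ * per_prio[DD_RT_PRIO] first, considers DD_BE_PRIO only when no RT request
+ * is pending, and DD_IDLE_PRIO only when neither RT nor BE requests are
+ * pending.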
+ */ +struct dd_per_prio { + struct list_head dispatch; + struct rb_root sort_list[DD_DIR_COUNT]; + struct list_head fifo_list[DD_DIR_COUNT]; + /* Next request in FIFO order. Read, write or both are NULL. */ + struct request *next_rq[DD_DIR_COUNT]; +}; + struct deadline_data { /* * run time data */ - /* - * requests (deadline_rq s) are present on both sort_list and fifo_list - */ - struct rb_root sort_list[DD_DIR_COUNT]; - struct list_head fifo_list[DD_DIR_COUNT]; + struct dd_per_prio per_prio[DD_PRIO_COUNT]; /* Data direction of latest dispatched request. */ enum dd_data_dir last_dir; - /* - * next in sort order. read, write or both are NULL - */ - struct request *next_rq[DD_DIR_COUNT]; unsigned int batching; /* number of sequential requests made */ unsigned int starved; /* times reads have starved writes */ @@ -71,13 +84,29 @@ struct deadline_data { spinlock_t lock; spinlock_t zone_lock; - struct list_head dispatch; +}; + +/* Maps an I/O priority class to a deadline scheduler priority. */ +static const enum dd_prio ioprio_class_to_prio[] = { + [IOPRIO_CLASS_NONE] = DD_BE_PRIO, + [IOPRIO_CLASS_RT] = DD_RT_PRIO, + [IOPRIO_CLASS_BE] = DD_BE_PRIO, + [IOPRIO_CLASS_IDLE] = DD_IDLE_PRIO, }; static inline struct rb_root * -deadline_rb_root(struct deadline_data *dd, struct request *rq) +deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq) { - return &dd->sort_list[rq_data_dir(rq)]; + return &per_prio->sort_list[rq_data_dir(rq)]; +} + +/* + * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a + * request. + */ +static u8 dd_rq_ioclass(struct request *rq) +{ + return IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); } /* @@ -95,38 +124,38 @@ deadline_latter_request(struct request *rq) } static void -deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) +deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq) { - struct rb_root *root = deadline_rb_root(dd, rq); + struct rb_root *root = deadline_rb_root(per_prio, rq); elv_rb_add(root, rq); } static inline void -deadline_del_rq_rb(struct deadline_data *dd, struct request *rq) +deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq) { const enum dd_data_dir data_dir = rq_data_dir(rq); - if (dd->next_rq[data_dir] == rq) - dd->next_rq[data_dir] = deadline_latter_request(rq); + if (per_prio->next_rq[data_dir] == rq) + per_prio->next_rq[data_dir] = deadline_latter_request(rq); - elv_rb_del(deadline_rb_root(dd, rq), rq); + elv_rb_del(deadline_rb_root(per_prio, rq), rq); } /* * remove rq from rbtree and fifo. 
*/ -static void deadline_remove_request(struct request_queue *q, struct request *rq) +static void deadline_remove_request(struct request_queue *q, + struct dd_per_prio *per_prio, + struct request *rq) { - struct deadline_data *dd = q->elevator->elevator_data; - list_del_init(&rq->queuelist); /* * We might not be on the rbtree, if we are doing an insert merge */ if (!RB_EMPTY_NODE(&rq->rb_node)) - deadline_del_rq_rb(dd, rq); + deadline_del_rq_rb(per_prio, rq); elv_rqhash_del(q, rq); if (q->last_merge == rq) @@ -137,13 +166,16 @@ static void dd_request_merged(struct request_queue *q, struct request *req, enum elv_merge type) { struct deadline_data *dd = q->elevator->elevator_data; + const u8 ioprio_class = dd_rq_ioclass(req); + const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; + struct dd_per_prio *per_prio = &dd->per_prio[prio]; /* * if the merge was a front merge, we need to reposition request */ if (type == ELEVATOR_FRONT_MERGE) { - elv_rb_del(deadline_rb_root(dd, req), req); - deadline_add_rq_rb(dd, req); + elv_rb_del(deadline_rb_root(per_prio, req), req); + deadline_add_rq_rb(per_prio, req); } } @@ -153,6 +185,10 @@ static void dd_request_merged(struct request_queue *q, struct request *req, static void dd_merged_requests(struct request_queue *q, struct request *req, struct request *next) { + struct deadline_data *dd = q->elevator->elevator_data; + const u8 ioprio_class = dd_rq_ioclass(next); + const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; + /* * if next expires before rq, assign its expire time to rq * and move into next position (next will be deleted) in fifo @@ -168,33 +204,34 @@ static void dd_merged_requests(struct request_queue *q, struct request *req, /* * kill knowledge of next, this one is a goner */ - deadline_remove_request(q, next); + deadline_remove_request(q, &dd->per_prio[prio], next); } /* * move an entry to dispatch queue */ static void -deadline_move_request(struct deadline_data *dd, struct request *rq) +deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio, + struct request *rq) { const enum dd_data_dir data_dir = rq_data_dir(rq); - dd->next_rq[data_dir] = deadline_latter_request(rq); + per_prio->next_rq[data_dir] = deadline_latter_request(rq); /* * take it off the sort and fifo list */ - deadline_remove_request(rq->q, rq); + deadline_remove_request(rq->q, per_prio, rq); } /* * deadline_check_fifo returns 0 if there are no expired requests on the fifo, * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir]) */ -static inline int deadline_check_fifo(struct deadline_data *dd, +static inline int deadline_check_fifo(struct dd_per_prio *per_prio, enum dd_data_dir data_dir) { - struct request *rq = rq_entry_fifo(dd->fifo_list[data_dir].next); + struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next); /* * rq is expired! @@ -210,15 +247,16 @@ static inline int deadline_check_fifo(struct deadline_data *dd, * dispatch using arrival ordered lists. 
*/ static struct request * -deadline_fifo_request(struct deadline_data *dd, enum dd_data_dir data_dir) +deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio, + enum dd_data_dir data_dir) { struct request *rq; unsigned long flags; - if (list_empty(&dd->fifo_list[data_dir])) + if (list_empty(&per_prio->fifo_list[data_dir])) return NULL; - rq = rq_entry_fifo(dd->fifo_list[data_dir].next); + rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next); if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q)) return rq; @@ -227,7 +265,7 @@ deadline_fifo_request(struct deadline_data *dd, enum dd_data_dir data_dir) * an unlocked target zone. */ spin_lock_irqsave(&dd->zone_lock, flags); - list_for_each_entry(rq, &dd->fifo_list[DD_WRITE], queuelist) { + list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) { if (blk_req_can_dispatch_to_zone(rq)) goto out; } @@ -243,12 +281,13 @@ out: * dispatch using sector position sorted lists. */ static struct request * -deadline_next_request(struct deadline_data *dd, enum dd_data_dir data_dir) +deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio, + enum dd_data_dir data_dir) { struct request *rq; unsigned long flags; - rq = dd->next_rq[data_dir]; + rq = per_prio->next_rq[data_dir]; if (!rq) return NULL; @@ -274,15 +313,17 @@ deadline_next_request(struct deadline_data *dd, enum dd_data_dir data_dir) * deadline_dispatch_requests selects the best request according to * read/write expire, fifo_batch, etc */ -static struct request *__dd_dispatch_request(struct deadline_data *dd) +static struct request *__dd_dispatch_request(struct deadline_data *dd, + struct dd_per_prio *per_prio) { struct request *rq, *next_rq; enum dd_data_dir data_dir; lockdep_assert_held(&dd->lock); - if (!list_empty(&dd->dispatch)) { - rq = list_first_entry(&dd->dispatch, struct request, queuelist); + if (!list_empty(&per_prio->dispatch)) { + rq = list_first_entry(&per_prio->dispatch, struct request, + queuelist); list_del_init(&rq->queuelist); goto done; } @@ -290,7 +331,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) /* * batches are currently reads XOR writes */ - rq = deadline_next_request(dd, dd->last_dir); + rq = deadline_next_request(dd, per_prio, dd->last_dir); if (rq && dd->batching < dd->fifo_batch) /* we have a next request are still entitled to batch */ goto dispatch_request; @@ -300,10 +341,10 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) * data direction (read / write) */ - if (!list_empty(&dd->fifo_list[DD_READ])) { - BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[DD_READ])); + if (!list_empty(&per_prio->fifo_list[DD_READ])) { + BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ])); - if (deadline_fifo_request(dd, DD_WRITE) && + if (deadline_fifo_request(dd, per_prio, DD_WRITE) && (dd->starved++ >= dd->writes_starved)) goto dispatch_writes; @@ -316,9 +357,9 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd) * there are either no reads or writes have been starved */ - if (!list_empty(&dd->fifo_list[DD_WRITE])) { + if (!list_empty(&per_prio->fifo_list[DD_WRITE])) { dispatch_writes: - BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[DD_WRITE])); + BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE])); dd->starved = 0; @@ -333,14 +374,14 @@ dispatch_find_request: /* * we are not running a batch, find best request for selected data_dir */ - next_rq = deadline_next_request(dd, data_dir); - if (deadline_check_fifo(dd, data_dir) || !next_rq) { + next_rq = deadline_next_request(dd, 
per_prio, data_dir); + if (deadline_check_fifo(per_prio, data_dir) || !next_rq) { /* * A deadline has expired, the last request was in the other * direction, or we have run out of higher-sectored requests. * Start again from the request with the earliest expiry time. */ - rq = deadline_fifo_request(dd, data_dir); + rq = deadline_fifo_request(dd, per_prio, data_dir); } else { /* * The last req was the same dir and we have a next request in @@ -364,7 +405,7 @@ dispatch_request: * rq is the selected appropriate request. */ dd->batching++; - deadline_move_request(dd, rq); + deadline_move_request(dd, per_prio, rq); done: /* * If the request needs its target zone locked, do it. @@ -386,9 +427,14 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) { struct deadline_data *dd = hctx->queue->elevator->elevator_data; struct request *rq; + enum dd_prio prio; spin_lock(&dd->lock); - rq = __dd_dispatch_request(dd); + for (prio = 0; prio <= DD_PRIO_MAX; prio++) { + rq = __dd_dispatch_request(dd, &dd->per_prio[prio]); + if (rq) + break; + } spin_unlock(&dd->lock); return rq; @@ -435,9 +481,14 @@ static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) static void dd_exit_sched(struct elevator_queue *e) { struct deadline_data *dd = e->elevator_data; + enum dd_prio prio; - BUG_ON(!list_empty(&dd->fifo_list[DD_READ])); - BUG_ON(!list_empty(&dd->fifo_list[DD_WRITE])); + for (prio = 0; prio <= DD_PRIO_MAX; prio++) { + struct dd_per_prio *per_prio = &dd->per_prio[prio]; + + WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ])); + WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE])); + } kfree(dd); } @@ -449,22 +500,28 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) { struct deadline_data *dd; struct elevator_queue *eq; + enum dd_prio prio; + int ret = -ENOMEM; eq = elevator_alloc(q, e); if (!eq) - return -ENOMEM; + return ret; dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node); - if (!dd) { - kobject_put(&eq->kobj); - return -ENOMEM; - } + if (!dd) + goto put_eq; + eq->elevator_data = dd; - INIT_LIST_HEAD(&dd->fifo_list[DD_READ]); - INIT_LIST_HEAD(&dd->fifo_list[DD_WRITE]); - dd->sort_list[DD_READ] = RB_ROOT; - dd->sort_list[DD_WRITE] = RB_ROOT; + for (prio = 0; prio <= DD_PRIO_MAX; prio++) { + struct dd_per_prio *per_prio = &dd->per_prio[prio]; + + INIT_LIST_HEAD(&per_prio->dispatch); + INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]); + INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]); + per_prio->sort_list[DD_READ] = RB_ROOT; + per_prio->sort_list[DD_WRITE] = RB_ROOT; + } dd->fifo_expire[DD_READ] = read_expire; dd->fifo_expire[DD_WRITE] = write_expire; dd->writes_starved = writes_starved; @@ -473,10 +530,13 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) dd->fifo_batch = fifo_batch; spin_lock_init(&dd->lock); spin_lock_init(&dd->zone_lock); - INIT_LIST_HEAD(&dd->dispatch); q->elevator = eq; return 0; + +put_eq: + kobject_put(&eq->kobj); + return ret; } /* @@ -487,13 +547,16 @@ static int dd_request_merge(struct request_queue *q, struct request **rq, struct bio *bio) { struct deadline_data *dd = q->elevator->elevator_data; + const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio); + const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; + struct dd_per_prio *per_prio = &dd->per_prio[prio]; sector_t sector = bio_end_sector(bio); struct request *__rq; if (!dd->front_merges) return ELEVATOR_NO_MERGE; - __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector); + __rq = 
elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector); if (__rq) { BUG_ON(sector != blk_rq_pos(__rq)); @@ -536,6 +599,10 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, struct request_queue *q = hctx->queue; struct deadline_data *dd = q->elevator->elevator_data; const enum dd_data_dir data_dir = rq_data_dir(rq); + u16 ioprio = req_get_ioprio(rq); + u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio); + struct dd_per_prio *per_prio; + enum dd_prio prio; lockdep_assert_held(&dd->lock); @@ -545,15 +612,18 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, */ blk_req_zone_write_unlock(rq); + prio = ioprio_class_to_prio[ioprio_class]; + if (blk_mq_sched_try_insert_merge(q, rq)) return; blk_mq_sched_request_inserted(rq); + per_prio = &dd->per_prio[prio]; if (at_head) { - list_add(&rq->queuelist, &dd->dispatch); + list_add(&rq->queuelist, &per_prio->dispatch); } else { - deadline_add_rq_rb(dd, rq); + deadline_add_rq_rb(per_prio, rq); if (rq_mergeable(rq)) { elv_rqhash_add(q, rq); @@ -565,7 +635,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, * set expire time and add to fifo list */ rq->fifo_time = jiffies + dd->fifo_expire[data_dir]; - list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]); + list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]); } } @@ -616,26 +686,39 @@ static void dd_prepare_request(struct request *rq) static void dd_finish_request(struct request *rq) { struct request_queue *q = rq->q; + struct deadline_data *dd = q->elevator->elevator_data; + const u8 ioprio_class = dd_rq_ioclass(rq); + const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; + struct dd_per_prio *per_prio = &dd->per_prio[prio]; if (blk_queue_is_zoned(q)) { - struct deadline_data *dd = q->elevator->elevator_data; unsigned long flags; spin_lock_irqsave(&dd->zone_lock, flags); blk_req_zone_write_unlock(rq); - if (!list_empty(&dd->fifo_list[DD_WRITE])) + if (!list_empty(&per_prio->fifo_list[DD_WRITE])) blk_mq_sched_mark_restart_hctx(rq->mq_hctx); spin_unlock_irqrestore(&dd->zone_lock, flags); } } +static bool dd_has_work_for_prio(struct dd_per_prio *per_prio) +{ + return !list_empty_careful(&per_prio->dispatch) || + !list_empty_careful(&per_prio->fifo_list[DD_READ]) || + !list_empty_careful(&per_prio->fifo_list[DD_WRITE]); +} + static bool dd_has_work(struct blk_mq_hw_ctx *hctx) { struct deadline_data *dd = hctx->queue->elevator->elevator_data; + enum dd_prio prio; - return !list_empty_careful(&dd->dispatch) || - !list_empty_careful(&dd->fifo_list[0]) || - !list_empty_careful(&dd->fifo_list[1]); + for (prio = 0; prio <= DD_PRIO_MAX; prio++) + if (dd_has_work_for_prio(&dd->per_prio[prio])) + return true; + + return false; } /* @@ -702,16 +785,17 @@ static struct elv_fs_entry deadline_attrs[] = { }; #ifdef CONFIG_BLK_DEBUG_FS -#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name) \ +#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name) \ static void *deadline_##name##_fifo_start(struct seq_file *m, \ loff_t *pos) \ __acquires(&dd->lock) \ { \ struct request_queue *q = m->private; \ struct deadline_data *dd = q->elevator->elevator_data; \ + struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ \ spin_lock(&dd->lock); \ - return seq_list_start(&dd->fifo_list[ddir], *pos); \ + return seq_list_start(&per_prio->fifo_list[data_dir], *pos); \ } \ \ static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \ @@ -719,8 +803,9 @@ static void *deadline_##name##_fifo_next(struct seq_file *m, void *v, \ { \ 
struct request_queue *q = m->private; \ struct deadline_data *dd = q->elevator->elevator_data; \ + struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ \ - return seq_list_next(v, &dd->fifo_list[ddir], pos); \ + return seq_list_next(v, &per_prio->fifo_list[data_dir], pos); \ } \ \ static void deadline_##name##_fifo_stop(struct seq_file *m, void *v) \ @@ -744,14 +829,20 @@ static int deadline_##name##_next_rq_show(void *data, \ { \ struct request_queue *q = data; \ struct deadline_data *dd = q->elevator->elevator_data; \ - struct request *rq = dd->next_rq[ddir]; \ + struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ + struct request *rq = per_prio->next_rq[data_dir]; \ \ if (rq) \ __blk_mq_debugfs_rq_show(m, rq); \ return 0; \ } -DEADLINE_DEBUGFS_DDIR_ATTRS(DD_READ, read) -DEADLINE_DEBUGFS_DDIR_ATTRS(DD_WRITE, write) + +DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0); +DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0); +DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1); +DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1); +DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2); +DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2); #undef DEADLINE_DEBUGFS_DDIR_ATTRS static int deadline_batching_show(void *data, struct seq_file *m) @@ -781,50 +872,74 @@ static int dd_async_depth_show(void *data, struct seq_file *m) return 0; } -static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos) - __acquires(&dd->lock) -{ - struct request_queue *q = m->private; - struct deadline_data *dd = q->elevator->elevator_data; - - spin_lock(&dd->lock); - return seq_list_start(&dd->dispatch, *pos); +#define DEADLINE_DISPATCH_ATTR(prio) \ +static void *deadline_dispatch##prio##_start(struct seq_file *m, \ + loff_t *pos) \ + __acquires(&dd->lock) \ +{ \ + struct request_queue *q = m->private; \ + struct deadline_data *dd = q->elevator->elevator_data; \ + struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ + \ + spin_lock(&dd->lock); \ + return seq_list_start(&per_prio->dispatch, *pos); \ +} \ + \ +static void *deadline_dispatch##prio##_next(struct seq_file *m, \ + void *v, loff_t *pos) \ +{ \ + struct request_queue *q = m->private; \ + struct deadline_data *dd = q->elevator->elevator_data; \ + struct dd_per_prio *per_prio = &dd->per_prio[prio]; \ + \ + return seq_list_next(v, &per_prio->dispatch, pos); \ +} \ + \ +static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \ + __releases(&dd->lock) \ +{ \ + struct request_queue *q = m->private; \ + struct deadline_data *dd = q->elevator->elevator_data; \ + \ + spin_unlock(&dd->lock); \ +} \ + \ +static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \ + .start = deadline_dispatch##prio##_start, \ + .next = deadline_dispatch##prio##_next, \ + .stop = deadline_dispatch##prio##_stop, \ + .show = blk_mq_debugfs_rq_show, \ } -static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos) -{ - struct request_queue *q = m->private; - struct deadline_data *dd = q->elevator->elevator_data; +DEADLINE_DISPATCH_ATTR(0); +DEADLINE_DISPATCH_ATTR(1); +DEADLINE_DISPATCH_ATTR(2); +#undef DEADLINE_DISPATCH_ATTR - return seq_list_next(v, &dd->dispatch, pos); -} - -static void deadline_dispatch_stop(struct seq_file *m, void *v) - __releases(&dd->lock) -{ - struct request_queue *q = m->private; - struct deadline_data *dd = q->elevator->elevator_data; - - spin_unlock(&dd->lock); -} - -static const struct seq_operations deadline_dispatch_seq_ops = { - .start = deadline_dispatch_start, - 
.next = deadline_dispatch_next, - .stop = deadline_dispatch_stop, - .show = blk_mq_debugfs_rq_show, -}; - -#define DEADLINE_QUEUE_DDIR_ATTRS(name) \ - {#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \ +#define DEADLINE_QUEUE_DDIR_ATTRS(name) \ + {#name "_fifo_list", 0400, \ + .seq_ops = &deadline_##name##_fifo_seq_ops} +#define DEADLINE_NEXT_RQ_ATTR(name) \ {#name "_next_rq", 0400, deadline_##name##_next_rq_show} static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = { - DEADLINE_QUEUE_DDIR_ATTRS(read), - DEADLINE_QUEUE_DDIR_ATTRS(write), + DEADLINE_QUEUE_DDIR_ATTRS(read0), + DEADLINE_QUEUE_DDIR_ATTRS(write0), + DEADLINE_QUEUE_DDIR_ATTRS(read1), + DEADLINE_QUEUE_DDIR_ATTRS(write1), + DEADLINE_QUEUE_DDIR_ATTRS(read2), + DEADLINE_QUEUE_DDIR_ATTRS(write2), + DEADLINE_NEXT_RQ_ATTR(read0), + DEADLINE_NEXT_RQ_ATTR(write0), + DEADLINE_NEXT_RQ_ATTR(read1), + DEADLINE_NEXT_RQ_ATTR(write1), + DEADLINE_NEXT_RQ_ATTR(read2), + DEADLINE_NEXT_RQ_ATTR(write2), {"batching", 0400, deadline_batching_show}, {"starved", 0400, deadline_starved_show}, {"async_depth", 0400, dd_async_depth_show}, - {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops}, + {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops}, + {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops}, + {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops}, {}, }; #undef DEADLINE_QUEUE_DDIR_ATTRS @@ -874,6 +989,6 @@ static void __exit deadline_exit(void) module_init(deadline_init); module_exit(deadline_exit); -MODULE_AUTHOR("Jens Axboe"); +MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MQ deadline IO scheduler"); From 6981c53a670e5f2e8a7b27cbafdc5b98df1d7296 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 11 Jun 2021 16:58:51 -0700 Subject: [PATCH 23/62] FROMGIT: block/mq-deadline: Track I/O statistics Track I/O statistics per I/O priority and export these statistics to debugfs. These statistics help developers of the deadline scheduler. Cc: Damien Le Moal Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Ming Lei Cc: Johannes Thumshirn Cc: Himanshu Madhani Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: I8e91693dc1d015060737fa2fc15f5f2ebee2530c (cherry picked from commit 9dc236caf2518c1e434be7a4f8fae60fb0be506a git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/mq-deadline.c | 99 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) diff --git a/block/mq-deadline.c b/block/mq-deadline.c index 50b9fdfd8f0a..c58ba5417329 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline.c @@ -49,6 +49,19 @@ enum dd_prio { enum { DD_PRIO_COUNT = 3 }; +/* I/O statistics per I/O priority. */ +struct io_stats_per_prio { + local_t inserted; + local_t merged; + local_t dispatched; + local_t completed; +}; + +/* I/O statistics for all I/O priorities (enum dd_prio). */ +struct io_stats { + struct io_stats_per_prio stats[DD_PRIO_COUNT]; +}; + /* * Deadline scheduler data per I/O priority (enum dd_prio). Requests are * present on both sort_list[] and fifo_list[]. 
@@ -73,6 +86,8 @@ struct deadline_data { unsigned int batching; /* number of sequential requests made */ unsigned int starved; /* times reads have starved writes */ + struct io_stats __percpu *stats; + /* * settings that change how the i/o scheduler behaves */ @@ -86,6 +101,33 @@ struct deadline_data { spinlock_t zone_lock; }; +/* Count one event of type 'event_type' and with I/O priority 'prio' */ +#define dd_count(dd, event_type, prio) do { \ + struct io_stats *io_stats = get_cpu_ptr((dd)->stats); \ + \ + BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \ + BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \ + local_inc(&io_stats->stats[(prio)].event_type); \ + put_cpu_ptr(io_stats); \ +} while (0) + +/* + * Returns the total number of dd_count(dd, event_type, prio) calls across all + * CPUs. No locking or barriers since it is fine if the returned sum is slightly + * outdated. + */ +#define dd_sum(dd, event_type, prio) ({ \ + unsigned int cpu; \ + u32 sum = 0; \ + \ + BUILD_BUG_ON(!__same_type((dd), struct deadline_data *)); \ + BUILD_BUG_ON(!__same_type((prio), enum dd_prio)); \ + for_each_present_cpu(cpu) \ + sum += local_read(&per_cpu_ptr((dd)->stats, cpu)-> \ + stats[(prio)].event_type); \ + sum; \ +}) + /* Maps an I/O priority class to a deadline scheduler priority. */ static const enum dd_prio ioprio_class_to_prio[] = { [IOPRIO_CLASS_NONE] = DD_BE_PRIO, @@ -189,6 +231,8 @@ static void dd_merged_requests(struct request_queue *q, struct request *req, const u8 ioprio_class = dd_rq_ioclass(next); const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; + dd_count(dd, merged, prio); + /* * if next expires before rq, assign its expire time to rq * and move into next position (next will be deleted) in fifo @@ -224,6 +268,12 @@ deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio, deadline_remove_request(rq->q, per_prio, rq); } +/* Number of requests queued for a given priority level. */ +static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio) +{ + return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio); +} + /* * deadline_check_fifo returns 0 if there are no expired requests on the fifo, * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir]) @@ -318,6 +368,8 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd, { struct request *rq, *next_rq; enum dd_data_dir data_dir; + enum dd_prio prio; + u8 ioprio_class; lockdep_assert_held(&dd->lock); @@ -407,6 +459,9 @@ dispatch_request: dd->batching++; deadline_move_request(dd, per_prio, rq); done: + ioprio_class = dd_rq_ioclass(rq); + prio = ioprio_class_to_prio[ioprio_class]; + dd_count(dd, dispatched, prio); /* * If the request needs its target zone locked, do it. 
*/ @@ -490,6 +545,8 @@ static void dd_exit_sched(struct elevator_queue *e) WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE])); } + free_percpu(dd->stats); + kfree(dd); } @@ -513,6 +570,11 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) eq->elevator_data = dd; + dd->stats = alloc_percpu_gfp(typeof(*dd->stats), + GFP_KERNEL | __GFP_ZERO); + if (!dd->stats) + goto free_dd; + for (prio = 0; prio <= DD_PRIO_MAX; prio++) { struct dd_per_prio *per_prio = &dd->per_prio[prio]; @@ -534,6 +596,9 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) q->elevator = eq; return 0; +free_dd: + kfree(dd); + put_eq: kobject_put(&eq->kobj); return ret; @@ -613,6 +678,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, blk_req_zone_write_unlock(rq); prio = ioprio_class_to_prio[ioprio_class]; + dd_count(dd, inserted, prio); if (blk_mq_sched_try_insert_merge(q, rq)) return; @@ -691,6 +757,8 @@ static void dd_finish_request(struct request *rq) const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; struct dd_per_prio *per_prio = &dd->per_prio[prio]; + dd_count(dd, completed, prio); + if (blk_queue_is_zoned(q)) { unsigned long flags; @@ -872,6 +940,35 @@ static int dd_async_depth_show(void *data, struct seq_file *m) return 0; } +static int dd_queued_show(void *data, struct seq_file *m) +{ + struct request_queue *q = data; + struct deadline_data *dd = q->elevator->elevator_data; + + seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO), + dd_queued(dd, DD_BE_PRIO), + dd_queued(dd, DD_IDLE_PRIO)); + return 0; +} + +/* Number of requests owned by the block driver for a given priority. */ +static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio) +{ + return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio) + - dd_sum(dd, completed, prio); +} + +static int dd_owned_by_driver_show(void *data, struct seq_file *m) +{ + struct request_queue *q = data; + struct deadline_data *dd = q->elevator->elevator_data; + + seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO), + dd_owned_by_driver(dd, DD_BE_PRIO), + dd_owned_by_driver(dd, DD_IDLE_PRIO)); + return 0; +} + #define DEADLINE_DISPATCH_ATTR(prio) \ static void *deadline_dispatch##prio##_start(struct seq_file *m, \ loff_t *pos) \ @@ -940,6 +1037,8 @@ static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = { {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops}, {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops}, {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops}, + {"owned_by_driver", 0400, dd_owned_by_driver_show}, + {"queued", 0400, dd_queued_show}, {}, }; #undef DEADLINE_QUEUE_DDIR_ATTRS From 855265c76ea998c89648846edd889a843f7bf2d3 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 11 Jun 2021 16:48:32 -0700 Subject: [PATCH 24/62] FROMGIT: block/mq-deadline: Add cgroup support Maintain statistics per cgroup and export these to user space. These statistics are essential for verifying whether the proper I/O priorities have been assigned to requests. 
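The per-cgroup counters reach user space through the blkcg policy framework: when io.stat is read, the cgroup core invokes each registered policy's pd_stat_fn callback and appends whatever it prints to that device's line. A minimal sketch of the contract (invented names; the real callback is dd_pd_stat() in the diff below):

	#include <linux/blk-cgroup.h>
	#include <linux/kernel.h>

	/* Text returned here is appended to the cgroup's io.stat line. */
	static size_t example_pd_stat(struct blkg_policy_data *pd, char *buf,
				      size_t size)
	{
		return scnprintf(buf, size, " example_events=%u", 42);
	}

	static struct blkcg_policy example_blkcg_policy = {
		.pd_stat_fn	= example_pd_stat,
		/* .pd_alloc_fn, .pd_free_fn, etc. omitted from this sketch */
	};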
An example of the statistics data with this patch applied: $ cat /sys/fs/cgroup/io.stat 11:2 rbytes=0 wbytes=0 rios=3 wios=0 dbytes=0 dios=0 [NONE] dispatched=0 inserted=0 merged=171 [RT] dispatched=0 inserted=0 merged=0 [BE] dispatched=0 inserted=0 merged=0 [IDLE] dispatched=0 inserted=0 merged=0 8:32 rbytes=2142720 wbytes=0 rios=105 wios=0 dbytes=0 dios=0 [NONE] dispatched=0 inserted=0 merged=171 [RT] dispatched=0 inserted=0 merged=0 [BE] dispatched=0 inserted=0 merged=0 [IDLE] dispatched=0 inserted=0 merged=0 Cc: Damien Le Moal Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Ming Lei Cc: Johannes Thumshirn Cc: Himanshu Madhani Signed-off-by: Bart Van Assche BUG: 187357408 Change-Id: I8d976c62ba2c0397cbb18076f3e61d5ab246cbcf (cherry picked from commit f5dc926252cb31739809f7d27a8cbc9941b4d36d git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/Kconfig.iosched | 6 + block/Makefile | 2 + block/mq-deadline-cgroup.c | 126 ++++++++++++++++++++ block/mq-deadline-cgroup.h | 114 ++++++++++++++++++ block/{mq-deadline.c => mq-deadline-main.c} | 73 +++++++++--- 5 files changed, 307 insertions(+), 14 deletions(-) create mode 100644 block/mq-deadline-cgroup.c create mode 100644 block/mq-deadline-cgroup.h rename block/{mq-deadline.c => mq-deadline-main.c} (95%) diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 2f2158e05a91..64053d67a97b 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -9,6 +9,12 @@ config MQ_IOSCHED_DEADLINE help MQ version of the deadline IO scheduler. +config MQ_IOSCHED_DEADLINE_CGROUP + tristate + default y + depends on MQ_IOSCHED_DEADLINE + depends on BLK_CGROUP + config MQ_IOSCHED_KYBER tristate "Kyber I/O scheduler" default y diff --git a/block/Makefile b/block/Makefile index af3d044abaf1..b9db5d4edfc8 100644 --- a/block/Makefile +++ b/block/Makefile @@ -21,6 +21,8 @@ obj-$(CONFIG_BLK_CGROUP_IOPRIO) += blk-ioprio.o obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o obj-$(CONFIG_BLK_CGROUP_IOCOST) += blk-iocost.o obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o +mq-deadline-y += mq-deadline-main.o +mq-deadline-$(CONFIG_MQ_IOSCHED_DEADLINE_CGROUP)+= mq-deadline-cgroup.o obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o obj-$(CONFIG_IOSCHED_BFQ) += bfq.o diff --git a/block/mq-deadline-cgroup.c b/block/mq-deadline-cgroup.c new file mode 100644 index 000000000000..3b4bfddec39f --- /dev/null +++ b/block/mq-deadline-cgroup.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include "mq-deadline-cgroup.h" + +static struct blkcg_policy dd_blkcg_policy; + +static struct blkcg_policy_data *dd_cpd_alloc(gfp_t gfp) +{ + struct dd_blkcg *pd; + + pd = kzalloc(sizeof(*pd), gfp); + if (!pd) + return NULL; + pd->stats = alloc_percpu_gfp(typeof(*pd->stats), + GFP_KERNEL | __GFP_ZERO); + if (!pd->stats) { + kfree(pd); + return NULL; + } + return &pd->cpd; +} + +static void dd_cpd_free(struct blkcg_policy_data *cpd) +{ + struct dd_blkcg *dd_blkcg = container_of(cpd, typeof(*dd_blkcg), cpd); + + free_percpu(dd_blkcg->stats); + kfree(dd_blkcg); +} + +static struct dd_blkcg *dd_blkcg_from_pd(struct blkg_policy_data *pd) +{ + return container_of(blkcg_to_cpd(pd->blkg->blkcg, &dd_blkcg_policy), + struct dd_blkcg, cpd); +} + +/* + * Convert an association between a block cgroup and a request queue into a + * pointer to the mq-deadline information associated with a (blkcg, queue) pair. 
+ */ +struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio) +{ + struct blkg_policy_data *pd; + + pd = blkg_to_pd(bio->bi_blkg, &dd_blkcg_policy); + if (!pd) + return NULL; + + return dd_blkcg_from_pd(pd); +} + +static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size) +{ + static const char *const prio_class_name[] = { + [IOPRIO_CLASS_NONE] = "NONE", + [IOPRIO_CLASS_RT] = "RT", + [IOPRIO_CLASS_BE] = "BE", + [IOPRIO_CLASS_IDLE] = "IDLE", + }; + struct dd_blkcg *blkcg = dd_blkcg_from_pd(pd); + int res = 0; + u8 prio; + + for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++) + res += scnprintf(buf + res, size - res, + " [%s] dispatched=%u inserted=%u merged=%u", + prio_class_name[prio], + ddcg_sum(blkcg, dispatched, prio) + + ddcg_sum(blkcg, merged, prio) - + ddcg_sum(blkcg, completed, prio), + ddcg_sum(blkcg, inserted, prio) - + ddcg_sum(blkcg, completed, prio), + ddcg_sum(blkcg, merged, prio)); + + return res; +} + +static struct blkg_policy_data *dd_pd_alloc(gfp_t gfp, struct request_queue *q, + struct blkcg *blkcg) +{ + struct dd_blkg *pd; + + pd = kzalloc(sizeof(*pd), gfp); + if (!pd) + return NULL; + return &pd->pd; +} + +static void dd_pd_free(struct blkg_policy_data *pd) +{ + struct dd_blkg *dd_blkg = container_of(pd, typeof(*dd_blkg), pd); + + kfree(dd_blkg); +} + +static struct blkcg_policy dd_blkcg_policy = { + .cpd_alloc_fn = dd_cpd_alloc, + .cpd_free_fn = dd_cpd_free, + + .pd_alloc_fn = dd_pd_alloc, + .pd_free_fn = dd_pd_free, + .pd_stat_fn = dd_pd_stat, +}; + +int dd_activate_policy(struct request_queue *q) +{ + return blkcg_activate_policy(q, &dd_blkcg_policy); +} + +void dd_deactivate_policy(struct request_queue *q) +{ + blkcg_deactivate_policy(q, &dd_blkcg_policy); +} + +int __init dd_blkcg_init(void) +{ + return blkcg_policy_register(&dd_blkcg_policy); +} + +void __exit dd_blkcg_exit(void) +{ + blkcg_policy_unregister(&dd_blkcg_policy); +} diff --git a/block/mq-deadline-cgroup.h b/block/mq-deadline-cgroup.h new file mode 100644 index 000000000000..0143fd74f3ce --- /dev/null +++ b/block/mq-deadline-cgroup.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#if !defined(_MQ_DEADLINE_CGROUP_H_) +#define _MQ_DEADLINE_CGROUP_H_ + +#include + +struct request_queue; + +/** + * struct io_stats_per_prio - I/O statistics per I/O priority class. + * @inserted: Number of inserted requests. + * @merged: Number of merged requests. + * @dispatched: Number of dispatched requests. + * @completed: Number of I/O completions. + */ +struct io_stats_per_prio { + local_t inserted; + local_t merged; + local_t dispatched; + local_t completed; +}; + +/* I/O statistics per I/O cgroup per I/O priority class (IOPRIO_CLASS_*). */ +struct blkcg_io_stats { + struct io_stats_per_prio stats[4]; +}; + +/** + * struct dd_blkcg - Per cgroup data. + * @cpd: blkcg_policy_data structure. + * @stats: I/O statistics. + */ +struct dd_blkcg { + struct blkcg_policy_data cpd; /* must be the first member */ + struct blkcg_io_stats __percpu *stats; +}; + +/* + * Count one event of type 'event_type' and with I/O priority class + * 'prio_class'. 
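+ *
+ * Note that @ddcg may be NULL when no cgroup policy data is attached;
+ * the macro is then a no-op, which keeps the call sites unconditional.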
+ */ +#define ddcg_count(ddcg, event_type, prio_class) do { \ +if (ddcg) { \ + struct blkcg_io_stats *io_stats = get_cpu_ptr((ddcg)->stats); \ + \ + BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *)); \ + BUILD_BUG_ON(!__same_type((prio_class), u8)); \ + local_inc(&io_stats->stats[(prio_class)].event_type); \ + put_cpu_ptr(io_stats); \ +} \ +} while (0) + +/* + * Returns the total number of ddcg_count(ddcg, event_type, prio_class) calls + * across all CPUs. No locking or barriers since it is fine if the returned + * sum is slightly outdated. + */ +#define ddcg_sum(ddcg, event_type, prio) ({ \ + unsigned int cpu; \ + u32 sum = 0; \ + \ + BUILD_BUG_ON(!__same_type((ddcg), struct dd_blkcg *)); \ + BUILD_BUG_ON(!__same_type((prio), u8)); \ + for_each_present_cpu(cpu) \ + sum += local_read(&per_cpu_ptr((ddcg)->stats, cpu)-> \ + stats[(prio)].event_type); \ + sum; \ +}) + +#ifdef CONFIG_BLK_CGROUP + +/** + * struct dd_blkg - Per (cgroup, request queue) data. + * @pd: blkg_policy_data structure. + */ +struct dd_blkg { + struct blkg_policy_data pd; /* must be the first member */ +}; + +struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio); +int dd_activate_policy(struct request_queue *q); +void dd_deactivate_policy(struct request_queue *q); +int __init dd_blkcg_init(void); +void __exit dd_blkcg_exit(void); + +#else /* CONFIG_BLK_CGROUP */ + +static inline struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio) +{ + return NULL; +} + +static inline int dd_activate_policy(struct request_queue *q) +{ + return 0; +} + +static inline void dd_deactivate_policy(struct request_queue *q) +{ +} + +static inline int dd_blkcg_init(void) +{ + return 0; +} + +static inline void dd_blkcg_exit(void) +{ +} + +#endif /* CONFIG_BLK_CGROUP */ + +#endif /* _MQ_DEADLINE_CGROUP_H_ */ diff --git a/block/mq-deadline.c b/block/mq-deadline-main.c similarity index 95% rename from block/mq-deadline.c rename to block/mq-deadline-main.c index c58ba5417329..7134f1a4240a 100644 --- a/block/mq-deadline.c +++ b/block/mq-deadline-main.c @@ -23,6 +23,7 @@ #include "blk-mq-debugfs.h" #include "blk-mq-tag.h" #include "blk-mq-sched.h" +#include "mq-deadline-cgroup.h" /* * See Documentation/block/deadline-iosched.rst @@ -49,14 +50,6 @@ enum dd_prio { enum { DD_PRIO_COUNT = 3 }; -/* I/O statistics per I/O priority. */ -struct io_stats_per_prio { - local_t inserted; - local_t merged; - local_t dispatched; - local_t completed; -}; - /* I/O statistics for all I/O priorities (enum dd_prio). */ struct io_stats { struct io_stats_per_prio stats[DD_PRIO_COUNT]; @@ -79,6 +72,9 @@ struct deadline_data { * run time data */ + /* Request queue that owns this data structure. */ + struct request_queue *queue; + struct dd_per_prio per_prio[DD_PRIO_COUNT]; /* Data direction of latest dispatched request. 
*/ @@ -230,8 +226,10 @@ static void dd_merged_requests(struct request_queue *q, struct request *req, struct deadline_data *dd = q->elevator->elevator_data; const u8 ioprio_class = dd_rq_ioclass(next); const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; + struct dd_blkcg *blkcg = next->elv.priv[0]; dd_count(dd, merged, prio); + ddcg_count(blkcg, merged, ioprio_class); /* * if next expires before rq, assign its expire time to rq @@ -368,6 +366,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd, { struct request *rq, *next_rq; enum dd_data_dir data_dir; + struct dd_blkcg *blkcg; enum dd_prio prio; u8 ioprio_class; @@ -462,6 +461,8 @@ done: ioprio_class = dd_rq_ioclass(rq); prio = ioprio_class_to_prio[ioprio_class]; dd_count(dd, dispatched, prio); + blkcg = rq->elv.priv[0]; + ddcg_count(blkcg, dispatched, ioprio_class); /* * If the request needs its target zone locked, do it. */ @@ -538,6 +539,8 @@ static void dd_exit_sched(struct elevator_queue *e) struct deadline_data *dd = e->elevator_data; enum dd_prio prio; + dd_deactivate_policy(dd->queue); + for (prio = 0; prio <= DD_PRIO_MAX; prio++) { struct dd_per_prio *per_prio = &dd->per_prio[prio]; @@ -551,7 +554,7 @@ static void dd_exit_sched(struct elevator_queue *e) } /* - * initialize elevator private data (deadline_data). + * Initialize elevator private data (deadline_data) and associate with blkcg. */ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) { @@ -560,6 +563,12 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) enum dd_prio prio; int ret = -ENOMEM; + /* + * Initialization would be very tricky if the queue is not frozen, + * hence the warning statement below. + */ + WARN_ON_ONCE(!percpu_ref_is_zero(&q->q_usage_counter)); + eq = elevator_alloc(q, e); if (!eq) return ret; @@ -575,6 +584,8 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) if (!dd->stats) goto free_dd; + dd->queue = q; + for (prio = 0; prio <= DD_PRIO_MAX; prio++) { struct dd_per_prio *per_prio = &dd->per_prio[prio]; @@ -593,9 +604,17 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) spin_lock_init(&dd->lock); spin_lock_init(&dd->zone_lock); + ret = dd_activate_policy(q); + if (ret) + goto free_stats; + + ret = 0; q->elevator = eq; return 0; +free_stats: + free_percpu(dd->stats); + free_dd: kfree(dd); @@ -668,6 +687,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio); struct dd_per_prio *per_prio; enum dd_prio prio; + struct dd_blkcg *blkcg; lockdep_assert_held(&dd->lock); @@ -677,8 +697,18 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, */ blk_req_zone_write_unlock(rq); + /* + * If a block cgroup has been associated with the submitter and if an + * I/O priority has been set in the associated block cgroup, use the + * lowest of the cgroup priority and the request priority for the + * request. If no priority has been set in the request, use the cgroup + * priority. + */ prio = ioprio_class_to_prio[ioprio_class]; dd_count(dd, inserted, prio); + blkcg = dd_blkcg_from_bio(rq->bio); + ddcg_count(blkcg, inserted, ioprio_class); + rq->elv.priv[0] = blkcg; if (blk_mq_sched_try_insert_merge(q, rq)) return; @@ -725,12 +755,10 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx, spin_unlock(&dd->lock); } -/* - * Nothing to do here. This is defined only to ensure that .finish_request - * method is called upon request completion. 
- */ +/* Callback from inside blk_mq_rq_ctx_init(). */ static void dd_prepare_request(struct request *rq) { + rq->elv.priv[0] = NULL; } /* @@ -753,11 +781,13 @@ static void dd_finish_request(struct request *rq) { struct request_queue *q = rq->q; struct deadline_data *dd = q->elevator->elevator_data; + struct dd_blkcg *blkcg = rq->elv.priv[0]; const u8 ioprio_class = dd_rq_ioclass(rq); const enum dd_prio prio = ioprio_class_to_prio[ioprio_class]; struct dd_per_prio *per_prio = &dd->per_prio[prio]; dd_count(dd, completed, prio); + ddcg_count(blkcg, completed, ioprio_class); if (blk_queue_is_zoned(q)) { unsigned long flags; @@ -1077,11 +1107,26 @@ MODULE_ALIAS("mq-deadline-iosched"); static int __init deadline_init(void) { - return elv_register(&mq_deadline); + int ret; + + ret = elv_register(&mq_deadline); + if (ret) + goto out; + ret = dd_blkcg_init(); + if (ret) + goto unreg; + +out: + return ret; + +unreg: + elv_unregister(&mq_deadline); + goto out; } static void __exit deadline_exit(void) { + dd_blkcg_exit(); elv_unregister(&mq_deadline); } From e35b90b784b6a1a2e7b5d167d0de6d37af10d1f8 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 27 May 2021 16:53:27 -0700 Subject: [PATCH 25/62] FROMGIT: block/mq-deadline: Prioritize high-priority requests While one or more requests with a certain I/O priority are pending, do not dispatch lower priority requests. Dispatch lower priority requests anyway after the "aging" time has expired. This patch has been tested as follows: modprobe scsi_debug ndelay=1000000 max_queue=16 && sd='' && while [ -z "$sd" ]; do sd=/dev/$(basename /sys/bus/pseudo/drivers/scsi_debug/adapter*/host*/target*/*/block/*) done && echo $((100*1000)) > /sys/block/$sd/queue/iosched/aging_expire && cd /sys/fs/cgroup/blkio/ && echo $$ >cgroup.procs && echo restrict-to-be >blkio.prio.class && mkdir -p hipri && cd hipri && echo none-to-rt >blkio.prio.class && { max-iops -a1 -d32 -j1 -e mq-deadline $sd >& ~/low-pri.txt & } && echo $$ >cgroup.procs && max-iops -a1 -d32 -j1 -e mq-deadline $sd >& ~/hi-pri.txt Result: * 11000 IOPS for the high-priority job * 40 IOPS for the low-priority job If the aging expiry time is changed from 100s into 0, the IOPS results change into 6712 and 6796 IOPS. The max-iops script is a script that runs fio with the following arguments: --bs=4K --gtod_reduce=1 --ioengine=libaio --ioscheduler=${arg_e} --runtime=60 --norandommap --rw=read --thread --buffered=0 --numjobs=${arg_j} --iodepth=${arg_d} --iodepth_batch_submit=${arg_a} --iodepth_batch_complete=$((arg_d / 2)) --name=${positional_argument_1} --filename=${positional_argument_1} Reviewed-by: Damien Le Moal Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Ming Lei Cc: Johannes Thumshirn Cc: Himanshu Madhani Signed-off-by: Bart Van Assche Change-Id: I99a0674b018d096ec96bbfa3008eedcfda5013da BUG: 187357408 (cherry picked from commit 40d5d42992b0de3ae7961735ea15eef5bd385ebf git://git.kernel.dk/linux-block/ for-5.14/block) Signed-off-by: Bart Van Assche --- block/mq-deadline-main.c | 42 +++++++++++++++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 5 deletions(-) diff --git a/block/mq-deadline-main.c b/block/mq-deadline-main.c index 7134f1a4240a..ada893d6374d 100644 --- a/block/mq-deadline-main.c +++ b/block/mq-deadline-main.c @@ -30,6 +30,11 @@ */ static const int read_expire = HZ / 2; /* max time before a read is submitted. */ static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! 
*/ +/* + * Time after which to dispatch lower priority requests even if higher + * priority requests are pending. + */ +static const int aging_expire = 10 * HZ; static const int writes_starved = 2; /* max times reads can starve a write */ static const int fifo_batch = 16; /* # of sequential requests treated as one by the above parameters. For throughput. */ @@ -92,6 +97,7 @@ struct deadline_data { int writes_starved; int front_merges; u32 async_depth; + int aging_expire; spinlock_t lock; spinlock_t zone_lock; @@ -359,10 +365,11 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio, /* * deadline_dispatch_requests selects the best request according to - * read/write expire, fifo_batch, etc + * read/write expire, fifo_batch, etc and with a start time <= @latest. */ static struct request *__dd_dispatch_request(struct deadline_data *dd, - struct dd_per_prio *per_prio) + struct dd_per_prio *per_prio, + u64 latest_start_ns) { struct request *rq, *next_rq; enum dd_data_dir data_dir; @@ -375,6 +382,8 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd, if (!list_empty(&per_prio->dispatch)) { rq = list_first_entry(&per_prio->dispatch, struct request, queuelist); + if (rq->start_time_ns > latest_start_ns) + return NULL; list_del_init(&rq->queuelist); goto done; } @@ -452,6 +461,8 @@ dispatch_find_request: dd->batching = 0; dispatch_request: + if (rq->start_time_ns > latest_start_ns) + return NULL; /* * rq is the selected appropriate request. */ @@ -482,15 +493,32 @@ done: static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx) { struct deadline_data *dd = hctx->queue->elevator->elevator_data; - struct request *rq; + const u64 now_ns = ktime_get_ns(); + struct request *rq = NULL; enum dd_prio prio; spin_lock(&dd->lock); - for (prio = 0; prio <= DD_PRIO_MAX; prio++) { - rq = __dd_dispatch_request(dd, &dd->per_prio[prio]); + /* + * Start with dispatching requests whose deadline expired more than + * aging_expire jiffies ago. + */ + for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) { + rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns - + jiffies_to_nsecs(dd->aging_expire)); if (rq) + goto unlock; + } + /* + * Next, dispatch requests in priority order. Ignore lower priority + * requests if any higher priority requests are pending. 
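+ *
+ * The dd_queued() test in the break condition below is what enforces
+ * this: the loop stops at the first priority level that either yields
+ * a request or still has requests queued, so a lower level is only
+ * reached when every higher level is completely idle.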
+ */ + for (prio = 0; prio <= DD_PRIO_MAX; prio++) { + rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns); + if (rq || dd_queued(dd, prio)) break; } + +unlock: spin_unlock(&dd->lock); return rq; @@ -601,6 +629,7 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e) dd->front_merges = 1; dd->last_dir = DD_WRITE; dd->fifo_batch = fifo_batch; + dd->aging_expire = aging_expire; spin_lock_init(&dd->lock); spin_lock_init(&dd->zone_lock); @@ -832,6 +861,7 @@ static ssize_t __FUNC(struct elevator_queue *e, char *page) \ #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR)) SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]); SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]); +SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire); SHOW_INT(deadline_writes_starved_show, dd->writes_starved); SHOW_INT(deadline_front_merges_show, dd->front_merges); SHOW_INT(deadline_async_depth_show, dd->front_merges); @@ -861,6 +891,7 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies) STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX); STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX); +STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX); STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX); STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1); STORE_INT(deadline_async_depth_store, &dd->front_merges, 1, INT_MAX); @@ -879,6 +910,7 @@ static struct elv_fs_entry deadline_attrs[] = { DD_ATTR(front_merges), DD_ATTR(async_depth), DD_ATTR(fifo_batch), + DD_ATTR(aging_expire), __ATTR_NULL }; From 791e1c6968f60f8d46d63279dc7c285de47fe44c Mon Sep 17 00:00:00 2001 From: Daejun Park Date: Wed, 16 Jun 2021 17:36:38 +0900 Subject: [PATCH 26/62] FROMLIST: scsi: ufs: Introduce HPB feature This is a patch for the HPB initialization and adds HPB function calls to UFS core driver. NAND flash-based storage devices, including UFS, have mechanisms to translate logical addresses of IO requests to the corresponding physical addresses of the flash storage. In UFS, Logical-address-to-Physical-address (L2P) map data, which is required to identify the physical address for the requested IOs, can only be partially stored in SRAM from NAND flash. Due to this partial loading, accessing the flash address area where the L2P information for that address is not loaded in the SRAM can result in serious performance degradation. The basic concept of HPB is to cache L2P mapping entries in host system memory so that both physical block address (PBA) and logical block address (LBA) can be delivered in HPB read command. The HPB READ command allows to read data faster than a read command in UFS since it provides the physical address (HPB Entry) of the desired logical block in addition to its logical address. The UFS device can access the physical block in NAND directly without searching and uploading L2P mapping table. This improves read performance because the NAND read operation for uploading L2P mapping table is removed. In HPB initialization, the host checks if the UFS device supports HPB feature and retrieves related device capabilities. Then, some HPB parameters are configured in the device. We measured the total start-up time of popular applications and observed the difference by enabling the HPB. 
Popular applications are 12 game apps and 24 non-game apps. Each target
application was launched in order. The cycle consists of running 36
applications in sequence. We repeated the cycle to observe the performance
improvement from L2P mapping cache hits in HPB.

The following is the experiment environment:
 - kernel version: 4.4.0
 - RAM: 8GB
 - UFS 2.1 (64GB)

Result:
+-------+----------+----------+-------+
| cycle | baseline | with HPB | diff  |
+-------+----------+----------+-------+
| 1     | 272.4    | 264.9    | -7.5  |
| 2     | 250.4    | 248.2    | -2.2  |
| 3     | 226.2    | 215.6    | -10.6 |
| 4     | 230.6    | 214.8    | -15.8 |
| 5     | 232.0    | 218.1    | -13.9 |
| 6     | 231.9    | 212.6    | -19.3 |
+-------+----------+----------+-------+

We also measured HPB performance using iozone. Here is my iozone script:

iozone -r 4k -+n -i2 -ecI -t 16 -l 16 -u 16 -s $IO_RANGE/16 -F mnt/tmp_1 mnt/tmp_2 mnt/tmp_3 mnt/tmp_4 mnt/tmp_5 mnt/tmp_6 mnt/tmp_7 mnt/tmp_8 mnt/tmp_9 mnt/tmp_10 mnt/tmp_11 mnt/tmp_12 mnt/tmp_13 mnt/tmp_14 mnt/tmp_15 mnt/tmp_16

Result:
+----------+--------+---------+
| IO range | HPB on | HPB off |
+----------+--------+---------+
| 1 GB     | 294.8  | 300.87  |
| 4 GB     | 293.51 | 179.35  |
| 8 GB     | 294.85 | 162.52  |
| 16 GB    | 293.45 | 156.26  |
| 32 GB    | 277.4  | 153.25  |
+----------+--------+---------+

Bug: 183467926
Bug: 170940265
Bug: 183454255
Link: https://lore.kernel.org/linux-scsi/20210616070812epcms2p4650ce5cd78056dce9162482e59bb74dd@epcms2p4/
Reviewed-by: Greg Kroah-Hartman
Reviewed-by: Bart Van Assche
Reviewed-by: Can Guo
Reviewed-by: Bean Huo
Reviewed-by: Stanley Chu
Acked-by: Avri Altman
Tested-by: Bean Huo
Tested-by: Can Guo
Tested-by: Stanley Chu
Reported-by: kernel test robot
Signed-off-by: Daejun Park
Change-Id: Ib198ff9844fc78c718d1c8e2a98fa13cc7b05f35
---
 Documentation/ABI/testing/sysfs-driver-ufs | 127 +++++
 drivers/scsi/ufs/Kconfig                   |   9 +
 drivers/scsi/ufs/Makefile                  |   1 +
 drivers/scsi/ufs/ufs-sysfs.c               |  18 +
 drivers/scsi/ufs/ufs.h                     |  14 +
 drivers/scsi/ufs/ufshcd.c                  |  48 ++
 drivers/scsi/ufs/ufshcd.h                  |  23 +
 drivers/scsi/ufs/ufshpb.c                  | 569 +++++++++++++++++++++
 drivers/scsi/ufs/ufshpb.h                  | 167 ++++++
 9 files changed, 976 insertions(+)
 create mode 100644 drivers/scsi/ufs/ufshpb.c
 create mode 100644 drivers/scsi/ufs/ufshpb.h

diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index f57422c58385..9c3188bf2a53 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -1279,3 +1279,130 @@ Description:	This entry shows the configured size of WriteBooster buffer.
 		0400h corresponds to 4GB.
 		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/device_descriptor/hpb_version
+Date:		June 2021
+Contact:	Daejun Park
+Description:	This entry shows the HPB specification version.
+		The full information about the descriptor could be found at UFS
+		HPB (Host Performance Booster) Extension specifications.
+		Example: version 1.2.3 = 0123h
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/device_descriptor/hpb_control
+Date:		June 2021
+Contact:	Daejun Park
+Description:	This entry shows an indication of the HPB control mode.
+		00h: Host control mode
+		01h: Device control mode
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_region_size
+Date:		June 2021
+Contact:	Daejun Park
+Description:	This entry shows the bHPBRegionSize which can be calculated
+		as in the following (in bytes):
+		HPB Region size = 512B * 2^bHPBRegionSize
+
+		The file is read only.
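(Worked example of the formula above, with illustrative descriptor values: bHPBRegionSize = 0Ch gives 512B * 2^12 = 2MiB per region; 0Fh would give 512B * 2^15 = 16MiB.)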
+ +What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_number_lu +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the maximum number of HPB LU supported by + the device. + 00h: HPB is not supported by the device. + 01h ~ 20h: Maximum number of HPB LU supported by the device + + The file is read only. + +What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_subregion_size +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the bHPBSubRegionSize, which can be + calculated as in the following (in bytes) and shall be a multiple of + logical block size: + HPB Sub-Region size = 512B x 2^bHPBSubRegionSize + bHPBSubRegionSize shall not exceed bHPBRegionSize. + + The file is read only. + +What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_max_active_regions +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the maximum number of active HPB regions that + is supported by the device. + + The file is read only. + +What: /sys/class/scsi_device/*/device/unit_descriptor/hpb_lu_max_active_regions +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the maximum number of HPB regions assigned to + the HPB logical unit. + + The file is read only. + +What: /sys/class/scsi_device/*/device/unit_descriptor/hpb_pinned_region_start_offset +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the start offset of HPB pinned region. + + The file is read only. + +What: /sys/class/scsi_device/*/device/unit_descriptor/hpb_number_pinned_regions +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the number of HPB pinned regions assigned to + the HPB logical unit. + + The file is read only. + +What: /sys/class/scsi_device/*/device/hpb_stats/hit_cnt +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the number of reads that changed to HPB read. + + The file is read only. + +What: /sys/class/scsi_device/*/device/hpb_stats/miss_cnt +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the number of reads that cannot be changed to + HPB read. + + The file is read only. + +What: /sys/class/scsi_device/*/device/hpb_stats/rb_noti_cnt +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the number of response UPIUs that has + recommendations for activating sub-regions and/or inactivating region. + + The file is read only. + +What: /sys/class/scsi_device/*/device/hpb_stats/rb_active_cnt +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the number of active sub-regions recommended by + response UPIUs. + + The file is read only. + +What: /sys/class/scsi_device/*/device/hpb_stats/rb_inactive_cnt +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the number of inactive regions recommended by + response UPIUs. + + The file is read only. + +What: /sys/class/scsi_device/*/device/hpb_stats/map_req_cnt +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the number of read buffer commands for + activating sub-regions recommended by response UPIUs. + + The file is read only. diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index b0bb549167c6..87900016448f 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -183,3 +183,12 @@ config SCSI_UFS_CRYPTO Enabling this makes it possible for the kernel to use the crypto capabilities of the UFS device (if present) to perform crypto operations on data being transferred to/from the device. 
+ +config SCSI_UFS_HPB + bool "Support UFS Host Performance Booster" + depends on SCSI_UFSHCD + help + The UFS HPB feature improves random read performance. It caches + L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB + read command by piggybacking physical page number for bypassing FTL (flash + translation layer)'s L2P address translation. diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 06f3a3fe4a44..cce9b3916f5b 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -8,6 +8,7 @@ ufshcd-core-y += ufshcd.o ufs-sysfs.o ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o +ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c index 96a192ca09ee..7a58cb4e0946 100644 --- a/drivers/scsi/ufs/ufs-sysfs.c +++ b/drivers/scsi/ufs/ufs-sysfs.c @@ -522,6 +522,8 @@ UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2); UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1); UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4); UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1); +UFS_DEVICE_DESC_PARAM(hpb_version, _HPB_VER, 2); +UFS_DEVICE_DESC_PARAM(hpb_control, _HPB_CONTROL, 1); UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4); UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1); UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1); @@ -554,6 +556,8 @@ static struct attribute *ufs_sysfs_device_descriptor[] = { &dev_attr_number_of_secure_wpa.attr, &dev_attr_psa_max_data_size.attr, &dev_attr_psa_state_timeout.attr, + &dev_attr_hpb_version.attr, + &dev_attr_hpb_control.attr, &dev_attr_ext_feature_sup.attr, &dev_attr_wb_presv_us_en.attr, &dev_attr_wb_type.attr, @@ -627,6 +631,10 @@ UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units, _ENM4_MAX_NUM_UNITS, 4); UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor, _ENM4_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(hpb_region_size, _HPB_REGION_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_number_lu, _HPB_NUMBER_LU, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_subregion_size, _HPB_SUBREGION_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_max_active_regions, _HPB_MAX_ACTIVE_REGS, 2); UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4); UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1); UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1); @@ -664,6 +672,10 @@ static struct attribute *ufs_sysfs_geometry_descriptor[] = { &dev_attr_enh3_memory_capacity_adjustment_factor.attr, &dev_attr_enh4_memory_max_alloc_units.attr, &dev_attr_enh4_memory_capacity_adjustment_factor.attr, + &dev_attr_hpb_region_size.attr, + &dev_attr_hpb_number_lu.attr, + &dev_attr_hpb_subregion_size.attr, + &dev_attr_hpb_max_active_regions.attr, &dev_attr_wb_max_alloc_units.attr, &dev_attr_wb_max_wb_luns.attr, &dev_attr_wb_buff_cap_adj.attr, @@ -1048,6 +1060,9 @@ UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1); UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8); UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2); UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1); +UFS_UNIT_DESC_PARAM(hpb_lu_max_active_regions, _HPB_LU_MAX_ACTIVE_RGNS, 2); +UFS_UNIT_DESC_PARAM(hpb_pinned_region_start_offset, 
_HPB_PIN_RGN_START_OFF, 2); +UFS_UNIT_DESC_PARAM(hpb_number_pinned_regions, _HPB_NUM_PIN_RGNS, 2); UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4); @@ -1065,6 +1080,9 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = { &dev_attr_physical_memory_resourse_count.attr, &dev_attr_context_capabilities.attr, &dev_attr_large_unit_granularity.attr, + &dev_attr_hpb_lu_max_active_regions.attr, + &dev_attr_hpb_pinned_region_start_offset.attr, + &dev_attr_hpb_number_pinned_regions.attr, &dev_attr_wb_buf_alloc_units.attr, NULL, }; diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 7f1d4c73b674..12ee42ef4eb8 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -122,6 +122,7 @@ enum flag_idn { QUERY_FLAG_IDN_WB_EN = 0x0E, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F, QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10, + QUERY_FLAG_IDN_HPB_RESET = 0x11, }; /* Attribute idn for Query requests */ @@ -195,6 +196,9 @@ enum unit_desc_param { UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18, UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20, UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 0x23, + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 0x25, + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 0x27, UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29, }; @@ -235,6 +239,8 @@ enum device_desc_param { DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25, DEVICE_DESC_PARAM_PSA_TMT = 0x29, DEVICE_DESC_PARAM_PRDCT_REV = 0x2A, + DEVICE_DESC_PARAM_HPB_VER = 0x40, + DEVICE_DESC_PARAM_HPB_CONTROL = 0x42, DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F, DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53, DEVICE_DESC_PARAM_WB_TYPE = 0x54, @@ -283,6 +289,10 @@ enum geometry_desc_param { GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E, GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42, GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44, + GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 0x48, + GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 0x49, + GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 0x4A, + GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B, GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F, GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53, GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54, @@ -327,8 +337,10 @@ enum { /* Possible values for dExtendedUFSFeaturesSupport */ enum { + UFS_DEV_HPB_SUPPORT = BIT(7), UFS_DEV_WRITE_BOOSTER_SUP = BIT(8), }; +#define UFS_DEV_HPB_SUPPORT_VERSION 0x310 #define POWER_DESC_MAX_SIZE 0x62 #define POWER_DESC_MAX_ACTV_ICC_LVLS 16 @@ -543,6 +555,8 @@ struct ufs_dev_info { u32 d_wb_alloc_units; bool b_rpm_dev_flush_capable; u8 b_presrv_uspc_en; + /* UFS HPB related flag */ + bool hpb_enabled; }; /** diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 12060ccc15de..8d3283c82b97 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -23,6 +23,7 @@ #include "ufs-debugfs.h" #include "ufs_bsg.h" #include "ufshcd-crypto.h" +#include "ufshpb.h" #include #include @@ -4908,6 +4909,25 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) return scsi_change_queue_depth(sdev, depth); } +static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev) +{ + /* skip well-known LU */ + if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || !ufshpb_is_allowed(hba)) + return; + + ufshpb_destroy_lu(hba, sdev); +} + +static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev) +{ + /* skip well-known LU */ + if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || + !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) + return; + + ufshpb_init_hpb_lu(hba, sdev); +} + 
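Both helpers follow the standard SCSI pattern of hanging per-LU private data off scsi_device->hostdata, allocated when the LU is configured and released from slave_destroy. A stripped-down sketch of that ownership pattern (invented names, not the driver code):

	#include <linux/slab.h>
	#include <scsi/scsi_device.h>

	struct example_lu {
		int state;
	};

	static void example_configure(struct scsi_device *sdev)
	{
		struct example_lu *lu = kzalloc(sizeof(*lu), GFP_KERNEL);

		if (lu)
			sdev->hostdata = lu;	/* looked up on the I/O path */
	}

	static void example_destroy(struct scsi_device *sdev)
	{
		kfree(sdev->hostdata);		/* kfree(NULL) is a no-op */
		sdev->hostdata = NULL;
	}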
/** * ufshcd_slave_configure - adjust SCSI device configurations * @sdev: pointer to SCSI device @@ -4917,6 +4937,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev) struct ufs_hba *hba = shost_priv(sdev->host); struct request_queue *q = sdev->request_queue; + ufshcd_hpb_configure(hba, sdev); + blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE) blk_queue_update_dma_alignment(q, PAGE_SIZE - 1); @@ -4940,6 +4962,9 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev) struct ufs_hba *hba; hba = shost_priv(sdev->host); + + ufshcd_hpb_destroy(hba, sdev); + /* Drop the reference as it won't be needed anymore */ if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { unsigned long flags; @@ -7045,6 +7070,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) { int err; + ufshpb_reset_host(hba); /* * Stop the host controller and complete the requests * cleared by h/w @@ -7438,6 +7464,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba) { int err; u8 model_index; + u8 b_ufs_feature_sup; u8 *desc_buf; struct ufs_dev_info *dev_info = &hba->dev_info; @@ -7465,9 +7492,16 @@ static int ufs_get_device_desc(struct ufs_hba *hba) /* getting Specification Version in big endian format */ dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 | desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1]; + b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT]; model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; + if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION && + (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) { + dev_info->hpb_enabled = true; + ufshpb_get_dev_info(hba, desc_buf); + } + err = ufshcd_read_string_desc(hba, model_index, &dev_info->model, SD_ASCII_STD); if (err < 0) { @@ -7696,6 +7730,10 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba) else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0) hba->dev_info.max_lu_supported = 8; + if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >= + GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS) + ufshpb_get_geo_info(hba, desc_buf); + out: kfree(desc_buf); return err; @@ -7838,6 +7876,7 @@ static int ufshcd_add_lus(struct ufs_hba *hba) } ufs_bsg_probe(hba); + ufshpb_init(hba); scsi_scan_host(hba->host); pm_runtime_put_sync(hba->dev); @@ -7983,6 +8022,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async) /* Enable Auto-Hibernate if configured */ ufshcd_auto_hibern8_enable(hba); + ufshpb_reset(hba); out: spin_lock_irqsave(hba->host->host_lock, flags); if (ret) @@ -8030,6 +8070,9 @@ out: static const struct attribute_group *ufshcd_driver_groups[] = { &ufs_sysfs_unit_descriptor_group, &ufs_sysfs_lun_attributes_group, +#ifdef CONFIG_SCSI_UFS_HPB + &ufs_sysfs_hpb_stat_group, +#endif NULL, }; @@ -8749,6 +8792,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) req_link_state = UIC_LINK_OFF_STATE; } + ufshpb_suspend(hba); + /* * If we can't transition into any of the low power modes * just gate the clocks. 
@@ -8867,6 +8912,7 @@ enable_gating: hba->dev_info.b_rpm_dev_flush_capable = false; ufshcd_clear_ua_wluns(hba); ufshcd_release(hba); + ufshpb_resume(hba); out: if (hba->dev_info.b_rpm_dev_flush_capable) { schedule_delayed_work(&hba->rpm_dev_flush_recheck_work, @@ -8966,6 +9012,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) /* Enable Auto-Hibernate if configured */ ufshcd_auto_hibern8_enable(hba); + ufshpb_resume(hba); + if (hba->dev_info.b_rpm_dev_flush_capable) { hba->dev_info.b_rpm_dev_flush_capable = false; cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 6056ea2ccfb6..626105ee723f 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -653,6 +653,25 @@ struct ufs_hba_variant_params { u32 wb_flush_threshold; }; +#ifdef CONFIG_SCSI_UFS_HPB +/** + * struct ufshpb_dev_info - UFSHPB device related info + * @num_lu: the number of user logical unit to check whether all lu finished + * initialization + * @rgn_size: device reported HPB region size + * @srgn_size: device reported HPB sub-region size + * @slave_conf_cnt: counter to check all lu finished initialization + * @hpb_disabled: flag to check if HPB is disabled + */ +struct ufshpb_dev_info { + int num_lu; + int rgn_size; + int srgn_size; + atomic_t slave_conf_cnt; + bool hpb_disabled; +}; +#endif + struct ufs_hba_monitor { unsigned long chunk_size; @@ -863,6 +882,10 @@ struct ufs_hba { bool wb_enabled; struct delayed_work rpm_dev_flush_recheck_work; +#ifdef CONFIG_SCSI_UFS_HPB + struct ufshpb_dev_info ufshpb_dev; +#endif + struct ufs_hba_monitor monitor; #ifdef CONFIG_SCSI_UFS_CRYPTO diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c new file mode 100644 index 000000000000..5dbd730c6be6 --- /dev/null +++ b/drivers/scsi/ufs/ufshpb.c @@ -0,0 +1,569 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Universal Flash Storage Host Performance Booster + * + * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd. 
+ * + * Authors: + * Yongmyung Lee + * Jinyoung Choi + */ + +#include +#include + +#include "ufshcd.h" +#include "ufshpb.h" +#include "../sd.h" + +bool ufshpb_is_allowed(struct ufs_hba *hba) +{ + return !(hba->ufshpb_dev.hpb_disabled); +} + +static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev) +{ + return sdev->hostdata; +} + +static int ufshpb_get_state(struct ufshpb_lu *hpb) +{ + return atomic_read(&hpb->hpb_state); +} + +static void ufshpb_set_state(struct ufshpb_lu *hpb, int state) +{ + atomic_set(&hpb->hpb_state, state); +} + +static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, bool last) +{ + int srgn_idx; + struct ufshpb_subregion *srgn; + + for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; srgn_idx++) { + srgn = rgn->srgn_tbl + srgn_idx; + + srgn->rgn_idx = rgn->rgn_idx; + srgn->srgn_idx = srgn_idx; + srgn->srgn_state = HPB_SRGN_UNUSED; + } + + if (unlikely(last && hpb->last_srgn_entries)) + srgn->is_last = true; +} + +static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, int srgn_cnt) +{ + rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion), + GFP_KERNEL); + if (!rgn->srgn_tbl) + return -ENOMEM; + + rgn->srgn_cnt = srgn_cnt; + return 0; +} + +static void ufshpb_lu_parameter_init(struct ufs_hba *hba, + struct ufshpb_lu *hpb, + struct ufshpb_dev_info *hpb_dev_info, + struct ufshpb_lu_info *hpb_lu_info) +{ + u32 entries_per_rgn; + u64 rgn_mem_size, tmp; + + hpb->lu_pinned_start = hpb_lu_info->pinned_start; + hpb->lu_pinned_end = hpb_lu_info->num_pinned ? + (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1) + : PINNED_NOT_SET; + + rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT + * HPB_ENTRY_SIZE; + do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE); + hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size) + * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE; + + tmp = rgn_mem_size; + do_div(tmp, HPB_ENTRY_SIZE); + entries_per_rgn = (u32)tmp; + hpb->entries_per_rgn_shift = ilog2(entries_per_rgn); + hpb->entries_per_rgn_mask = entries_per_rgn - 1; + + hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE; + hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn); + hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1; + + tmp = rgn_mem_size; + do_div(tmp, hpb->srgn_mem_size); + hpb->srgns_per_rgn = (int)tmp; + + hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, + entries_per_rgn); + hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, + (hpb->srgn_mem_size / HPB_ENTRY_SIZE)); + hpb->last_srgn_entries = hpb_lu_info->num_blocks + % (hpb->srgn_mem_size / HPB_ENTRY_SIZE); + + hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE); +} + +static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn_table, *rgn; + int rgn_idx, i; + int ret = 0; + + rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region), + GFP_KERNEL); + if (!rgn_table) + return -ENOMEM; + + hpb->rgn_tbl = rgn_table; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + int srgn_cnt = hpb->srgns_per_rgn; + bool last_srgn = false; + + rgn = rgn_table + rgn_idx; + rgn->rgn_idx = rgn_idx; + + if (rgn_idx == hpb->rgns_per_lu - 1) { + srgn_cnt = ((hpb->srgns_per_lu - 1) % + hpb->srgns_per_rgn) + 1; + last_srgn = true; + } + + ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt); + if (ret) + goto release_srgn_table; + ufshpb_init_subregion_tbl(hpb, rgn, last_srgn); + + rgn->rgn_state = 
HPB_RGN_INACTIVE; + } + + return 0; + +release_srgn_table: + for (i = 0; i < rgn_idx; i++) { + rgn = rgn_table + i; + kvfree(rgn->srgn_tbl); + } + kvfree(rgn_table); + return ret; +} + +static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + int srgn_idx; + + for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; srgn_idx++) { + struct ufshpb_subregion *srgn; + + srgn = rgn->srgn_tbl + srgn_idx; + srgn->srgn_state = HPB_SRGN_UNUSED; + } +} + +static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb) +{ + int rgn_idx; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + struct ufshpb_region *rgn; + + rgn = hpb->rgn_tbl + rgn_idx; + if (rgn->rgn_state != HPB_RGN_INACTIVE) { + rgn->rgn_state = HPB_RGN_INACTIVE; + + ufshpb_destroy_subregion_tbl(hpb, rgn); + } + + kvfree(rgn->srgn_tbl); + } + + kvfree(hpb->rgn_tbl); +} + +/* SYSFS functions */ +#define ufshpb_sysfs_attr_show_func(__name) \ +static ssize_t __name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \ + \ + if (!hpb) \ + return -ENODEV; \ + \ + return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \ +} \ +\ +static DEVICE_ATTR_RO(__name) + +ufshpb_sysfs_attr_show_func(hit_cnt); +ufshpb_sysfs_attr_show_func(miss_cnt); +ufshpb_sysfs_attr_show_func(rb_noti_cnt); +ufshpb_sysfs_attr_show_func(rb_active_cnt); +ufshpb_sysfs_attr_show_func(rb_inactive_cnt); +ufshpb_sysfs_attr_show_func(map_req_cnt); + +static struct attribute *hpb_dev_attrs[] = { + &dev_attr_hit_cnt.attr, + &dev_attr_miss_cnt.attr, + &dev_attr_rb_noti_cnt.attr, + &dev_attr_rb_active_cnt.attr, + &dev_attr_rb_inactive_cnt.attr, + &dev_attr_map_req_cnt.attr, + NULL, +}; + +struct attribute_group ufs_sysfs_hpb_stat_group = { + .name = "hpb_stats", + .attrs = hpb_dev_attrs, +}; + +static void ufshpb_stat_init(struct ufshpb_lu *hpb) +{ + hpb->stats.hit_cnt = 0; + hpb->stats.miss_cnt = 0; + hpb->stats.rb_noti_cnt = 0; + hpb->stats.rb_active_cnt = 0; + hpb->stats.rb_inactive_cnt = 0; + hpb->stats.map_req_cnt = 0; +} + +static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb) +{ + int ret; + + ret = ufshpb_alloc_region_tbl(hba, hpb); + + ufshpb_stat_init(hpb); + + return 0; +} + +static struct ufshpb_lu * +ufshpb_alloc_hpb_lu(struct ufs_hba *hba, int lun, + struct ufshpb_dev_info *hpb_dev_info, + struct ufshpb_lu_info *hpb_lu_info) +{ + struct ufshpb_lu *hpb; + int ret; + + hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL); + if (!hpb) + return NULL; + + hpb->lun = lun; + + ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info); + + ret = ufshpb_lu_hpb_init(hba, hpb); + if (ret) { + dev_err(hba->dev, "hpb lu init failed. 
ret %d", ret); + goto release_hpb; + } + + return hpb; + +release_hpb: + kfree(hpb); + return NULL; +} + +static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba) +{ + int err = 0; + bool flag_res = true; + int try; + + /* wait for the device to complete HPB reset query */ + for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) { + dev_dbg(hba->dev, + "%s start flag reset polling %d times\n", + __func__, try); + + /* Poll fHpbReset flag to be cleared */ + err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res); + + if (err) { + dev_err(hba->dev, + "%s reading fHpbReset flag failed with error %d\n", + __func__, err); + return flag_res; + } + + if (!flag_res) + goto out; + + usleep_range(1000, 1100); + } + if (flag_res) { + dev_err(hba->dev, + "%s fHpbReset was not cleared by the device\n", + __func__); + } +out: + return flag_res; +} + +void ufshpb_reset(struct ufs_hba *hba) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = sdev->hostdata; + if (!hpb) + continue; + + if (ufshpb_get_state(hpb) != HPB_RESET) + continue; + + ufshpb_set_state(hpb, HPB_PRESENT); + } +} + +void ufshpb_reset_host(struct ufs_hba *hba) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = sdev->hostdata; + if (!hpb) + continue; + + if (ufshpb_get_state(hpb) != HPB_PRESENT) + continue; + ufshpb_set_state(hpb, HPB_RESET); + } +} + +void ufshpb_suspend(struct ufs_hba *hba) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = sdev->hostdata; + if (!hpb) + continue; + + if (ufshpb_get_state(hpb) != HPB_PRESENT) + continue; + ufshpb_set_state(hpb, HPB_SUSPEND); + } +} + +void ufshpb_resume(struct ufs_hba *hba) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = sdev->hostdata; + if (!hpb) + continue; + + if ((ufshpb_get_state(hpb) != HPB_PRESENT) && + (ufshpb_get_state(hpb) != HPB_SUSPEND)) + continue; + ufshpb_set_state(hpb, HPB_PRESENT); + } +} + +static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun, + struct ufshpb_lu_info *hpb_lu_info) +{ + u16 max_active_rgns; + u8 lu_enable; + int size; + int ret; + char desc_buf[QUERY_DESC_MAX_SIZE]; + + ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size); + + pm_runtime_get_sync(hba->dev); + ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + QUERY_DESC_IDN_UNIT, lun, 0, + desc_buf, &size); + pm_runtime_put_sync(hba->dev); + + if (ret) { + dev_err(hba->dev, + "%s: idn: %d lun: %d query request failed", + __func__, QUERY_DESC_IDN_UNIT, lun); + return ret; + } + + lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE]; + if (lu_enable != LU_ENABLED_HPB_FUNC) + return -ENODEV; + + max_active_rgns = get_unaligned_be16( + desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS); + if (!max_active_rgns) { + dev_err(hba->dev, + "lun %d wrong number of max active regions\n", lun); + return -ENODEV; + } + + hpb_lu_info->num_blocks = get_unaligned_be64( + desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT); + hpb_lu_info->pinned_start = get_unaligned_be16( + desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF); + hpb_lu_info->num_pinned = get_unaligned_be16( + desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS); + hpb_lu_info->max_active_rgns = max_active_rgns; + + return 0; +} + +void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) +{ + struct ufshpb_lu *hpb = sdev->hostdata; + + if (!hpb) + return; 
+ + ufshpb_set_state(hpb, HPB_FAILED); + + sdev = hpb->sdev_ufs_lu; + sdev->hostdata = NULL; + + ufshpb_destroy_region_tbl(hpb); + + list_del_init(&hpb->list_hpb_lu); + + kfree(hpb); +} + +static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + bool init_success; + + init_success = !ufshpb_check_hpb_reset_query(hba); + + shost_for_each_device(sdev, hba->host) { + hpb = sdev->hostdata; + if (!hpb) + continue; + + if (init_success) { + ufshpb_set_state(hpb, HPB_PRESENT); + } else { + dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun); + ufshpb_destroy_lu(hba, sdev); + } + } +} + +void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) +{ + struct ufshpb_lu *hpb; + int ret; + struct ufshpb_lu_info hpb_lu_info = { 0 }; + int lun = sdev->lun; + + if (lun >= hba->dev_info.max_lu_supported) + goto out; + + ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info); + if (ret) + goto out; + + hpb = ufshpb_alloc_hpb_lu(hba, lun, &hba->ufshpb_dev, + &hpb_lu_info); + if (!hpb) + goto out; + + hpb->sdev_ufs_lu = sdev; + sdev->hostdata = hpb; + +out: + /* All LUs are initialized */ + if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt)) + ufshpb_hpb_lu_prepared(hba); +} + +void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) +{ + struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev; + int max_active_rgns = 0; + int hpb_num_lu; + + hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU]; + if (hpb_num_lu == 0) { + dev_err(hba->dev, "No HPB LU supported\n"); + hpb_info->hpb_disabled = true; + return; + } + + hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE]; + hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE]; + max_active_rgns = get_unaligned_be16(geo_buf + + GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS); + + if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 || + max_active_rgns == 0) { + dev_err(hba->dev, "No HPB supported device\n"); + hpb_info->hpb_disabled = true; + return; + } +} + +void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) +{ + struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev; + int version; + u8 hpb_mode; + + hpb_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL]; + if (hpb_mode == HPB_HOST_CONTROL) { + dev_err(hba->dev, "%s: host control mode is not supported.\n", + __func__); + hpb_dev_info->hpb_disabled = true; + return; + } + + version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER); + if (version != HPB_SUPPORT_VERSION) { + dev_err(hba->dev, "%s: HPB %x version is not supported.\n", + __func__, version); + hpb_dev_info->hpb_disabled = true; + return; + } + + /* + * Get the number of user logical unit to check whether all + * scsi_device finish initialization + */ + hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU]; +} + +void ufshpb_init(struct ufs_hba *hba) +{ + struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev; + int try; + int ret; + + if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled) + return; + + atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu); + /* issue HPB reset query */ + for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) { + ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, + QUERY_FLAG_IDN_HPB_RESET, 0, NULL); + if (!ret) + break; + } +} diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h new file mode 100644 index 000000000000..fa311ed3fa94 --- /dev/null +++ b/drivers/scsi/ufs/ufshpb.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Universal Flash Storage Host 
Performance Booster + * + * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd. + * + * Authors: + * Yongmyung Lee + * Jinyoung Choi + */ + +#ifndef _UFSHPB_H_ +#define _UFSHPB_H_ + +/* hpb response UPIU macro */ +#define HPB_RSP_NONE 0x0 +#define HPB_RSP_REQ_REGION_UPDATE 0x1 +#define HPB_RSP_DEV_RESET 0x2 +#define MAX_ACTIVE_NUM 2 +#define MAX_INACTIVE_NUM 2 +#define DEV_DATA_SEG_LEN 0x14 +#define DEV_SENSE_SEG_LEN 0x12 +#define DEV_DES_TYPE 0x80 +#define DEV_ADDITIONAL_LEN 0x10 + +/* hpb map & entries macro */ +#define HPB_RGN_SIZE_UNIT 512 +#define HPB_ENTRY_BLOCK_SIZE 4096 +#define HPB_ENTRY_SIZE 0x8 +#define PINNED_NOT_SET U32_MAX + +/* hpb support chunk size */ +#define HPB_MULTI_CHUNK_HIGH 1 + +/* hpb vender defined opcode */ +#define UFSHPB_READ 0xF8 +#define UFSHPB_READ_BUFFER 0xF9 +#define UFSHPB_READ_BUFFER_ID 0x01 +#define HPB_READ_BUFFER_CMD_LENGTH 10 +#define LU_ENABLED_HPB_FUNC 0x02 + +#define HPB_RESET_REQ_RETRIES 10 + +#define HPB_SUPPORT_VERSION 0x100 + +enum UFSHPB_MODE { + HPB_HOST_CONTROL, + HPB_DEVICE_CONTROL, +}; + +enum UFSHPB_STATE { + HPB_INIT = 0, + HPB_PRESENT = 1, + HPB_SUSPEND, + HPB_FAILED, + HPB_RESET, +}; + +enum HPB_RGN_STATE { + HPB_RGN_INACTIVE, + HPB_RGN_ACTIVE, + /* pinned regions are always active */ + HPB_RGN_PINNED, +}; + +enum HPB_SRGN_STATE { + HPB_SRGN_UNUSED, + HPB_SRGN_INVALID, + HPB_SRGN_VALID, + HPB_SRGN_ISSUED, +}; + +/** + * struct ufshpb_lu_info - UFSHPB logical unit related info + * @num_blocks: the number of logical block + * @pinned_start: the start region number of pinned region + * @num_pinned: the number of pinned regions + * @max_active_rgns: maximum number of active regions + */ +struct ufshpb_lu_info { + int num_blocks; + int pinned_start; + int num_pinned; + int max_active_rgns; +}; + +struct ufshpb_subregion { + enum HPB_SRGN_STATE srgn_state; + int rgn_idx; + int srgn_idx; + bool is_last; +}; + +struct ufshpb_region { + struct ufshpb_subregion *srgn_tbl; + enum HPB_RGN_STATE rgn_state; + int rgn_idx; + int srgn_cnt; +}; + +struct ufshpb_stats { + u64 hit_cnt; + u64 miss_cnt; + u64 rb_noti_cnt; + u64 rb_active_cnt; + u64 rb_inactive_cnt; + u64 map_req_cnt; +}; + +struct ufshpb_lu { + int lun; + struct scsi_device *sdev_ufs_lu; + struct ufshpb_region *rgn_tbl; + + atomic_t hpb_state; + + /* pinned region information */ + u32 lu_pinned_start; + u32 lu_pinned_end; + + /* HPB related configuration */ + u32 rgns_per_lu; + u32 srgns_per_lu; + u32 last_srgn_entries; + int srgns_per_rgn; + u32 srgn_mem_size; + u32 entries_per_rgn_mask; + u32 entries_per_rgn_shift; + u32 entries_per_srgn; + u32 entries_per_srgn_mask; + u32 entries_per_srgn_shift; + u32 pages_per_srgn; + + struct ufshpb_stats stats; + + struct list_head list_hpb_lu; +}; + +struct ufs_hba; +struct ufshcd_lrb; + +#ifndef CONFIG_SCSI_UFS_HPB +static void ufshpb_resume(struct ufs_hba *hba) {} +static void ufshpb_suspend(struct ufs_hba *hba) {} +static void ufshpb_reset(struct ufs_hba *hba) {} +static void ufshpb_reset_host(struct ufs_hba *hba) {} +static void ufshpb_init(struct ufs_hba *hba) {} +static void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) {} +static void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) {} +static bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; } +static void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {} +static void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {} +#else +void ufshpb_resume(struct ufs_hba *hba); +void ufshpb_suspend(struct ufs_hba *hba); +void 
ufshpb_reset(struct ufs_hba *hba);
+void ufshpb_reset_host(struct ufs_hba *hba);
+void ufshpb_init(struct ufs_hba *hba);
+void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev);
+void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev);
+bool ufshpb_is_allowed(struct ufs_hba *hba);
+void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf);
+void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf);
+extern struct attribute_group ufs_sysfs_hpb_stat_group;
+#endif
+
+#endif /* End of Header */

From 466f620a319c8a0684b46d26d5b37d2140fb8479 Mon Sep 17 00:00:00 2001
From: Daejun Park
Date: Wed, 16 Jun 2021 17:37:07 +0900
Subject: [PATCH 27/62] FROMLIST: scsi: ufs: L2P map management for HPB read

This patch adds L2P map management to the HPB module.

The HPB divides logical addresses into several regions. A region consists
of several sub-regions. The sub-region is the basic unit in which L2P
mapping is managed. The driver loads the L2P mapping data of each
sub-region; a loaded sub-region is called active-state. The HPB driver
unloads L2P mapping data in units of regions; an unloaded region is
called inactive-state.

Sub-region/region candidates to be loaded and unloaded are delivered from
the UFS device. The UFS device delivers the recommended sub-regions to
activate and regions to inactivate to the driver using sense data. The
HPB module performs L2P mapping management on the host through the
delivered information.

A pinned region is a pre-set region on the UFS device that is always in
the active state.

The data structures for map data requests and the L2P map use the mempool
API, minimizing allocation overhead while avoiding static allocation. The
minimum size of the memory pool used by the HPB is implemented as a module
parameter, so that it can be configured by the user.

To guarantee a minimum memory pool size of 4MB: ufshpb_host_map_kbytes=4096

The map_work worker manages activation/inactivation via two "to-do"
lists. Each HPB LUN maintains the two lists:
  hpb->lh_inact_rgn - regions to be inactivated, and
  hpb->lh_act_srgn - sub-regions to be activated
Those lists are maintained on I/O completion.
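
For illustration only (not part of the patch): a minimal standalone sketch
of the region/sub-region decomposition described above. The descriptor
values and the example LPN are hypothetical; the driver performs the same
arithmetic with precomputed shifts and masks, since all sizes are powers
of two.

  #include <stdio.h>
  #include <stdint.h>

  #define HPB_RGN_SIZE_UNIT    512   /* size unit in bytes, as in ufshpb.h */
  #define HPB_ENTRY_BLOCK_SIZE 4096  /* one L2P entry maps a 4KB block */

  int main(void)
  {
      /* hypothetical descriptor values: 512B << 15 = 16MB region,
       * 512B << 13 = 4MB sub-region */
      unsigned int rgn_size = 15, srgn_size = 13;

      uint64_t entries_per_rgn =
          ((1ULL << rgn_size) * HPB_RGN_SIZE_UNIT) / HPB_ENTRY_BLOCK_SIZE;
      uint64_t entries_per_srgn =
          ((1ULL << srgn_size) * HPB_RGN_SIZE_UNIT) / HPB_ENTRY_BLOCK_SIZE;

      uint64_t lpn = 0x12345; /* arbitrary logical page (4KB block) number */
      uint64_t rgn_idx  = lpn / entries_per_rgn;
      uint64_t rgn_off  = lpn % entries_per_rgn;
      uint64_t srgn_idx = rgn_off / entries_per_srgn;
      uint64_t srgn_off = rgn_off % entries_per_srgn;

      printf("lpn 0x%llx -> region %llu, sub-region %llu, offset %llu\n",
             (unsigned long long)lpn, (unsigned long long)rgn_idx,
             (unsigned long long)srgn_idx, (unsigned long long)srgn_off);
      return 0;
  }

With the values above, this prints:
  lpn 0x12345 -> region 18, sub-region 0, offset 837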
Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/linux-scsi/20210616070848epcms2p2819a1f0bf96cdcc357842fe8500af633@epcms2p2/ Reviewed-by: Greg Kroah-Hartman Reviewed-by: Bart Van Assche Reviewed-by: Can Guo Reviewed-by: Bean Huo Reviewed-by: Stanley Chu Acked-by: Avri Altman Tested-by: Bean Huo Tested-by: Can Guo Tested-by: Stanley Chu Signed-off-by: Daejun Park Change-Id: I1284f326332e2d6f2c1221e2d64160939614ad2d --- drivers/scsi/ufs/ufs.h | 36 ++ drivers/scsi/ufs/ufshcd.c | 4 + drivers/scsi/ufs/ufshpb.c | 1088 ++++++++++++++++++++++++++++++++++++- drivers/scsi/ufs/ufshpb.h | 65 +++ 4 files changed, 1178 insertions(+), 15 deletions(-) diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 12ee42ef4eb8..0a504dcf8290 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -472,6 +472,41 @@ struct utp_cmd_rsp { u8 sense_data[UFS_SENSE_SIZE]; }; +struct ufshpb_active_field { + __be16 active_rgn; + __be16 active_srgn; +}; +#define HPB_ACT_FIELD_SIZE 4 + +/** + * struct utp_hpb_rsp - Response UPIU structure + * @residual_transfer_count: Residual transfer count DW-3 + * @reserved1: Reserved double words DW-4 to DW-7 + * @sense_data_len: Sense data length DW-8 U16 + * @desc_type: Descriptor type of sense data + * @additional_len: Additional length of sense data + * @hpb_op: HPB operation type + * @lun: LUN of response UPIU + * @active_rgn_cnt: Active region count + * @inactive_rgn_cnt: Inactive region count + * @hpb_active_field: Recommended to read HPB region and subregion + * @hpb_inactive_field: To be inactivated HPB region and subregion + */ +struct utp_hpb_rsp { + __be32 residual_transfer_count; + __be32 reserved1[4]; + __be16 sense_data_len; + u8 desc_type; + u8 additional_len; + u8 hpb_op; + u8 lun; + u8 active_rgn_cnt; + u8 inactive_rgn_cnt; + struct ufshpb_active_field hpb_active_field[2]; + __be16 hpb_inactive_field[2]; +}; +#define UTP_HPB_RSP_SIZE 40 + /** * struct utp_upiu_rsp - general upiu response structure * @header: UPIU header structure DW-0 to DW-2 @@ -482,6 +517,7 @@ struct utp_upiu_rsp { struct utp_upiu_header header; union { struct utp_cmd_rsp sr; + struct utp_hpb_rsp hr; struct utp_upiu_query qr; }; }; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 8d3283c82b97..f19a7abb659f 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -5075,6 +5075,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) */ pm_runtime_get_noresume(hba->dev); } + + if (scsi_status == SAM_STAT_GOOD) + ufshpb_rsp_upiu(hba, lrbp); break; case UPIU_TRANSACTION_REJECT_UPIU: /* TODO: handle Reject UPIU Response */ @@ -9244,6 +9247,7 @@ EXPORT_SYMBOL(ufshcd_shutdown); void ufshcd_remove(struct ufs_hba *hba) { ufs_bsg_remove(hba); + ufshpb_remove(hba); ufs_sysfs_remove_nodes(hba->dev); blk_cleanup_queue(hba->tmf_queue); blk_mq_free_tag_set(&hba->tmf_tag_set); diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 5dbd730c6be6..58bb02d82b97 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -16,6 +16,16 @@ #include "ufshpb.h" #include "../sd.h" +/* memory management */ +static struct kmem_cache *ufshpb_mctx_cache; +static mempool_t *ufshpb_mctx_pool; +static mempool_t *ufshpb_page_pool; +/* A cache size of 2MB can cache ppn in the 1GB range. 
*/ +static unsigned int ufshpb_host_map_kbytes = 2048; +static int tot_active_srgn_pages; + +static struct workqueue_struct *ufshpb_wq; + bool ufshpb_is_allowed(struct ufs_hba *hba) { return !(hba->ufshpb_dev.hpb_disabled); @@ -36,14 +46,889 @@ static void ufshpb_set_state(struct ufshpb_lu *hpb, int state) atomic_set(&hpb->hpb_state, state); } +static bool ufshpb_is_general_lun(int lun) +{ + return lun < UFS_UPIU_MAX_UNIT_NUM_ID; +} + +static bool +ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx) +{ + if (hpb->lu_pinned_end != PINNED_NOT_SET && + rgn_idx >= hpb->lu_pinned_start && + rgn_idx <= hpb->lu_pinned_end) + return true; + + return false; +} + +static void ufshpb_kick_map_work(struct ufshpb_lu *hpb) +{ + bool ret = false; + unsigned long flags; + + if (ufshpb_get_state(hpb) != HPB_PRESENT) + return; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn)) + ret = true; + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + if (ret) + queue_work(ufshpb_wq, &hpb->map_work); +} + +static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, + struct utp_hpb_rsp *rsp_field) +{ + /* Check HPB_UPDATE_ALERT */ + if (!(lrbp->ucd_rsp_ptr->header.dword_2 & + UPIU_HEADER_DWORD(0, 2, 0, 0))) + return false; + + if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN || + rsp_field->desc_type != DEV_DES_TYPE || + rsp_field->additional_len != DEV_ADDITIONAL_LEN || + rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM || + rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM || + rsp_field->hpb_op == HPB_RSP_NONE || + (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE && + !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt)) + return false; + + if (!ufshpb_is_general_lun(rsp_field->lun)) { + dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n", + lrbp->lun); + return false; + } + + return true; +} + +static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_req *map_req; + struct request *req; + struct bio *bio; + int retries = HPB_MAP_REQ_RETRIES; + + map_req = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL); + if (!map_req) + return NULL; + +retry: + req = blk_get_request(hpb->sdev_ufs_lu->request_queue, + REQ_OP_SCSI_IN, BLK_MQ_REQ_NOWAIT); + + if ((PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) { + usleep_range(3000, 3100); + goto retry; + } + + if (IS_ERR(req)) + goto free_map_req; + + bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn); + if (!bio) { + blk_put_request(req); + goto free_map_req; + } + + map_req->hpb = hpb; + map_req->req = req; + map_req->bio = bio; + + map_req->rgn_idx = srgn->rgn_idx; + map_req->srgn_idx = srgn->srgn_idx; + map_req->mctx = srgn->mctx; + + return map_req; + +free_map_req: + kmem_cache_free(hpb->map_req_cache, map_req); + return NULL; +} + +static void ufshpb_put_map_req(struct ufshpb_lu *hpb, + struct ufshpb_req *map_req) +{ + bio_put(map_req->bio); + blk_put_request(map_req->req); + kmem_cache_free(hpb->map_req_cache, map_req); +} + +static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + u32 num_entries = hpb->entries_per_srgn; + + if (!srgn->mctx) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "no mctx in region %d subregion %d.\n", + srgn->rgn_idx, srgn->srgn_idx); + return -1; + } + + if (unlikely(srgn->is_last)) + num_entries = hpb->last_srgn_entries; + + bitmap_zero(srgn->mctx->ppn_dirty, num_entries); + return 0; +} + +static void 
ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + list_del_init(&rgn->list_inact_rgn); + + if (list_empty(&srgn->list_act_srgn)) + list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); +} + +static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int srgn_idx; + + rgn = hpb->rgn_tbl + rgn_idx; + + for_each_sub_region(rgn, srgn_idx, srgn) + list_del_init(&srgn->list_act_srgn); + + if (list_empty(&rgn->list_inact_rgn)) + list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn); +} + +static void ufshpb_activate_subregion(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_region *rgn; + + /* + * If there is no mctx in subregion + * after I/O progress for HPB_READ_BUFFER, the region to which the + * subregion belongs was evicted. + * Make sure the region must not evict in I/O progress + */ + if (!srgn->mctx) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "no mctx in region %d subregion %d.\n", + srgn->rgn_idx, srgn->srgn_idx); + srgn->srgn_state = HPB_SRGN_INVALID; + return; + } + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + + if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "region %d subregion %d evicted\n", + srgn->rgn_idx, srgn->srgn_idx); + srgn->srgn_state = HPB_SRGN_INVALID; + return; + } + srgn->srgn_state = HPB_SRGN_VALID; +} + +static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data; + struct ufshpb_lu *hpb = map_req->hpb; + struct ufshpb_subregion *srgn; + unsigned long flags; + + srgn = hpb->rgn_tbl[map_req->rgn_idx].srgn_tbl + + map_req->srgn_idx; + + ufshpb_clear_dirty_bitmap(hpb, srgn); + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + ufshpb_activate_subregion(hpb, srgn); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + ufshpb_put_map_req(map_req->hpb, map_req); +} + +static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx, + int srgn_idx, int srgn_mem_size) +{ + cdb[0] = UFSHPB_READ_BUFFER; + cdb[1] = UFSHPB_READ_BUFFER_ID; + + put_unaligned_be16(rgn_idx, &cdb[2]); + put_unaligned_be16(srgn_idx, &cdb[4]); + put_unaligned_be24(srgn_mem_size, &cdb[6]); + + cdb[9] = 0x00; +} + +static int ufshpb_execute_map_req(struct ufshpb_lu *hpb, + struct ufshpb_req *map_req, bool last) +{ + struct request_queue *q; + struct request *req; + struct scsi_request *rq; + int mem_size = hpb->srgn_mem_size; + int ret = 0; + int i; + + q = hpb->sdev_ufs_lu->request_queue; + for (i = 0; i < hpb->pages_per_srgn; i++) { + ret = bio_add_pc_page(q, map_req->bio, map_req->mctx->m_page[i], + PAGE_SIZE, 0); + if (ret != PAGE_SIZE) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "bio_add_pc_page fail %d - %d\n", + map_req->rgn_idx, map_req->srgn_idx); + return ret; + } + } + + req = map_req->req; + + blk_rq_append_bio(req, &map_req->bio); + + req->end_io_data = map_req; + + rq = scsi_req(req); + + if (unlikely(last)) + mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE; + + ufshpb_set_read_buf_cmd(rq->cmd, map_req->rgn_idx, + map_req->srgn_idx, mem_size); + rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH; + + blk_execute_rq_nowait(q, NULL, req, 1, ufshpb_map_req_compl_fn); + + hpb->stats.map_req_cnt++; + return 0; +} + +static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb, + bool 
last) +{ + struct ufshpb_map_ctx *mctx; + u32 num_entries = hpb->entries_per_srgn; + int i, j; + + mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL); + if (!mctx) + return NULL; + + mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL); + if (!mctx->m_page) + goto release_mctx; + + if (unlikely(last)) + num_entries = hpb->last_srgn_entries; + + mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL); + if (!mctx->ppn_dirty) + goto release_m_page; + + for (i = 0; i < hpb->pages_per_srgn; i++) { + mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL); + if (!mctx->m_page[i]) { + for (j = 0; j < i; j++) + mempool_free(mctx->m_page[j], ufshpb_page_pool); + goto release_ppn_dirty; + } + clear_page(page_address(mctx->m_page[i])); + } + + return mctx; + +release_ppn_dirty: + bitmap_free(mctx->ppn_dirty); +release_m_page: + kmem_cache_free(hpb->m_page_cache, mctx->m_page); +release_mctx: + mempool_free(mctx, ufshpb_mctx_pool); + return NULL; +} + +static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb, + struct ufshpb_map_ctx *mctx) +{ + int i; + + for (i = 0; i < hpb->pages_per_srgn; i++) + mempool_free(mctx->m_page[i], ufshpb_page_pool); + + bitmap_free(mctx->ppn_dirty); + kmem_cache_free(hpb->m_page_cache, mctx->m_page); + mempool_free(mctx, ufshpb_mctx_pool); +} + +static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_subregion *srgn; + int srgn_idx; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (srgn->srgn_state == HPB_SRGN_ISSUED) + return -EPERM; + + return 0; +} + +static void ufshpb_add_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + rgn->rgn_state = HPB_RGN_ACTIVE; + list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); + atomic_inc(&lru_info->active_cnt); +} + +static void ufshpb_hit_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); +} + +static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) +{ + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn, *victim_rgn = NULL; + + list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) { + if (!rgn) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "%s: no region allocated\n", + __func__); + return NULL; + } + if (ufshpb_check_srgns_issue_state(hpb, rgn)) + continue; + + victim_rgn = rgn; + break; + } + + return victim_rgn; +} + +static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + list_del_init(&rgn->list_lru_rgn); + rgn->rgn_state = HPB_RGN_INACTIVE; + atomic_dec(&lru_info->active_cnt); +} + +static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + if (srgn->srgn_state != HPB_SRGN_UNUSED) { + ufshpb_put_map_ctx(hpb, srgn->mctx); + srgn->srgn_state = HPB_SRGN_UNUSED; + srgn->mctx = NULL; + } +} + +static void __ufshpb_evict_region(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct victim_select_info *lru_info; + struct ufshpb_subregion *srgn; + int srgn_idx; + + lru_info = &hpb->lru_info; + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx); + + ufshpb_cleanup_lru_info(lru_info, rgn); + + for_each_sub_region(rgn, srgn_idx, srgn) + ufshpb_purge_active_subregion(hpb, srgn); +} + +static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if 
(rgn->rgn_state == HPB_RGN_PINNED) { + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "pinned region cannot drop-out. region %d\n", + rgn->rgn_idx); + goto out; + } + if (!list_empty(&rgn->list_lru_rgn)) { + if (ufshpb_check_srgns_issue_state(hpb, rgn)) { + ret = -EBUSY; + goto out; + } + + __ufshpb_evict_region(hpb, rgn); + } +out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return ret; +} + +static int ufshpb_issue_map_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_req *map_req; + unsigned long flags; + int ret; + int err = -EAGAIN; + bool alloc_required = false; + enum HPB_SRGN_STATE state = HPB_SRGN_INVALID; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT\n", __func__); + goto unlock_out; + } + + if ((rgn->rgn_state == HPB_RGN_INACTIVE) && + (srgn->srgn_state == HPB_SRGN_INVALID)) { + err = 0; + goto unlock_out; + } + + if (srgn->srgn_state == HPB_SRGN_UNUSED) + alloc_required = true; + + /* + * If the subregion is already ISSUED state, + * a specific event (e.g., GC or wear-leveling, etc.) occurs in + * the device and HPB response for map loading is received. + * In this case, after finishing the HPB_READ_BUFFER, + * the next HPB_READ_BUFFER is performed again to obtain the latest + * map data. + */ + if (srgn->srgn_state == HPB_SRGN_ISSUED) + goto unlock_out; + + srgn->srgn_state = HPB_SRGN_ISSUED; + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + if (alloc_required) { + srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last); + if (!srgn->mctx) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "get map_ctx failed. region %d - %d\n", + rgn->rgn_idx, srgn->srgn_idx); + state = HPB_SRGN_UNUSED; + goto change_srgn_state; + } + } + + map_req = ufshpb_get_map_req(hpb, srgn); + if (!map_req) + goto change_srgn_state; + + + ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last); + if (ret) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "%s: issue map_req failed: %d, region %d - %d\n", + __func__, ret, srgn->rgn_idx, srgn->srgn_idx); + goto free_map_req; + } + return 0; + +free_map_req: + ufshpb_put_map_req(hpb, map_req); +change_srgn_state: + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + srgn->srgn_state = state; +unlock_out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return err; +} + +static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) +{ + struct ufshpb_region *victim_rgn; + struct victim_select_info *lru_info = &hpb->lru_info; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + /* + * If region belongs to lru_list, just move the region + * to the front of lru list. because the state of the region + * is already active-state + */ + if (!list_empty(&rgn->list_lru_rgn)) { + ufshpb_hit_lru_info(lru_info, rgn); + goto out; + } + + if (rgn->rgn_state == HPB_RGN_INACTIVE) { + if (atomic_read(&lru_info->active_cnt) == + lru_info->max_lru_active_cnt) { + /* + * If the maximum number of active regions + * is exceeded, evict the least recently used region. + * This case may occur when the device responds + * to the eviction information late. 
+ * It is okay to evict the least recently used region, + * because the device could detect this region + * by not issuing HPB_READ + */ + victim_rgn = ufshpb_victim_lru_info(hpb); + if (!victim_rgn) { + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "cannot get victim region error\n"); + ret = -ENOMEM; + goto out; + } + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "LRU full (%d), choose victim %d\n", + atomic_read(&lru_info->active_cnt), + victim_rgn->rgn_idx); + __ufshpb_evict_region(hpb, victim_rgn); + } + + /* + * When a region is added to lru_info list_head, + * it is guaranteed that the subregion has been + * assigned all mctx. If failed, try to receive mctx again + * without being added to lru_info list_head + */ + ufshpb_add_lru_info(lru_info, rgn); + } +out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return ret; +} + +static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, + struct utp_hpb_rsp *rsp_field) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int i, rgn_i, srgn_i; + + BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE); + /* + * If the active region and the inactive region are the same, + * we will inactivate this region. + * The device could check this (region inactivated) and + * will response the proper active region information + */ + for (i = 0; i < rsp_field->active_rgn_cnt; i++) { + rgn_i = + be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn); + srgn_i = + be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn); + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "activate(%d) region %d - %d\n", i, rgn_i, srgn_i); + + spin_lock(&hpb->rsp_list_lock); + ufshpb_update_active_info(hpb, rgn_i, srgn_i); + spin_unlock(&hpb->rsp_list_lock); + + rgn = hpb->rgn_tbl + rgn_i; + srgn = rgn->srgn_tbl + srgn_i; + + /* blocking HPB_READ */ + spin_lock(&hpb->rgn_state_lock); + if (srgn->srgn_state == HPB_SRGN_VALID) + srgn->srgn_state = HPB_SRGN_INVALID; + spin_unlock(&hpb->rgn_state_lock); + hpb->stats.rb_active_cnt++; + } + + for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) { + rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]); + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "inactivate(%d) region %d\n", i, rgn_i); + + spin_lock(&hpb->rsp_list_lock); + ufshpb_update_inactive_info(hpb, rgn_i); + spin_unlock(&hpb->rsp_list_lock); + + rgn = hpb->rgn_tbl + rgn_i; + + spin_lock(&hpb->rgn_state_lock); + if (rgn->rgn_state != HPB_RGN_INACTIVE) { + for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) { + srgn = rgn->srgn_tbl + srgn_i; + if (srgn->srgn_state == HPB_SRGN_VALID) + srgn->srgn_state = HPB_SRGN_INVALID; + } + } + spin_unlock(&hpb->rgn_state_lock); + + hpb->stats.rb_inactive_cnt++; + } + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n", + rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt); + + if (ufshpb_get_state(hpb) == HPB_PRESENT) + queue_work(ufshpb_wq, &hpb->map_work); +} + +/* + * This function will parse recommended active subregion information in sense + * data field of response UPIU with SAM_STAT_GOOD state. 
+ */ +void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device); + struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr; + int data_seg_len; + + if (unlikely(lrbp->lun != rsp_field->lun)) { + struct scsi_device *sdev; + bool found = false; + + __shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + + if (!hpb) + continue; + + if (rsp_field->lun == hpb->lun) { + found = true; + break; + } + } + + if (!found) + return; + } + + if (!hpb) + return; + + if (ufshpb_get_state(hpb) == HPB_INIT) + return; + + if ((ufshpb_get_state(hpb) != HPB_PRESENT) && + (ufshpb_get_state(hpb) != HPB_SUSPEND)) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT/SUSPEND\n", + __func__); + return; + } + + data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) + & MASK_RSP_UPIU_DATA_SEG_LEN; + + /* To flush remained rsp_list, we queue the map_work task */ + if (!data_seg_len) { + if (!ufshpb_is_general_lun(hpb->lun)) + return; + + ufshpb_kick_map_work(hpb); + return; + } + + BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE); + + if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field)) + return; + + hpb->stats.rb_noti_cnt++; + + switch (rsp_field->hpb_op) { + case HPB_RSP_REQ_REGION_UPDATE: + if (data_seg_len != DEV_DATA_SEG_LEN) + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "%s: data seg length is not same.\n", + __func__); + ufshpb_rsp_req_region_update(hpb, rsp_field); + break; + case HPB_RSP_DEV_RESET: + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "UFS device lost HPB information during PM.\n"); + break; + default: + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "hpb_op is not available: %d\n", + rsp_field->hpb_op); + break; + } +} + +static void ufshpb_add_active_list(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + if (!list_empty(&rgn->list_inact_rgn)) + return; + + if (!list_empty(&srgn->list_act_srgn)) { + list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn); + return; + } + + list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn); +} + +static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct list_head *pending_list) +{ + struct ufshpb_subregion *srgn; + int srgn_idx; + + if (!list_empty(&rgn->list_inact_rgn)) + return; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (!list_empty(&srgn->list_act_srgn)) + return; + + list_add_tail(&rgn->list_inact_rgn, pending_list); +} + +static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn, + struct ufshpb_subregion, + list_act_srgn))) { + if (ufshpb_get_state(hpb) == HPB_SUSPEND) + break; + + list_del_init(&srgn->list_act_srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + ret = ufshpb_add_region(hpb, rgn); + if (ret) + goto active_failed; + + ret = ufshpb_issue_map_req(hpb, rgn, srgn); + if (ret) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "issue map_req failed. 
ret %d, region %d - %d\n", + ret, rgn->rgn_idx, srgn->srgn_idx); + goto active_failed; + } + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + } + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + return; + +active_failed: + dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n", + rgn->rgn_idx, srgn->srgn_idx); + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_add_active_list(hpb, rgn, srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn; + unsigned long flags; + int ret; + LIST_HEAD(pending_list); + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn, + struct ufshpb_region, + list_inact_rgn))) { + if (ufshpb_get_state(hpb) == HPB_SUSPEND) + break; + + list_del_init(&rgn->list_inact_rgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + ret = ufshpb_evict_region(hpb, rgn); + if (ret) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_add_pending_evict_list(hpb, rgn, &pending_list); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + } + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + } + + list_splice(&pending_list, &hpb->lh_inact_rgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_map_work_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work); + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT\n", __func__); + return; + } + + ufshpb_run_inactive_region_list(hpb); + ufshpb_run_active_subregion_list(hpb); +} + +/* + * this function doesn't need to hold lock due to be called in init. + * (rgn_state_lock, rsp_list_lock, etc..) + */ +static int ufshpb_init_pinned_active_region(struct ufs_hba *hba, + struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_subregion *srgn; + int srgn_idx, i; + int err = 0; + + for_each_sub_region(rgn, srgn_idx, srgn) { + srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last); + srgn->srgn_state = HPB_SRGN_INVALID; + if (!srgn->mctx) { + err = -ENOMEM; + dev_err(hba->dev, + "alloc mctx for pinned region failed\n"); + goto release; + } + + list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); + } + + rgn->rgn_state = HPB_RGN_PINNED; + return 0; + +release: + for (i = 0; i < srgn_idx; i++) { + srgn = rgn->srgn_tbl + i; + ufshpb_put_map_ctx(hpb, srgn->mctx); + } + return err; +} + static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb, struct ufshpb_region *rgn, bool last) { int srgn_idx; struct ufshpb_subregion *srgn; - for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; srgn_idx++) { - srgn = rgn->srgn_tbl + srgn_idx; + for_each_sub_region(rgn, srgn_idx, srgn) { + INIT_LIST_HEAD(&srgn->list_act_srgn); srgn->rgn_idx = rgn->rgn_idx; srgn->srgn_idx = srgn_idx; @@ -78,6 +963,8 @@ static void ufshpb_lu_parameter_init(struct ufs_hba *hba, hpb->lu_pinned_end = hpb_lu_info->num_pinned ? 
(hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1) : PINNED_NOT_SET;
+	hpb->lru_info.max_lru_active_cnt =
+		hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
 
 	rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
 			* HPB_ENTRY_SIZE;
@@ -129,6 +1016,9 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
 		rgn = rgn_table + rgn_idx;
 		rgn->rgn_idx = rgn_idx;
 
+		INIT_LIST_HEAD(&rgn->list_inact_rgn);
+		INIT_LIST_HEAD(&rgn->list_lru_rgn);
+
 		if (rgn_idx == hpb->rgns_per_lu - 1) {
 			srgn_cnt = ((hpb->srgns_per_lu - 1) %
 				    hpb->srgns_per_rgn) + 1;
@@ -140,7 +1030,13 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
 			goto release_srgn_table;
 		ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
 
-		rgn->rgn_state = HPB_RGN_INACTIVE;
+		if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
+			ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
+			if (ret)
+				goto release_srgn_table;
+		} else {
+			rgn->rgn_state = HPB_RGN_INACTIVE;
+		}
 	}
 
 	return 0;
@@ -158,13 +1054,13 @@ static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
 					 struct ufshpb_region *rgn)
 {
 	int srgn_idx;
+	struct ufshpb_subregion *srgn;
 
-	for (srgn_idx = 0; srgn_idx < rgn->srgn_cnt; srgn_idx++) {
-		struct ufshpb_subregion *srgn;
-
-		srgn = rgn->srgn_tbl + srgn_idx;
-		srgn->srgn_state = HPB_SRGN_UNUSED;
-	}
+	for_each_sub_region(rgn, srgn_idx, srgn)
+		if (srgn->srgn_state != HPB_SRGN_UNUSED) {
+			srgn->srgn_state = HPB_SRGN_UNUSED;
+			ufshpb_put_map_ctx(hpb, srgn->mctx);
+		}
 }
 
 static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
@@ -239,11 +1135,47 @@ static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
 {
 	int ret;
 
+	spin_lock_init(&hpb->rgn_state_lock);
+	spin_lock_init(&hpb->rsp_list_lock);
+
+	INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
+	INIT_LIST_HEAD(&hpb->lh_act_srgn);
+	INIT_LIST_HEAD(&hpb->lh_inact_rgn);
+	INIT_LIST_HEAD(&hpb->list_hpb_lu);
+
+	INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
+
+	hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
+			  sizeof(struct ufshpb_req), 0, 0, NULL);
+	if (!hpb->map_req_cache) {
+		dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
+			hpb->lun);
+		return -ENOMEM;
+	}
+
+	hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
+			  sizeof(struct page *) * hpb->pages_per_srgn,
+			  0, 0, NULL);
+	if (!hpb->m_page_cache) {
+		dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
+			hpb->lun);
+		ret = -ENOMEM;
+		goto release_req_cache;
+	}
+
 	ret = ufshpb_alloc_region_tbl(hba, hpb);
+	if (ret)
+		goto release_m_page_cache;
 
 	ufshpb_stat_init(hpb);
 
 	return 0;
+
+release_m_page_cache:
+	kmem_cache_destroy(hpb->m_page_cache);
+release_req_cache:
+	kmem_cache_destroy(hpb->map_req_cache);
+	return ret;
 }
 
 static struct ufshpb_lu *
@@ -275,6 +1207,33 @@ release_hpb:
 	return NULL;
 }
 
+static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
+{
+	struct ufshpb_region *rgn, *next_rgn;
+	struct ufshpb_subregion *srgn, *next_srgn;
+	unsigned long flags;
+
+	/*
+	 * If a device reset occurred, the remaining HPB region information
+	 * may be stale. Therefore, discarding the lists of HPB responses
+	 * that remained after the reset prevents unnecessary work.
+ */ + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn, + list_inact_rgn) + list_del_init(&rgn->list_inact_rgn); + + list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn, + list_act_srgn) + list_del_init(&srgn->list_act_srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb) +{ + cancel_work_sync(&hpb->map_work); +} + static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba) { int err = 0; @@ -318,7 +1277,7 @@ void ufshpb_reset(struct ufs_hba *hba) struct scsi_device *sdev; shost_for_each_device(sdev, hba->host) { - hpb = sdev->hostdata; + hpb = ufshpb_get_hpb_data(sdev); if (!hpb) continue; @@ -335,13 +1294,15 @@ void ufshpb_reset_host(struct ufs_hba *hba) struct scsi_device *sdev; shost_for_each_device(sdev, hba->host) { - hpb = sdev->hostdata; + hpb = ufshpb_get_hpb_data(sdev); if (!hpb) continue; if (ufshpb_get_state(hpb) != HPB_PRESENT) continue; ufshpb_set_state(hpb, HPB_RESET); + ufshpb_cancel_jobs(hpb); + ufshpb_discard_rsp_lists(hpb); } } @@ -351,13 +1312,14 @@ void ufshpb_suspend(struct ufs_hba *hba) struct scsi_device *sdev; shost_for_each_device(sdev, hba->host) { - hpb = sdev->hostdata; + hpb = ufshpb_get_hpb_data(sdev); if (!hpb) continue; if (ufshpb_get_state(hpb) != HPB_PRESENT) continue; ufshpb_set_state(hpb, HPB_SUSPEND); + ufshpb_cancel_jobs(hpb); } } @@ -367,7 +1329,7 @@ void ufshpb_resume(struct ufs_hba *hba) struct scsi_device *sdev; shost_for_each_device(sdev, hba->host) { - hpb = sdev->hostdata; + hpb = ufshpb_get_hpb_data(sdev); if (!hpb) continue; @@ -375,6 +1337,7 @@ void ufshpb_resume(struct ufs_hba *hba) (ufshpb_get_state(hpb) != HPB_SUSPEND)) continue; ufshpb_set_state(hpb, HPB_PRESENT); + ufshpb_kick_map_work(hpb); } } @@ -427,7 +1390,7 @@ static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun, void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) { - struct ufshpb_lu *hpb = sdev->hostdata; + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); if (!hpb) return; @@ -437,8 +1400,13 @@ void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) sdev = hpb->sdev_ufs_lu; sdev->hostdata = NULL; + ufshpb_cancel_jobs(hpb); + ufshpb_destroy_region_tbl(hpb); + kmem_cache_destroy(hpb->map_req_cache); + kmem_cache_destroy(hpb->m_page_cache); + list_del_init(&hpb->list_hpb_lu); kfree(hpb); @@ -446,24 +1414,41 @@ void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba) { + int pool_size; struct ufshpb_lu *hpb; struct scsi_device *sdev; bool init_success; + if (tot_active_srgn_pages == 0) { + ufshpb_remove(hba); + return; + } + init_success = !ufshpb_check_hpb_reset_query(hba); + pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE; + if (pool_size > tot_active_srgn_pages) { + mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages); + mempool_resize(ufshpb_page_pool, tot_active_srgn_pages); + } + shost_for_each_device(sdev, hba->host) { - hpb = sdev->hostdata; + hpb = ufshpb_get_hpb_data(sdev); if (!hpb) continue; if (init_success) { ufshpb_set_state(hpb, HPB_PRESENT); + if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0) + queue_work(ufshpb_wq, &hpb->map_work); } else { dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun); ufshpb_destroy_lu(hba, sdev); } } + + if (!init_success) + ufshpb_remove(hba); } void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) @@ -485,6 +1470,9 @@ void ufshpb_init_hpb_lu(struct ufs_hba 
*hba, struct scsi_device *sdev) if (!hpb) goto out; + tot_active_srgn_pages += hpb_lu_info.max_active_rgns * + hpb->srgns_per_rgn * hpb->pages_per_srgn; + hpb->sdev_ufs_lu = sdev; sdev->hostdata = hpb; @@ -494,6 +1482,57 @@ out: ufshpb_hpb_lu_prepared(hba); } +static int ufshpb_init_mem_wq(struct ufs_hba *hba) +{ + int ret; + unsigned int pool_size; + + ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache", + sizeof(struct ufshpb_map_ctx), + 0, 0, NULL); + if (!ufshpb_mctx_cache) { + dev_err(hba->dev, "ufshpb: cannot init mctx cache\n"); + return -ENOMEM; + } + + pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE; + dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n", + __func__, __LINE__, ufshpb_host_map_kbytes, pool_size); + + ufshpb_mctx_pool = mempool_create_slab_pool(pool_size, + ufshpb_mctx_cache); + if (!ufshpb_mctx_pool) { + dev_err(hba->dev, "ufshpb: cannot init mctx pool\n"); + ret = -ENOMEM; + goto release_mctx_cache; + } + + ufshpb_page_pool = mempool_create_page_pool(pool_size, 0); + if (!ufshpb_page_pool) { + dev_err(hba->dev, "ufshpb: cannot init page pool\n"); + ret = -ENOMEM; + goto release_mctx_pool; + } + + ufshpb_wq = alloc_workqueue("ufshpb-wq", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0); + if (!ufshpb_wq) { + dev_err(hba->dev, "ufshpb: alloc workqueue failed\n"); + ret = -ENOMEM; + goto release_page_pool; + } + + return 0; + +release_page_pool: + mempool_destroy(ufshpb_page_pool); +release_mctx_pool: + mempool_destroy(ufshpb_mctx_pool); +release_mctx_cache: + kmem_cache_destroy(ufshpb_mctx_cache); + return ret; +} + void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) { struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev; @@ -558,7 +1597,13 @@ void ufshpb_init(struct ufs_hba *hba) if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled) return; + if (ufshpb_init_mem_wq(hba)) { + hpb_dev_info->hpb_disabled = true; + return; + } + atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu); + tot_active_srgn_pages = 0; /* issue HPB reset query */ for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) { ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, @@ -567,3 +1612,16 @@ void ufshpb_init(struct ufs_hba *hba) break; } } + +void ufshpb_remove(struct ufs_hba *hba) +{ + mempool_destroy(ufshpb_page_pool); + mempool_destroy(ufshpb_mctx_pool); + kmem_cache_destroy(ufshpb_mctx_cache); + + destroy_workqueue(ufshpb_wq); +} + +module_param(ufshpb_host_map_kbytes, uint, 0644); +MODULE_PARM_DESC(ufshpb_host_map_kbytes, + "ufshpb host mapping memory kilo-bytes for ufshpb memory-pool"); diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h index fa311ed3fa94..dcc0ca3b8158 100644 --- a/drivers/scsi/ufs/ufshpb.h +++ b/drivers/scsi/ufs/ufshpb.h @@ -40,6 +40,7 @@ #define LU_ENABLED_HPB_FUNC 0x02 #define HPB_RESET_REQ_RETRIES 10 +#define HPB_MAP_REQ_RETRIES 5 #define HPB_SUPPORT_VERSION 0x100 @@ -84,11 +85,19 @@ struct ufshpb_lu_info { int max_active_rgns; }; +struct ufshpb_map_ctx { + struct page **m_page; + unsigned long *ppn_dirty; +}; + struct ufshpb_subregion { + struct ufshpb_map_ctx *mctx; enum HPB_SRGN_STATE srgn_state; int rgn_idx; int srgn_idx; bool is_last; + /* below information is used by rsp_list */ + struct list_head list_act_srgn; }; struct ufshpb_region { @@ -96,6 +105,43 @@ struct ufshpb_region { enum HPB_RGN_STATE rgn_state; int rgn_idx; int srgn_cnt; + + /* below information is used by rsp_list */ + struct list_head list_inact_rgn; + + /* below information is used by lru */ + struct list_head list_lru_rgn; +}; + 
+#define for_each_sub_region(rgn, i, srgn) \ + for ((i) = 0; \ + ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]); \ + (i)++) + +/** + * struct ufshpb_req - UFSHPB READ BUFFER (for caching map) request structure + * @req: block layer request for READ BUFFER + * @bio: bio for holding map page + * @hpb: ufshpb_lu structure that related to the L2P map + * @mctx: L2P map information + * @rgn_idx: target region index + * @srgn_idx: target sub-region index + * @lun: target logical unit number + */ +struct ufshpb_req { + struct request *req; + struct bio *bio; + struct ufshpb_lu *hpb; + struct ufshpb_map_ctx *mctx; + + unsigned int rgn_idx; + unsigned int srgn_idx; +}; + +struct victim_select_info { + struct list_head lh_lru_rgn; /* LRU list of regions */ + int max_lru_active_cnt; /* supported hpb #region - pinned #region */ + atomic_t active_cnt; }; struct ufshpb_stats { @@ -110,10 +156,22 @@ struct ufshpb_stats { struct ufshpb_lu { int lun; struct scsi_device *sdev_ufs_lu; + + spinlock_t rgn_state_lock; /* for protect rgn/srgn state */ struct ufshpb_region *rgn_tbl; atomic_t hpb_state; + spinlock_t rsp_list_lock; + struct list_head lh_act_srgn; /* hold rsp_list_lock */ + struct list_head lh_inact_rgn; /* hold rsp_list_lock */ + + /* cached L2P map management worker */ + struct work_struct map_work; + + /* for selecting victim */ + struct victim_select_info lru_info; + /* pinned region information */ u32 lu_pinned_start; u32 lu_pinned_end; @@ -133,6 +191,9 @@ struct ufshpb_lu { struct ufshpb_stats stats; + struct kmem_cache *map_req_cache; + struct kmem_cache *m_page_cache; + struct list_head list_hpb_lu; }; @@ -140,6 +201,7 @@ struct ufs_hba; struct ufshcd_lrb; #ifndef CONFIG_SCSI_UFS_HPB +static void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {} static void ufshpb_resume(struct ufs_hba *hba) {} static void ufshpb_suspend(struct ufs_hba *hba) {} static void ufshpb_reset(struct ufs_hba *hba) {} @@ -147,10 +209,12 @@ static void ufshpb_reset_host(struct ufs_hba *hba) {} static void ufshpb_init(struct ufs_hba *hba) {} static void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) {} static void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) {} +static void ufshpb_remove(struct ufs_hba *hba) {} static bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; } static void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {} static void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {} #else +void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); void ufshpb_resume(struct ufs_hba *hba); void ufshpb_suspend(struct ufs_hba *hba); void ufshpb_reset(struct ufs_hba *hba); @@ -158,6 +222,7 @@ void ufshpb_reset_host(struct ufs_hba *hba); void ufshpb_init(struct ufs_hba *hba); void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev); void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev); +void ufshpb_remove(struct ufs_hba *hba); bool ufshpb_is_allowed(struct ufs_hba *hba); void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf); void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf); From 7be6123e72ad475132b9b3c15c6f5ae19e01955e Mon Sep 17 00:00:00 2001 From: Daejun Park Date: Wed, 16 Jun 2021 17:37:22 +0900 Subject: [PATCH 28/62] FROMLIST: scsi: ufs: Prepare HPB read for cached sub-region This patch changes the read I/O to the HPB read I/O. If the logical address of the read I/O belongs to active sub-region, the HPB driver modifies the read I/O command to HPB read. 
It modifies the UPIU command of UFS instead of modifying the existing SCSI command. In the HPB version 1.0, the maximum read I/O size that can be converted to HPB read is 4KB. The dirty map of the active sub-region prevents an incorrect HPB read that has stale physical page number which is updated by previous write I/O. Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/linux-scsi/20210616070913epcms2p83805028905f46225a65cc71678cddde7@epcms2p8/ Reviewed-by: Greg Kroah-Hartman Reviewed-by: Can Guo Reviewed-by: Bart Van Assche Reviewed-by: Bean Huo Reviewed-by: Stanley Chu Acked-by: Avri Altman Tested-by: Bean Huo Tested-by: Can Guo Tested-by: Stanley Chu Signed-off-by: Daejun Park Change-Id: I1f5fde83246ed12c8e095bd987ce97ef63c16e51 --- drivers/scsi/ufs/ufshcd.c | 2 + drivers/scsi/ufs/ufshpb.c | 259 +++++++++++++++++++++++++++++++++++++- drivers/scsi/ufs/ufshpb.h | 2 + 3 files changed, 260 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index f19a7abb659f..0df4ba90973e 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -2756,6 +2756,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->req_abort_skip = false; + ufshpb_prep(hba, lrbp); + ufshcd_comp_scsi_upiu(hba, lrbp); err = ufshcd_map_sg(hba, lrbp); diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 58bb02d82b97..60acd3cd9597 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -46,6 +46,29 @@ static void ufshpb_set_state(struct ufshpb_lu *hpb, int state) atomic_set(&hpb->hpb_state, state); } +static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + return rgn->rgn_state != HPB_RGN_INACTIVE && + srgn->srgn_state == HPB_SRGN_VALID; +} + +static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd) +{ + return req_op(cmd->request) == REQ_OP_READ; +} + +static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd) +{ + return op_is_write(req_op(cmd->request)) || + op_is_discard(req_op(cmd->request)); +} + +static bool ufshpb_is_supported_chunk(int transfer_len) +{ + return transfer_len <= HPB_MULTI_CHUNK_HIGH; +} + static bool ufshpb_is_general_lun(int lun) { return lun < UFS_UPIU_MAX_UNIT_NUM_ID; @@ -80,8 +103,8 @@ static void ufshpb_kick_map_work(struct ufshpb_lu *hpb) } static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba, - struct ufshcd_lrb *lrbp, - struct utp_hpb_rsp *rsp_field) + struct ufshcd_lrb *lrbp, + struct utp_hpb_rsp *rsp_field) { /* Check HPB_UPDATE_ALERT */ if (!(lrbp->ucd_rsp_ptr->header.dword_2 & @@ -107,6 +130,236 @@ static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba, return true; } +static void ufshpb_set_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx, int srgn_offset, int cnt) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int set_bit_len; + int bitmap_len; + +next_srgn: + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + if (likely(!srgn->is_last)) + bitmap_len = hpb->entries_per_srgn; + else + bitmap_len = hpb->last_srgn_entries; + + if ((srgn_offset + cnt) > bitmap_len) + set_bit_len = bitmap_len - srgn_offset; + else + set_bit_len = cnt; + + if (rgn->rgn_state != HPB_RGN_INACTIVE && + srgn->srgn_state == HPB_SRGN_VALID) + bitmap_set(srgn->mctx->ppn_dirty, srgn_offset, set_bit_len); + + srgn_offset = 0; + if (++srgn_idx == hpb->srgns_per_rgn) { + srgn_idx = 0; + rgn_idx++; + } + + cnt -= set_bit_len; + if (cnt > 0) + goto next_srgn; +} + +static bool 
ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
+				  int srgn_idx, int srgn_offset, int cnt)
+{
+	struct ufshpb_region *rgn;
+	struct ufshpb_subregion *srgn;
+	int bitmap_len;
+	int bit_len;
+
+next_srgn:
+	rgn = hpb->rgn_tbl + rgn_idx;
+	srgn = rgn->srgn_tbl + srgn_idx;
+
+	if (likely(!srgn->is_last))
+		bitmap_len = hpb->entries_per_srgn;
+	else
+		bitmap_len = hpb->last_srgn_entries;
+
+	if (!ufshpb_is_valid_srgn(rgn, srgn))
+		return true;
+
+	/*
+	 * If the region state is active, mctx must be allocated.
+	 * In this case, check whether the region was evicted or
+	 * the mctx allocation failed.
+	 */
+	if (unlikely(!srgn->mctx)) {
+		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+			"no mctx in region %d subregion %d.\n",
+			srgn->rgn_idx, srgn->srgn_idx);
+		return true;
+	}
+
+	if ((srgn_offset + cnt) > bitmap_len)
+		bit_len = bitmap_len - srgn_offset;
+	else
+		bit_len = cnt;
+
+	if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
+			  srgn_offset) < bit_len + srgn_offset)
+		return true;
+
+	srgn_offset = 0;
+	if (++srgn_idx == hpb->srgns_per_rgn) {
+		srgn_idx = 0;
+		rgn_idx++;
+	}
+
+	cnt -= bit_len;
+	if (cnt > 0)
+		goto next_srgn;
+
+	return false;
+}
+
+static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
+				     struct ufshpb_map_ctx *mctx, int pos,
+				     int len, __be64 *ppn_buf)
+{
+	struct page *page;
+	int index, offset;
+	int copied;
+
+	index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
+	offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
+
+	if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
+		copied = len;
+	else
+		copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
+
+	page = mctx->m_page[index];
+	if (unlikely(!page)) {
+		dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+			"error. cannot find page in mctx\n");
+		return -ENOMEM;
+	}
+
+	memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
+	       copied * HPB_ENTRY_SIZE);
+
+	return copied;
+}
+
+static void
+ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
+			int *srgn_idx, int *offset)
+{
+	int rgn_offset;
+
+	*rgn_idx = lpn >> hpb->entries_per_rgn_shift;
+	rgn_offset = lpn & hpb->entries_per_rgn_mask;
+	*srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
+	*offset = rgn_offset & hpb->entries_per_srgn_mask;
+}
+
+static void
+ufshpb_set_hpb_read_to_upiu(struct ufshpb_lu *hpb, struct ufshcd_lrb *lrbp,
+			    u32 lpn, __be64 ppn, u8 transfer_len)
+{
+	unsigned char *cdb = lrbp->cmd->cmnd;
+
+	cdb[0] = UFSHPB_READ;
+
+	/* ppn value is stored as big-endian in the host memory */
+	memcpy(&cdb[6], &ppn, sizeof(__be64));
+	cdb[14] = transfer_len;
+
+	lrbp->cmd->cmd_len = UFS_CDB_SIZE;
+}
+
+/*
+ * This function sets up an HPB read command using host-side L2P map data.
+ * In HPB v1.0, the maximum size of an HPB read command is 4KB.
+ */
+void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+	struct ufshpb_lu *hpb;
+	struct ufshpb_region *rgn;
+	struct ufshpb_subregion *srgn;
+	struct scsi_cmnd *cmd = lrbp->cmd;
+	u32 lpn;
+	__be64 ppn;
+	unsigned long flags;
+	int transfer_len, rgn_idx, srgn_idx, srgn_offset;
+	int err = 0;
+
+	hpb = ufshpb_get_hpb_data(cmd->device);
+	if (!hpb)
+		return;
+
+	if (ufshpb_get_state(hpb) == HPB_INIT)
+		return;
+
+	if (ufshpb_get_state(hpb) != HPB_PRESENT) {
+		dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
+			   "%s: ufshpb state is not PRESENT", __func__);
+		return;
+	}
+
+	if (blk_rq_is_scsi(cmd->request) ||
+	    (!ufshpb_is_write_or_discard(cmd) &&
+	     !ufshpb_is_read_cmd(cmd)))
+		return;
+
+	transfer_len = sectors_to_logical(cmd->device,
+					  blk_rq_sectors(cmd->request));
+	if (unlikely(!transfer_len))
+		return;
+
+	lpn = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request));
+	ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
+	rgn = hpb->rgn_tbl + rgn_idx;
+	srgn = rgn->srgn_tbl + srgn_idx;
+
+	/* If the command type is WRITE or DISCARD, mark the bitmap as dirty */
+	if (ufshpb_is_write_or_discard(cmd)) {
+		spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+		ufshpb_set_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
+				     transfer_len);
+		spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+		return;
+	}
+
+	if (!ufshpb_is_supported_chunk(transfer_len))
+		return;
+
+	WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH);
+
+	spin_lock_irqsave(&hpb->rgn_state_lock, flags);
+	if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
+				  transfer_len)) {
+		hpb->stats.miss_cnt++;
+		spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+		return;
+	}
+
+	err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
+	spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
+	if (unlikely(err < 0)) {
+		/*
+		 * In this case, the region state is active,
+		 * but the ppn table is not allocated.
+		 * The ppn table must have been allocated while
+		 * the region is in the active state.
+		 */
+		dev_err(hba->dev, "get ppn failed. err %d\n", err);
err %d\n", err); + return; + } + + ufshpb_set_hpb_read_to_upiu(hpb, lrbp, lpn, ppn, transfer_len); + + hpb->stats.hit_cnt++; +} static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, struct ufshpb_subregion *srgn) { @@ -153,7 +406,7 @@ free_map_req: } static void ufshpb_put_map_req(struct ufshpb_lu *hpb, - struct ufshpb_req *map_req) + struct ufshpb_req *map_req) { bio_put(map_req->bio); blk_put_request(map_req->req); diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h index dcc0ca3b8158..6e6a0252dc15 100644 --- a/drivers/scsi/ufs/ufshpb.h +++ b/drivers/scsi/ufs/ufshpb.h @@ -201,6 +201,7 @@ struct ufs_hba; struct ufshcd_lrb; #ifndef CONFIG_SCSI_UFS_HPB +static void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {} static void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {} static void ufshpb_resume(struct ufs_hba *hba) {} static void ufshpb_suspend(struct ufs_hba *hba) {} @@ -214,6 +215,7 @@ static bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; } static void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {} static void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {} #else +void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); void ufshpb_resume(struct ufs_hba *hba); void ufshpb_suspend(struct ufs_hba *hba); From 756ecd96f6497f6d64d56912301387a89794ef4d Mon Sep 17 00:00:00 2001 From: Daejun Park Date: Wed, 16 Jun 2021 17:37:36 +0900 Subject: [PATCH 29/62] FROMLIST: scsi: ufs: Add HPB 2.0 support This patch adds support for HPB 2.0. HPB 2.0 supports reads of varying sizes from 4KB to 512KB. Reads (<= 32KB) are supported as a single HPB read. Reads (36KB ~ 1MB) are supported as a combination of a write buffer command and an HPB read command, to deliver more PPNs. The write buffer commands may not be issued immediately due to busy tags. To use HPB read more aggressively, the driver can requeue the write buffer command. The requeue threshold is implemented as a timeout and can be modified via the requeue_timeout_ms entry in sysfs. Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/linux-scsi/20210616070942epcms2p5b858c3ab5a1feca32162c8fd75ebed67@epcms2p5/ Reviewed-by: Greg Kroah-Hartman Reviewed-by: Can Guo Reviewed-by: Bean Huo Reviewed-by: Stanley Chu Tested-by: Can Guo Tested-by: Stanley Chu Signed-off-by: Daejun Park Change-Id: I0a54f9ff2c84eed17f77da59331d2400b7edffdc --- Documentation/ABI/testing/sysfs-driver-ufs | 35 ++ drivers/scsi/ufs/ufs-sysfs.c | 4 + drivers/scsi/ufs/ufs.h | 3 +- drivers/scsi/ufs/ufshcd.c | 23 +- drivers/scsi/ufs/ufshcd.h | 4 + drivers/scsi/ufs/ufshpb.c | 617 +++++++++++++++++++-- drivers/scsi/ufs/ufshpb.h | 67 ++- 7 files changed, 680 insertions(+), 73 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs index 9c3188bf2a53..168023f9a240 100644 --- a/Documentation/ABI/testing/sysfs-driver-ufs +++ b/Documentation/ABI/testing/sysfs-driver-ufs @@ -1406,3 +1406,38 @@ Description: This entry shows the number of read buffer commands for activating sub-regions recommended by response UPIUs. The file is read only. + +What: /sys/class/scsi_device/*/device/hpb_params/requeue_timeout_ms +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the requeue timeout threshold for the write buffer + command in ms. This value can be changed by writing a proper integer to + this entry. 
+ +What: /sys/bus/platform/drivers/ufshcd/*/attributes/max_data_size_hpb_single_cmd +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the maximum HPB data size for using a single HPB + command. + + === ======== + 00h 4KB + 01h 8KB + 02h 12KB + ... + FFh 1024KB + === ======== + + The file is read only. + +What: /sys/bus/platform/drivers/ufshcd/*/flags/hpb_enable +Date: June 2021 +Contact: Daejun Park +Description: This entry shows the status of HPB. + + == ============================ + 0 HPB is not enabled. + 1 HPB is enabled. + == ============================ + + The file is read only. diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c index 7a58cb4e0946..4c02591058c6 100644 --- a/drivers/scsi/ufs/ufs-sysfs.c +++ b/drivers/scsi/ufs/ufs-sysfs.c @@ -917,6 +917,7 @@ UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE); UFS_FLAG(wb_enable, _WB_EN); UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN); UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8); +UFS_FLAG(hpb_enable, _HPB_EN); static struct attribute *ufs_sysfs_device_flags[] = { &dev_attr_device_init.attr, @@ -930,6 +931,7 @@ static struct attribute *ufs_sysfs_device_flags[] = { &dev_attr_wb_enable.attr, &dev_attr_wb_flush_en.attr, &dev_attr_wb_flush_during_h8.attr, + &dev_attr_hpb_enable.attr, NULL, }; @@ -965,6 +967,7 @@ static ssize_t _name##_show(struct device *dev, \ static DEVICE_ATTR_RO(_name) UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN); +UFS_ATTRIBUTE(max_data_size_hpb_single_cmd, _MAX_HPB_SINGLE_CMD); UFS_ATTRIBUTE(current_power_mode, _POWER_MODE); UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL); UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN); @@ -988,6 +991,7 @@ UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE); static struct attribute *ufs_sysfs_attributes[] = { &dev_attr_boot_lun_enabled.attr, + &dev_attr_max_data_size_hpb_single_cmd.attr, &dev_attr_current_power_mode.attr, &dev_attr_active_icc_level.attr, &dev_attr_ooo_data_enabled.attr, diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 0a504dcf8290..d69032d65463 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -123,12 +123,13 @@ enum flag_idn { QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F, QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10, QUERY_FLAG_IDN_HPB_RESET = 0x11, + QUERY_FLAG_IDN_HPB_EN = 0x12, }; /* Attribute idn for Query requests */ enum attr_idn { QUERY_ATTR_IDN_BOOT_LU_EN = 0x00, - QUERY_ATTR_IDN_RESERVED = 0x01, + QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 0x01, QUERY_ATTR_IDN_POWER_MODE = 0x02, QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03, QUERY_ATTR_IDN_OOO_DATA_EN = 0x04, diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 0df4ba90973e..a39aa555b0c2 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -2756,7 +2756,12 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->req_abort_skip = false; - ufshpb_prep(hba, lrbp); + err = ufshpb_prep(hba, lrbp); + if (err == -EAGAIN) { + lrbp->cmd = NULL; + ufshcd_release(hba); + goto out; + } ufshcd_comp_scsi_upiu(hba, lrbp); @@ -4914,7 +4919,8 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev) { /* skip well-known LU */ - if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || !ufshpb_is_allowed(hba)) + if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || + !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) return; ufshpb_destroy_lu(hba, sdev); @@ -7503,8 +7509,18 @@ static int 
ufs_get_device_desc(struct ufs_hba *hba) if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION && (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) { - dev_info->hpb_enabled = true; + bool hpb_en = false; + ufshpb_get_dev_info(hba, desc_buf); + + if (!ufshpb_is_legacy(hba)) + err = ufshcd_query_flag_retry(hba, + UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_HPB_EN, 0, + &hpb_en); + + if (ufshpb_is_legacy(hba) || (!err && hpb_en)) + dev_info->hpb_enabled = true; } err = ufshcd_read_string_desc(hba, model_index, @@ -8077,6 +8093,7 @@ static const struct attribute_group *ufshcd_driver_groups[] = { &ufs_sysfs_lun_attributes_group, #ifdef CONFIG_SCSI_UFS_HPB &ufs_sysfs_hpb_stat_group, + &ufs_sysfs_hpb_param_group, #endif NULL, }; diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 626105ee723f..1f559256a0f3 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -662,6 +662,8 @@ struct ufs_hba_variant_params { * @srgn_size: device reported HPB sub-region size * @slave_conf_cnt: counter to check all lu finished initialization * @hpb_disabled: flag to check if HPB is disabled + * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value + * @is_legacy: flag to check HPB 1.0 */ struct ufshpb_dev_info { int num_lu; @@ -669,6 +671,8 @@ struct ufshpb_dev_info { int srgn_size; atomic_t slave_conf_cnt; bool hpb_disabled; + u8 max_hpb_single_cmd; + bool is_legacy; }; #endif diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 60acd3cd9597..273e3b6e1ded 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -31,6 +31,12 @@ bool ufshpb_is_allowed(struct ufs_hba *hba) return !(hba->ufshpb_dev.hpb_disabled); } +/* HPB version 1.0 is called the legacy version. */ +bool ufshpb_is_legacy(struct ufs_hba *hba) +{ + return hba->ufshpb_dev.is_legacy; +} + static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev) { return sdev->hostdata; @@ -64,9 +70,19 @@ static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd) op_is_discard(req_op(cmd->request)); } -static bool ufshpb_is_supported_chunk(int transfer_len) +static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len) { - return transfer_len <= HPB_MULTI_CHUNK_HIGH; + return transfer_len <= hpb->pre_req_max_tr_len; +} + +/* + * In this driver, the WRITE_BUFFER CMD supports 36KB (len=9) ~ 1MB (len=256) by + * default. It is possible to change the range of transfer_len through sysfs. 
+ */ +static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len) +{ + return len > hpb->pre_req_min_tr_len && + len <= hpb->pre_req_max_tr_len; } static bool ufshpb_is_general_lun(int lun) @@ -74,8 +90,7 @@ static bool ufshpb_is_general_lun(int lun) return lun < UFS_UPIU_MAX_UNIT_NUM_ID; } -static bool -ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx) +static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx) { if (hpb->lu_pinned_end != PINNED_NOT_SET && rgn_idx >= hpb->lu_pinned_start && @@ -264,7 +279,7 @@ ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx, static void ufshpb_set_hpb_read_to_upiu(struct ufshpb_lu *hpb, struct ufshcd_lrb *lrbp, - u32 lpn, __be64 ppn, u8 transfer_len) + u32 lpn, __be64 ppn, u8 transfer_len, int read_id) { unsigned char *cdb = lrbp->cmd->cmnd; @@ -273,15 +288,260 @@ ufshpb_set_hpb_read_to_upiu(struct ufshpb_lu *hpb, struct ufshcd_lrb *lrbp, /* ppn value is stored as big-endian in the host memory */ memcpy(&cdb[6], &ppn, sizeof(__be64)); cdb[14] = transfer_len; + cdb[15] = read_id; lrbp->cmd->cmd_len = UFS_CDB_SIZE; } +static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb, + unsigned long lpn, unsigned int len, + int read_id) +{ + cdb[0] = UFSHPB_WRITE_BUFFER; + cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID; + + put_unaligned_be32(lpn, &cdb[2]); + cdb[6] = read_id; + put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]); + + cdb[9] = 0x00; /* Control = 0x00 */ +} + +static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb) +{ + struct ufshpb_req *pre_req; + + if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) { + dev_info(&hpb->sdev_ufs_lu->sdev_dev, + "pre_req throttle. inflight %d throttle %d", + hpb->num_inflight_pre_req, hpb->throttle_pre_req); + return NULL; + } + + pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free, + struct ufshpb_req, list_req); + if (!pre_req) { + dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req"); + return NULL; + } + + list_del_init(&pre_req->list_req); + hpb->num_inflight_pre_req++; + + return pre_req; +} + +static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb, + struct ufshpb_req *pre_req) +{ + pre_req->req = NULL; + bio_reset(pre_req->bio); + list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free); + hpb->num_inflight_pre_req--; +} + +static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data; + struct ufshpb_lu *hpb = pre_req->hpb; + unsigned long flags; + + if (error) { + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); + struct scsi_sense_hdr sshdr; + + dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error); + scsi_command_normalize_sense(cmd, &sshdr); + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "code %x sense_key %x asc %x ascq %x", + sshdr.response_code, + sshdr.sense_key, sshdr.asc, sshdr.ascq); + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "byte4 %x byte5 %x byte6 %x additional_len %x", + sshdr.byte4, sshdr.byte5, + sshdr.byte6, sshdr.additional_length); + } + + blk_mq_free_request(req); + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + ufshpb_put_pre_req(pre_req->hpb, pre_req); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); +} + +static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page) +{ + struct ufshpb_lu *hpb = pre_req->hpb; + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + __be64 *addr; + int offset = 0; + int copied; + unsigned long lpn = pre_req->wb.lpn; + int rgn_idx, srgn_idx, 
srgn_offset; + unsigned long flags; + + addr = page_address(page); + ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset); + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + +next_offset: + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + if (!ufshpb_is_valid_srgn(rgn, srgn)) + goto mctx_error; + + if (!srgn->mctx) + goto mctx_error; + + copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, + pre_req->wb.len - offset, + &addr[offset]); + + if (copied < 0) + goto mctx_error; + + offset += copied; + srgn_offset += copied; + + if (srgn_offset == hpb->entries_per_srgn) { + srgn_offset = 0; + + if (++srgn_idx == hpb->srgns_per_rgn) { + srgn_idx = 0; + rgn_idx++; + } + } + + if (offset < pre_req->wb.len) + goto next_offset; + + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return 0; +mctx_error: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return -ENOMEM; +} + +static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb, + struct request_queue *q, + struct ufshpb_req *pre_req) +{ + struct page *page = pre_req->wb.m_page; + struct bio *bio = pre_req->bio; + int entries_bytes, ret; + + if (!page) + return -ENOMEM; + + if (ufshpb_prep_entry(pre_req, page)) + return -ENOMEM; + + entries_bytes = pre_req->wb.len * sizeof(__be64); + + ret = bio_add_pc_page(q, bio, page, entries_bytes, 0); + if (ret != entries_bytes) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "bio_add_pc_page fail: %d", ret); + return -ENOMEM; + } + return 0; +} + +static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb) +{ + if (++hpb->cur_read_id >= MAX_HPB_READ_ID) + hpb->cur_read_id = 1; + return hpb->cur_read_id; +} + +static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd, + struct ufshpb_req *pre_req, int read_id) +{ + struct scsi_device *sdev = cmd->device; + struct request_queue *q = sdev->request_queue; + struct request *req; + struct scsi_request *rq; + struct bio *bio = pre_req->bio; + + pre_req->hpb = hpb; + pre_req->wb.lpn = sectors_to_logical(cmd->device, + blk_rq_pos(cmd->request)); + pre_req->wb.len = sectors_to_logical(cmd->device, + blk_rq_sectors(cmd->request)); + if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req)) + return -ENOMEM; + + req = pre_req->req; + + /* 1. request setup */ + blk_rq_append_bio(req, &bio); + req->rq_disk = NULL; + req->end_io_data = (void *)pre_req; + req->end_io = ufshpb_pre_req_compl_fn; + + /* 2. 
scsi_request setup */ + rq = scsi_req(req); + rq->retries = 1; + + ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len, + read_id); + rq->cmd_len = scsi_command_size(rq->cmd); + + if (blk_insert_cloned_request(q, req) != BLK_STS_OK) + return -EAGAIN; + + hpb->stats.pre_req_cnt++; + + return 0; +} + +static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd, + int *read_id) +{ + struct ufshpb_req *pre_req; + struct request *req = NULL; + unsigned long flags; + int _read_id; + int ret = 0; + + req = blk_get_request(cmd->device->request_queue, + REQ_OP_SCSI_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT); + if (IS_ERR(req)) + return -EAGAIN; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + pre_req = ufshpb_get_pre_req(hpb); + if (!pre_req) { + ret = -EAGAIN; + goto unlock_out; + } + _read_id = ufshpb_get_read_id(hpb); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + pre_req->req = req; + + ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id); + if (ret) + goto free_pre_req; + + *read_id = _read_id; + + return ret; +free_pre_req: + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + ufshpb_put_pre_req(hpb, pre_req); +unlock_out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + blk_put_request(req); + return ret; +} + /* * This function will set up HPB read command using host-side L2P map data. - * In HPB v1.0, maximum size of HPB read command is 4KB. */ -void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { struct ufshpb_lu *hpb; struct ufshpb_region *rgn; @@ -291,30 +551,31 @@ void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) __be64 ppn; unsigned long flags; int transfer_len, rgn_idx, srgn_idx, srgn_offset; + int read_id = 0; int err = 0; hpb = ufshpb_get_hpb_data(cmd->device); if (!hpb) - return; + return -ENODEV; if (ufshpb_get_state(hpb) == HPB_INIT) - return; + return -ENODEV; if (ufshpb_get_state(hpb) != HPB_PRESENT) { dev_notice(&hpb->sdev_ufs_lu->sdev_dev, "%s: ufshpb state is not PRESENT", __func__); - return; + return -ENODEV; } if (blk_rq_is_scsi(cmd->request) || (!ufshpb_is_write_or_discard(cmd) && !ufshpb_is_read_cmd(cmd))) - return; + return 0; transfer_len = sectors_to_logical(cmd->device, blk_rq_sectors(cmd->request)); if (unlikely(!transfer_len)) - return; + return 0; lpn = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request)); ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset); @@ -327,11 +588,11 @@ void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) ufshpb_set_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset, transfer_len); spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); - return; + return 0; } - if (!ufshpb_is_supported_chunk(transfer_len)) - return; + if (!ufshpb_is_supported_chunk(hpb, transfer_len)) + return 0; WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH); @@ -340,7 +601,7 @@ void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) transfer_len)) { hpb->stats.miss_cnt++; spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); - return; + return 0; } err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn); @@ -353,28 +614,46 @@ void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) * active state. */ dev_err(hba->dev, "get ppn failed. 
err %d\n", err); - return; + return err; } - ufshpb_set_hpb_read_to_upiu(hpb, lrbp, lpn, ppn, transfer_len); + if (!ufshpb_is_legacy(hba) && + ufshpb_is_required_wb(hpb, transfer_len)) { + err = ufshpb_issue_pre_req(hpb, cmd, &read_id); + if (err) { + unsigned long timeout; + + timeout = cmd->jiffies_at_alloc + msecs_to_jiffies( + hpb->params.requeue_timeout_ms); + + if (time_before(jiffies, timeout)) + return -EAGAIN; + + hpb->stats.miss_cnt++; + return 0; + } + } + + ufshpb_set_hpb_read_to_upiu(hpb, lrbp, lpn, ppn, transfer_len, read_id); hpb->stats.hit_cnt++; + return 0; } -static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, - struct ufshpb_subregion *srgn) + +static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, + int rgn_idx, enum req_opf dir) { - struct ufshpb_req *map_req; + struct ufshpb_req *rq; struct request *req; - struct bio *bio; int retries = HPB_MAP_REQ_RETRIES; - map_req = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL); - if (!map_req) + rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL); + if (!rq) return NULL; retry: - req = blk_get_request(hpb->sdev_ufs_lu->request_queue, - REQ_OP_SCSI_IN, BLK_MQ_REQ_NOWAIT); + req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir, + BLK_MQ_REQ_NOWAIT); if ((PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) { usleep_range(3000, 3100); @@ -382,35 +661,54 @@ retry: } if (IS_ERR(req)) - goto free_map_req; + goto free_rq; + + rq->hpb = hpb; + rq->req = req; + rq->rb.rgn_idx = rgn_idx; + + return rq; + +free_rq: + kmem_cache_free(hpb->map_req_cache, rq); + return NULL; +} + +static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq) +{ + blk_put_request(rq->req); + kmem_cache_free(hpb->map_req_cache, rq); +} + +static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_req *map_req; + struct bio *bio; + + map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_SCSI_IN); + if (!map_req) + return NULL; bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn); if (!bio) { - blk_put_request(req); - goto free_map_req; + ufshpb_put_req(hpb, map_req); + return NULL; } - map_req->hpb = hpb; - map_req->req = req; map_req->bio = bio; - map_req->rgn_idx = srgn->rgn_idx; - map_req->srgn_idx = srgn->srgn_idx; - map_req->mctx = srgn->mctx; + map_req->rb.srgn_idx = srgn->srgn_idx; + map_req->rb.mctx = srgn->mctx; return map_req; - -free_map_req: - kmem_cache_free(hpb->map_req_cache, map_req); - return NULL; } static void ufshpb_put_map_req(struct ufshpb_lu *hpb, struct ufshpb_req *map_req) { bio_put(map_req->bio); - blk_put_request(map_req->req); - kmem_cache_free(hpb->map_req_cache, map_req); + ufshpb_put_req(hpb, map_req); } static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb, @@ -493,6 +791,13 @@ static void ufshpb_activate_subregion(struct ufshpb_lu *hpb, srgn->srgn_state = HPB_SRGN_VALID; } +static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data; + + ufshpb_put_req(umap_req->hpb, umap_req); +} + static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error) { struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data; @@ -500,8 +805,8 @@ static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error) struct ufshpb_subregion *srgn; unsigned long flags; - srgn = hpb->rgn_tbl[map_req->rgn_idx].srgn_tbl + - map_req->srgn_idx; + srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl + + map_req->rb.srgn_idx; 
ufshpb_clear_dirty_bitmap(hpb, srgn); spin_lock_irqsave(&hpb->rgn_state_lock, flags); @@ -511,6 +816,16 @@ static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error) ufshpb_put_map_req(map_req->hpb, map_req); } +static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn) +{ + cdb[0] = UFSHPB_WRITE_BUFFER; + cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID : + UFSHPB_WRITE_BUFFER_INACT_ALL_ID; + if (rgn) + put_unaligned_be16(rgn->rgn_idx, &cdb[2]); + cdb[9] = 0x00; +} + static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx, int srgn_idx, int srgn_mem_size) { @@ -524,6 +839,23 @@ static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx, cdb[9] = 0x00; } +static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb, + struct ufshpb_req *umap_req, + struct ufshpb_region *rgn) +{ + struct request *req; + struct scsi_request *rq; + + req = umap_req->req; + req->timeout = 0; + req->end_io_data = (void *)umap_req; + rq = scsi_req(req); + ufshpb_set_unmap_cmd(rq->cmd, rgn); + rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH; + + blk_execute_rq_nowait(req->q, NULL, req, 1, ufshpb_umap_req_compl_fn); +} + static int ufshpb_execute_map_req(struct ufshpb_lu *hpb, struct ufshpb_req *map_req, bool last) { @@ -536,12 +868,12 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb, q = hpb->sdev_ufs_lu->request_queue; for (i = 0; i < hpb->pages_per_srgn; i++) { - ret = bio_add_pc_page(q, map_req->bio, map_req->mctx->m_page[i], + ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i], PAGE_SIZE, 0); if (ret != PAGE_SIZE) { dev_err(&hpb->sdev_ufs_lu->sdev_dev, "bio_add_pc_page fail %d - %d\n", - map_req->rgn_idx, map_req->srgn_idx); + map_req->rb.rgn_idx, map_req->rb.srgn_idx); return ret; } } @@ -557,8 +889,8 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb, if (unlikely(last)) mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE; - ufshpb_set_read_buf_cmd(rq->cmd, map_req->rgn_idx, - map_req->srgn_idx, mem_size); + ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx, + map_req->rb.srgn_idx, mem_size); rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH; blk_execute_rq_nowait(q, NULL, req, 1, ufshpb_map_req_compl_fn); @@ -690,6 +1022,26 @@ static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb, } } +static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_req *umap_req; + int rgn_idx = rgn ? rgn->rgn_idx : 0; + + umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_SCSI_OUT); + if (!umap_req) + return -ENOMEM; + + ufshpb_execute_umap_req(hpb, umap_req, rgn); + + return 0; +} + +static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb) +{ + return ufshpb_issue_umap_req(hpb, NULL); +} + static void __ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) { @@ -1212,6 +1564,17 @@ static void ufshpb_lu_parameter_init(struct ufs_hba *hba, u32 entries_per_rgn; u64 rgn_mem_size, tmp; + /* for pre_req */ + hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1; + + if (ufshpb_is_legacy(hba)) + hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH; + else + hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH; + + + hpb->cur_read_id = 0; + hpb->lu_pinned_start = hpb_lu_info->pinned_start; hpb->lu_pinned_end = hpb_lu_info->num_pinned ? 
(hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1) @@ -1359,7 +1722,7 @@ ufshpb_sysfs_attr_show_func(rb_active_cnt); ufshpb_sysfs_attr_show_func(rb_inactive_cnt); ufshpb_sysfs_attr_show_func(map_req_cnt); -static struct attribute *hpb_dev_attrs[] = { +static struct attribute *hpb_dev_stat_attrs[] = { &dev_attr_hit_cnt.attr, &dev_attr_miss_cnt.attr, &dev_attr_rb_noti_cnt.attr, @@ -1371,9 +1734,118 @@ static struct attribute *hpb_dev_attrs[] = { struct attribute_group ufs_sysfs_hpb_stat_group = { .name = "hpb_stats", - .attrs = hpb_dev_attrs, + .attrs = hpb_dev_stat_attrs, }; +/* SYSFS functions */ +#define ufshpb_sysfs_param_show_func(__name) \ +static ssize_t __name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \ + \ + if (!hpb) \ + return -ENODEV; \ + \ + return sysfs_emit(buf, "%d\n", hpb->params.__name); \ +} + +ufshpb_sysfs_param_show_func(requeue_timeout_ms); +static ssize_t +requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (kstrtoint(buf, 0, &val)) + return -EINVAL; + + if (val < 0) + return -EINVAL; + + hpb->params.requeue_timeout_ms = val; + + return count; +} +static DEVICE_ATTR_RW(requeue_timeout_ms); + +static struct attribute *hpb_dev_param_attrs[] = { + &dev_attr_requeue_timeout_ms.attr, + NULL, +}; + +struct attribute_group ufs_sysfs_hpb_param_group = { + .name = "hpb_params", + .attrs = hpb_dev_param_attrs, +}; + +static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb) +{ + struct ufshpb_req *pre_req = NULL, *t; + int qd = hpb->sdev_ufs_lu->queue_depth / 2; + int i; + + INIT_LIST_HEAD(&hpb->lh_pre_req_free); + + hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL); + hpb->throttle_pre_req = qd; + hpb->num_inflight_pre_req = 0; + + if (!hpb->pre_req) + goto release_mem; + + for (i = 0; i < qd; i++) { + pre_req = hpb->pre_req + i; + INIT_LIST_HEAD(&pre_req->list_req); + pre_req->req = NULL; + + pre_req->bio = bio_alloc(GFP_KERNEL, 1); + if (!pre_req->bio) + goto release_mem; + + pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!pre_req->wb.m_page) { + bio_put(pre_req->bio); + goto release_mem; + } + + list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free); + } + + return 0; +release_mem: + list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) { + list_del_init(&pre_req->list_req); + bio_put(pre_req->bio); + __free_page(pre_req->wb.m_page); + } + + kfree(hpb->pre_req); + return -ENOMEM; +} + +static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb) +{ + struct ufshpb_req *pre_req = NULL; + int i; + + for (i = 0; i < hpb->throttle_pre_req; i++) { + pre_req = hpb->pre_req + i; + bio_put(hpb->pre_req[i].bio); + if (pre_req->wb.m_page) + __free_page(hpb->pre_req[i].wb.m_page); + list_del_init(&pre_req->list_req); + } + + kfree(hpb->pre_req); +} + static void ufshpb_stat_init(struct ufshpb_lu *hpb) { hpb->stats.hit_cnt = 0; @@ -1384,6 +1856,11 @@ static void ufshpb_stat_init(struct ufshpb_lu *hpb) hpb->stats.map_req_cnt = 0; } +static void ufshpb_param_init(struct ufshpb_lu *hpb) +{ + hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS; +} + static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb) { int ret; @@ -1416,14 +1893,24 @@ static int 
ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb) goto release_req_cache; } + ret = ufshpb_pre_req_mempool_init(hpb); + if (ret) { + dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail", + hpb->lun); + goto release_m_page_cache; + } + ret = ufshpb_alloc_region_tbl(hba, hpb); if (ret) - goto release_m_page_cache; + goto release_pre_req_mempool; ufshpb_stat_init(hpb); + ufshpb_param_init(hpb); return 0; +release_pre_req_mempool: + ufshpb_pre_req_mempool_destroy(hpb); release_m_page_cache: kmem_cache_destroy(hpb->m_page_cache); release_req_cache: @@ -1432,7 +1919,7 @@ release_req_cache: } static struct ufshpb_lu * -ufshpb_alloc_hpb_lu(struct ufs_hba *hba, int lun, +ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev, struct ufshpb_dev_info *hpb_dev_info, struct ufshpb_lu_info *hpb_lu_info) { @@ -1443,7 +1930,8 @@ ufshpb_alloc_hpb_lu(struct ufs_hba *hba, int lun, if (!hpb) return NULL; - hpb->lun = lun; + hpb->lun = sdev->lun; + hpb->sdev_ufs_lu = sdev; ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info); @@ -1453,6 +1941,7 @@ ufshpb_alloc_hpb_lu(struct ufs_hba *hba, int lun, goto release_hpb; } + sdev->hostdata = hpb; return hpb; release_hpb: @@ -1655,6 +2144,7 @@ void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) ufshpb_cancel_jobs(hpb); + ufshpb_pre_req_mempool_destroy(hpb); ufshpb_destroy_region_tbl(hpb); kmem_cache_destroy(hpb->map_req_cache); @@ -1694,6 +2184,7 @@ static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba) ufshpb_set_state(hpb, HPB_PRESENT); if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0) queue_work(ufshpb_wq, &hpb->map_work); + ufshpb_issue_umap_all_req(hpb); } else { dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun); ufshpb_destroy_lu(hba, sdev); @@ -1718,7 +2209,7 @@ void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) if (ret) goto out; - hpb = ufshpb_alloc_hpb_lu(hba, lun, &hba->ufshpb_dev, + hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev, &hpb_lu_info); if (!hpb) goto out; @@ -1726,9 +2217,6 @@ void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) tot_active_srgn_pages += hpb_lu_info.max_active_rgns * hpb->srgns_per_rgn * hpb->pages_per_srgn; - hpb->sdev_ufs_lu = sdev; - sdev->hostdata = hpb; - out: /* All LUs are initialized */ if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt)) @@ -1815,8 +2303,9 @@ void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) { struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev; - int version; + int version, ret; u8 hpb_mode; + u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW; hpb_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL]; if (hpb_mode == HPB_HOST_CONTROL) { @@ -1827,13 +2316,27 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) } version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER); - if (version != HPB_SUPPORT_VERSION) { + if ((version != HPB_SUPPORT_VERSION) && + (version != HPB_SUPPORT_LEGACY_VERSION)) { dev_err(hba->dev, "%s: HPB %x version is not supported.\n", __func__, version); hpb_dev_info->hpb_disabled = true; return; } + if (version == HPB_SUPPORT_LEGACY_VERSION) + hpb_dev_info->is_legacy = true; + + pm_runtime_get_sync(hba->dev); + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd); + pm_runtime_put_sync(hba->dev); + + if (ret) + dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed", + __func__); + 
hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd; + /* * Get the number of user logical unit to check whether all * scsi_device finish initialization diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h index 6e6a0252dc15..1e8d6e1d909e 100644 --- a/drivers/scsi/ufs/ufshpb.h +++ b/drivers/scsi/ufs/ufshpb.h @@ -30,19 +30,29 @@ #define PINNED_NOT_SET U32_MAX /* hpb support chunk size */ -#define HPB_MULTI_CHUNK_HIGH 1 +#define HPB_LEGACY_CHUNK_HIGH 1 +#define HPB_MULTI_CHUNK_LOW 7 +#define HPB_MULTI_CHUNK_HIGH 256 /* hpb vendor defined opcode */ #define UFSHPB_READ 0xF8 #define UFSHPB_READ_BUFFER 0xF9 #define UFSHPB_READ_BUFFER_ID 0x01 +#define UFSHPB_WRITE_BUFFER 0xFA +#define UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID 0x01 +#define UFSHPB_WRITE_BUFFER_PREFETCH_ID 0x02 +#define UFSHPB_WRITE_BUFFER_INACT_ALL_ID 0x03 +#define HPB_WRITE_BUFFER_CMD_LENGTH 10 +#define MAX_HPB_READ_ID 0x7F #define HPB_READ_BUFFER_CMD_LENGTH 10 #define LU_ENABLED_HPB_FUNC 0x02 #define HPB_RESET_REQ_RETRIES 10 #define HPB_MAP_REQ_RETRIES 5 +#define HPB_REQUEUE_TIME_MS 0 -#define HPB_SUPPORT_VERSION 0x100 +#define HPB_SUPPORT_VERSION 0x200 +#define HPB_SUPPORT_LEGACY_VERSION 0x100 enum UFSHPB_MODE { HPB_HOST_CONTROL, @@ -119,23 +129,38 @@ struct ufshpb_region { (i)++) /** - * struct ufshpb_req - UFSHPB READ BUFFER (for caching map) request structure - * @req: block layer request for READ BUFFER - * @bio: bio for holding map page - * @hpb: ufshpb_lu structure that related to the L2P map + * struct ufshpb_req - HPB related request structure (write/read buffer) + * @req: block layer request structure + * @bio: bio for this request + * @hpb: ufshpb_lu structure that this request relates to + * @list_req: ufshpb_req mempool list * @mctx: L2P map information * @rgn_idx: target region index * @srgn_idx: target sub-region index * @lun: target logical unit number + * @m_page: L2P map information data for pre-request + * @len: length of host-side cached L2P map in m_page + * @lpn: start LPN of L2P map in m_page */ struct ufshpb_req { struct request *req; struct bio *bio; struct ufshpb_lu *hpb; - struct ufshpb_map_ctx *mctx; - - unsigned int rgn_idx; - unsigned int srgn_idx; + struct list_head list_req; + union { + struct { + struct ufshpb_map_ctx *mctx; + unsigned int rgn_idx; + unsigned int srgn_idx; + unsigned int lun; + } rb; + struct { + struct page *m_page; + unsigned int len; + unsigned long lpn; + } wb; + }; }; struct victim_select_info { @@ -144,6 +169,10 @@ struct victim_select_info { atomic_t active_cnt; }; +struct ufshpb_params { + unsigned int requeue_timeout_ms; +}; + struct ufshpb_stats { u64 hit_cnt; u64 miss_cnt; @@ -151,6 +180,7 @@ struct ufshpb_stats { u64 rb_active_cnt; u64 rb_inactive_cnt; u64 map_req_cnt; + u64 pre_req_cnt; }; struct ufshpb_lu { @@ -166,6 +196,15 @@ struct ufshpb_lu { struct list_head lh_act_srgn; /* hold rsp_list_lock */ struct list_head lh_inact_rgn; /* hold rsp_list_lock */ + /* pre request information */ + struct ufshpb_req *pre_req; + int num_inflight_pre_req; + int throttle_pre_req; + struct list_head lh_pre_req_free; + int cur_read_id; + int pre_req_min_tr_len; + int pre_req_max_tr_len; + /* cached L2P map management worker */ struct work_struct map_work; @@ -190,6 +229,7 @@ struct ufshpb_lu { u32 pages_per_srgn; struct ufshpb_stats stats; + struct ufshpb_params params; struct kmem_cache *map_req_cache; struct kmem_cache *m_page_cache; @@ -201,7 +241,7 @@ struct ufs_hba; struct ufshcd_lrb; #ifndef CONFIG_SCSI_UFS_HPB -static void ufshpb_prep(struct 
ufs_hba *hba, struct ufshcd_lrb *lrbp) {} +static int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { return 0; } static void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {} static void ufshpb_resume(struct ufs_hba *hba) {} static void ufshpb_suspend(struct ufs_hba *hba) {} @@ -214,8 +254,9 @@ static void ufshpb_remove(struct ufs_hba *hba) {} static bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; } static void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {} static void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {} +static bool ufshpb_is_legacy(struct ufs_hba *hba) { return false; } #else -void ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); +int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); void ufshpb_resume(struct ufs_hba *hba); void ufshpb_suspend(struct ufs_hba *hba); @@ -228,7 +269,9 @@ void ufshpb_remove(struct ufs_hba *hba); bool ufshpb_is_allowed(struct ufs_hba *hba); void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf); void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf); +bool ufshpb_is_legacy(struct ufs_hba *hba); extern struct attribute_group ufs_sysfs_hpb_stat_group; +extern struct attribute_group ufs_sysfs_hpb_param_group; #endif #endif /* End of Header */ From bce9649c9feee55c9f144a0c333b021f64f41f32 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Wed, 2 Sep 2020 12:32:14 +0300 Subject: [PATCH 30/62] FROMLIST: scsi: ufs: Cache HPB Control mode on init We will use it later, when we'll need to differentiate between device and host control modes. Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/lkml/20210607061401.58884-2-avri.altman@wdc.com/ Signed-off-by: Avri Altman Change-Id: Id1eb0408814fd9fda6815c6b19faf4ade106ba05 --- drivers/scsi/ufs/ufshcd.h | 2 ++ drivers/scsi/ufs/ufshpb.c | 8 +++++--- drivers/scsi/ufs/ufshpb.h | 2 ++ 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 1f559256a0f3..e8683aa64b78 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -664,6 +664,7 @@ struct ufs_hba_variant_params { * @hpb_disabled: flag to check if HPB is disabled * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value * @is_legacy: flag to check HPB 1.0 + * @control_mode: either host or device */ struct ufshpb_dev_info { int num_lu; @@ -673,6 +674,7 @@ struct ufshpb_dev_info { bool hpb_disabled; u8 max_hpb_single_cmd; bool is_legacy; + u8 control_mode; }; #endif diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 273e3b6e1ded..d8e5e5fdeed8 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -1610,6 +1610,9 @@ static void ufshpb_lu_parameter_init(struct ufs_hba *hba, % (hpb->srgn_mem_size / HPB_ENTRY_SIZE); hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE); + + if (hpb_dev_info->control_mode == HPB_HOST_CONTROL) + hpb->is_hcm = true; } static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb) @@ -2304,11 +2307,10 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) { struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev; int version, ret; - u8 hpb_mode; u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW; - hpb_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL]; - if (hpb_mode == HPB_HOST_CONTROL) { + hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL]; + if (hpb_dev_info->control_mode == 
HPB_HOST_CONTROL) { dev_err(hba->dev, "%s: host control mode is not supported.\n", __func__); hpb_dev_info->hpb_disabled = true; diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h index 1e8d6e1d909e..dc168ba08a09 100644 --- a/drivers/scsi/ufs/ufshpb.h +++ b/drivers/scsi/ufs/ufshpb.h @@ -228,6 +228,8 @@ struct ufshpb_lu { u32 entries_per_srgn_shift; u32 pages_per_srgn; + bool is_hcm; + struct ufshpb_stats stats; struct ufshpb_params params; From a210fd6f26349ff839de03a9632991e46a7b9870 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Sun, 28 Jun 2020 15:50:28 +0300 Subject: [PATCH 31/62] FROMLIST: scsi: ufs: Add HCM support to rsp_upiu In device control mode, the device may recommend that the host either activate or inactivate a region, and the host should follow. That is, these are not really recommendations, but instructions. In host control mode, by contrast, the recommendation protocol is slightly changed: a) The device may only recommend that the host update a subregion of an already-active region. And, b) The device may *not* recommend inactivating a region. Furthermore, in host control mode, the host may choose not to follow any of the device's recommendations. However, in case of a recommendation to update an active and clean subregion, it is better to follow the recommendation, because otherwise the host has no other way to know that some internal relocation took place. Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/lkml/20210607061401.58884-3-avri.altman@wdc.com/ Signed-off-by: Avri Altman Change-Id: I02cb053ae4e7fdadd663f9190c95e5f5a79c0e4b --- drivers/scsi/ufs/ufshpb.c | 34 +++++++++++++++++++++++++++++++++- drivers/scsi/ufs/ufshpb.h | 2 ++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index d8e5e5fdeed8..d2475d4e0694 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -167,6 +167,8 @@ next_srgn: else set_bit_len = cnt; + set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); + if (rgn->rgn_state != HPB_RGN_INACTIVE && srgn->srgn_state == HPB_SRGN_VALID) bitmap_set(srgn->mctx->ppn_dirty, srgn_offset, set_bit_len); @@ -236,6 +238,11 @@ next_srgn: return false; } +static inline bool is_rgn_dirty(struct ufshpb_region *rgn) +{ + return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); +} + static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb, struct ufshpb_map_ctx *mctx, int pos, int len, __be64 *ppn_buf) @@ -714,6 +721,7 @@ static void ufshpb_put_map_req(struct ufshpb_lu *hpb, static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb, struct ufshpb_subregion *srgn) { + struct ufshpb_region *rgn; u32 num_entries = hpb->entries_per_srgn; if (!srgn->mctx) { @@ -727,6 +735,10 @@ static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb, num_entries = hpb->last_srgn_entries; bitmap_zero(srgn->mctx->ppn_dirty, num_entries); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); + return 0; } @@ -1239,6 +1251,18 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, srgn_i = be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn); + rgn = hpb->rgn_tbl + rgn_i; + if (hpb->is_hcm && + (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) { + /* + * in host control mode, subregion activation + * recommendations are only allowed for active regions. 
+ * Also, ignore recommendations for dirty regions - the + * host will make decisions concerning those by itself + */ + continue; + } + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "activate(%d) region %d - %d\n", i, rgn_i, srgn_i); @@ -1246,7 +1270,6 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, ufshpb_update_active_info(hpb, rgn_i, srgn_i); spin_unlock(&hpb->rsp_list_lock); - rgn = hpb->rgn_tbl + rgn_i; srgn = rgn->srgn_tbl + srgn_i; /* blocking HPB_READ */ @@ -1257,6 +1280,14 @@ hpb->stats.rb_active_cnt++; } + if (hpb->is_hcm) { + /* + * in host control mode the device is not allowed to inactivate + * regions + */ + goto out; + } + for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) { rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]); dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, @@ -1281,6 +1312,7 @@ hpb->stats.rb_inactive_cnt++; } +out: dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n", rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt); diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h index dc168ba08a09..9ab502f82835 100644 --- a/drivers/scsi/ufs/ufshpb.h +++ b/drivers/scsi/ufs/ufshpb.h @@ -121,6 +121,8 @@ struct ufshpb_region { /* below information is used by lru */ struct list_head list_lru_rgn; + unsigned long rgn_flags; +#define RGN_FLAG_DIRTY 0 }; #define for_each_sub_region(rgn, i, srgn) \ From ac898f51d17c06fb7127d06768150e23da008024 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Sat, 27 Mar 2021 14:18:36 +0300 Subject: [PATCH 32/62] FROMLIST: scsi: ufs: Transform set_dirty to iterate_rgn Given a transfer length, set_dirty meticulously runs over all the entries, across subregions and regions if needed. Currently its only use is to mark dirty blocks, but soon HCM may profit from it as well, when managing its read counters. 
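For illustration only, a stand-alone user-space sketch of that walk; the geometry constants below are made up for the example and are not the driver's:

    #include <stdio.h>

    #define SRGNS_PER_RGN    4
    #define ENTRIES_PER_SRGN 8

    /* Walk cnt map entries starting at (rgn, srgn, offset), crossing
     * subregion and region boundaries just like the driver's loop.
     */
    static void iterate(int rgn, int srgn, int offset, int cnt)
    {
            while (cnt > 0) {
                    int len = ENTRIES_PER_SRGN - offset;

                    if (cnt < len)
                            len = cnt;
                    printf("rgn %d srgn %d: entries %d..%d\n",
                           rgn, srgn, offset, offset + len - 1);
                    cnt -= len;
                    offset = 0;
                    if (++srgn == SRGNS_PER_RGN) {
                            srgn = 0;
                            rgn++;
                    }
            }
    }

    int main(void)
    {
            iterate(0, 3, 6, 20); /* spills from region 0 into region 1 */
            return 0;
    }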
Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/lkml/20210607061401.58884-4-avri.altman@wdc.com/ Signed-off-by: Avri Altman Reviewed-by: Daejun Park Change-Id: I916f4bf80490e31e5ef797d67647a41a07cefa02 --- drivers/scsi/ufs/ufshpb.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index d2475d4e0694..2310a8ddd40b 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -145,13 +145,14 @@ static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba, return true; } -static void ufshpb_set_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx, - int srgn_idx, int srgn_offset, int cnt) +static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx, + int srgn_offset, int cnt, bool set_dirty) { struct ufshpb_region *rgn; struct ufshpb_subregion *srgn; int set_bit_len; int bitmap_len; + unsigned long flags; next_srgn: rgn = hpb->rgn_tbl + rgn_idx; @@ -167,11 +168,14 @@ next_srgn: else set_bit_len = cnt; - set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); + if (set_dirty) + set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); - if (rgn->rgn_state != HPB_RGN_INACTIVE && + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (set_dirty && rgn->rgn_state != HPB_RGN_INACTIVE && srgn->srgn_state == HPB_SRGN_VALID) bitmap_set(srgn->mctx->ppn_dirty, srgn_offset, set_bit_len); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); srgn_offset = 0; if (++srgn_idx == hpb->srgns_per_rgn) { srgn_idx = 0; @@ -591,10 +595,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) /* If command type is WRITE or DISCARD, set bitmap as dirty */ if (ufshpb_is_write_or_discard(cmd)) { - spin_lock_irqsave(&hpb->rgn_state_lock, flags); - ufshpb_set_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset, - transfer_len); - spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset, + transfer_len, true); return 0; } From a782d4350fc95b03c11e68a2307ee2f414d42fe7 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Tue, 30 Jun 2020 11:07:08 +0300 Subject: [PATCH 33/62] FROMLIST: scsi: ufs: Add region's reads counter In host control mode, reads are the major source of activation trials. Keep track of these read counters, for both active as well as inactive regions. We reset the read counter upon write - we are only interested in "clean" reads. Keep those counters normalized, as we are using them as a comparative score to make various decisions. If during consecutive normalizations an active region has exhausted its reads - inactivate it. While at it, protect the {active,inactive}_count stats by moving them into the applicable handlers. 
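For illustration only, a toy model of the normalization pass described above; the counter values are arbitrary and this is not the driver code:

    #include <stdio.h>

    int main(void)
    {
            /* per-subregion read counters are halved and re-summed
             * into the region counter on every normalization pass
             */
            unsigned int srgn_reads[4] = { 8, 4, 2, 0 };
            unsigned int rgn_reads = 0;
            int i;

            for (i = 0; i < 4; i++) {
                    srgn_reads[i] >>= 1;
                    rgn_reads += srgn_reads[i];
            }

            /* 8,4,2,0 decays to 4,2,1,0 -> rgn_reads == 7; an active
             * region whose counter decays to 0 becomes a candidate
             * for inactivation
             */
            printf("rgn reads after normalization: %u\n", rgn_reads);
            return 0;
    }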
Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/lkml/20210607061401.58884-5-avri.altman@wdc.com/ Signed-off-by: Avri Altman Change-Id: I0541c39e3dd7656ca1816cac3599ab73eb8697a8 --- drivers/scsi/ufs/ufshpb.c | 94 ++++++++++++++++++++++++++++++++++++--- drivers/scsi/ufs/ufshpb.h | 9 ++++ 2 files changed, 96 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 2310a8ddd40b..055019d52d5c 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -16,6 +16,8 @@ #include "ufshpb.h" #include "../sd.h" +#define ACTIVATION_THRESHOLD 8 /* 8 IOs */ + /* memory management */ static struct kmem_cache *ufshpb_mctx_cache; static mempool_t *ufshpb_mctx_pool; @@ -26,6 +28,9 @@ static int tot_active_srgn_pages; static struct workqueue_struct *ufshpb_wq; +static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx); + bool ufshpb_is_allowed(struct ufs_hba *hba) { return !(hba->ufshpb_dev.hpb_disabled); @@ -149,7 +154,7 @@ static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx, int srgn_offset, int cnt, bool set_dirty) { struct ufshpb_region *rgn; - struct ufshpb_subregion *srgn; + struct ufshpb_subregion *srgn, *prev_srgn = NULL; int set_bit_len; int bitmap_len; unsigned long flags; @@ -168,15 +173,39 @@ next_srgn: else set_bit_len = cnt; - if (set_dirty) - set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); - spin_lock_irqsave(&hpb->rgn_state_lock, flags); if (set_dirty && rgn->rgn_state != HPB_RGN_INACTIVE && srgn->srgn_state == HPB_SRGN_VALID) bitmap_set(srgn->mctx->ppn_dirty, srgn_offset, set_bit_len); spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + if (hpb->is_hcm && prev_srgn != srgn) { + bool activate = false; + + spin_lock(&rgn->rgn_lock); + if (set_dirty) { + rgn->reads -= srgn->reads; + srgn->reads = 0; + set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); + } else { + srgn->reads++; + rgn->reads++; + if (srgn->reads == ACTIVATION_THRESHOLD) + activate = true; + } + spin_unlock(&rgn->rgn_lock); + + if (activate) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_update_active_info(hpb, rgn_idx, srgn_idx); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "activate region %d-%d\n", rgn_idx, srgn_idx); + } + + prev_srgn = srgn; + } + srgn_offset = 0; if (++srgn_idx == hpb->srgns_per_rgn) { srgn_idx = 0; @@ -605,6 +634,19 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH); + if (hpb->is_hcm) { + /* + * in host control mode, reads are the main source for + * activation trials. 
+ */ + ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset, + transfer_len, false); + + /* keep those counters normalized */ + if (rgn->reads > hpb->entries_per_srgn) + schedule_work(&hpb->ufshpb_normalization_work); + } + spin_lock_irqsave(&hpb->rgn_state_lock, flags); if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset, transfer_len)) { @@ -757,6 +799,8 @@ static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx, if (list_empty(&srgn->list_act_srgn)) list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); + + hpb->stats.rb_active_cnt++; } static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx) @@ -772,6 +816,8 @@ static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx) if (list_empty(&rgn->list_inact_rgn)) list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn); + + hpb->stats.rb_inactive_cnt++; } static void ufshpb_activate_subregion(struct ufshpb_lu *hpb, @@ -1279,7 +1325,6 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, if (srgn->srgn_state == HPB_SRGN_VALID) srgn->srgn_state = HPB_SRGN_INVALID; spin_unlock(&hpb->rgn_state_lock); - hpb->stats.rb_active_cnt++; } if (hpb->is_hcm) { @@ -1310,8 +1355,6 @@ static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, } } spin_unlock(&hpb->rgn_state_lock); - - hpb->stats.rb_inactive_cnt++; } out: @@ -1510,6 +1553,36 @@ static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb) spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); } +static void ufshpb_normalization_work_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, + ufshpb_normalization_work); + int rgn_idx; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx; + int srgn_idx; + + spin_lock(&rgn->rgn_lock); + rgn->reads = 0; + for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) { + struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx; + + srgn->reads >>= 1; + rgn->reads += srgn->reads; + } + spin_unlock(&rgn->rgn_lock); + + if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads) + continue; + + /* if region is active but has no reads - inactivate it */ + spin_lock(&hpb->rsp_list_lock); + ufshpb_update_inactive_info(hpb, rgn->rgn_idx); + spin_unlock(&hpb->rsp_list_lock); + } +} + static void ufshpb_map_work_handler(struct work_struct *work) { struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work); @@ -1669,6 +1742,8 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb) rgn = rgn_table + rgn_idx; rgn->rgn_idx = rgn_idx; + spin_lock_init(&rgn->rgn_lock); + INIT_LIST_HEAD(&rgn->list_inact_rgn); INIT_LIST_HEAD(&rgn->list_lru_rgn); @@ -1911,6 +1986,9 @@ static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb) INIT_LIST_HEAD(&hpb->list_hpb_lu); INIT_WORK(&hpb->map_work, ufshpb_map_work_handler); + if (hpb->is_hcm) + INIT_WORK(&hpb->ufshpb_normalization_work, + ufshpb_normalization_work_handler); hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache", sizeof(struct ufshpb_req), 0, 0, NULL); @@ -2010,6 +2088,8 @@ static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb) static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb) { + if (hpb->is_hcm) + cancel_work_sync(&hpb->ufshpb_normalization_work); cancel_work_sync(&hpb->map_work); } diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h index 9ab502f82835..33d163e76d41 100644 --- a/drivers/scsi/ufs/ufshpb.h +++ b/drivers/scsi/ufs/ufshpb.h @@ -106,6 +106,10 
@@ struct ufshpb_subregion { int rgn_idx; int srgn_idx; bool is_last; + + /* subregion reads - for host mode */ + unsigned int reads; + /* below information is used by rsp_list */ struct list_head list_act_srgn; }; @@ -123,6 +127,10 @@ struct ufshpb_region { struct list_head list_lru_rgn; unsigned long rgn_flags; #define RGN_FLAG_DIRTY 0 + + /* region reads - for host mode */ + spinlock_t rgn_lock; + unsigned int reads; }; #define for_each_sub_region(rgn, i, srgn) \ @@ -212,6 +220,7 @@ struct ufshpb_lu { /* for selecting victim */ struct victim_select_info lru_info; + struct work_struct ufshpb_normalization_work; /* pinned region information */ u32 lu_pinned_start; From d5b978446c1670fe956e05b5de217a027d5178a5 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Tue, 30 Jun 2020 13:13:02 +0300 Subject: [PATCH 34/62] FROMLIST: scsi: ufs: Eviction in HCM In host mode, eviction is considered an extreme measure. Verify that the entering region has enough reads, and that the exiting region has far fewer reads. Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/lkml/20210607061401.58884-6-avri.altman@wdc.com/ Signed-off-by: Avri Altman Change-Id: Ia08e3af69302c4f0474efa7c616832dde48df4e0 --- drivers/scsi/ufs/ufshpb.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 055019d52d5c..09db4b91ad36 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -17,6 +17,7 @@ #include "../sd.h" #define ACTIVATION_THRESHOLD 8 /* 8 IOs */ +#define EVICTION_THRESHOLD (ACTIVATION_THRESHOLD << 5) /* 256 IOs */ /* memory management */ static struct kmem_cache *ufshpb_mctx_cache; @@ -1057,6 +1058,13 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) if (ufshpb_check_srgns_issue_state(hpb, rgn)) continue; + /* + * in host control mode, verify that the exiting region + * has fewer reads + */ + if (hpb->is_hcm && rgn->reads > (EVICTION_THRESHOLD >> 1)) + continue; + victim_rgn = rgn; break; } @@ -1223,7 +1231,7 @@ unlock_out: static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) { - struct ufshpb_region *victim_rgn; + struct ufshpb_region *victim_rgn = NULL; struct victim_select_info *lru_info = &hpb->lru_info; unsigned long flags; int ret = 0; @@ -1250,7 +1258,15 @@ static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) * It is okay to evict the least recently used region, * because the device could detect this region * by not issuing HPB_READ + * + * in host control mode, verify that the entering + * region has enough reads */ + if (hpb->is_hcm && rgn->reads < EVICTION_THRESHOLD) { + ret = -EACCES; + goto out; + } + victim_rgn = ufshpb_victim_lru_info(hpb); if (!victim_rgn) { dev_warn(&hpb->sdev_ufs_lu->sdev_dev, From dbf4aa202ca8848e21cc5961068d2df7e7afcc41 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Tue, 30 Jun 2020 15:14:31 +0300 Subject: [PATCH 35/62] FROMLIST: scsi: ufs: Region inactivation in HCM In host mode, the host is expected to send HPB-WRITE-BUFFER with buffer-id = 0x1 when it inactivates a region. Use the map-requests pool as there is no point in assigning a designated cache for umap-requests. 
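For illustration only, a stand-alone sketch of the 10-byte single-region unmap CDB that ufshpb_set_unmap_cmd() in this patch builds; the opcode 0xFA, the buffer-id 0x01 and the big-endian region index in bytes 2-3 come from the patch, while the region index value itself is made up:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t rgn_idx = 0x0123;      /* example region index */
            uint8_t cdb[10] = { 0 };

            cdb[0] = 0xFA;                  /* UFSHPB_WRITE_BUFFER */
            cdb[1] = 0x01;                  /* INACT_SINGLE buffer id */
            cdb[2] = rgn_idx >> 8;          /* put_unaligned_be16() */
            cdb[3] = rgn_idx & 0xFF;
            /* cdb[9] (Control) stays 0x00 */

            for (int i = 0; i < 10; i++)
                    printf("%02x ", cdb[i]);
            printf("\n");
            return 0;
    }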
Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/lkml/20210607061401.58884-7-avri.altman@wdc.com/ Signed-off-by: Avri Altman Change-Id: I1a6696b38d4abfb4d9fbe44e84016a6238825125 --- drivers/scsi/ufs/ufshpb.c | 47 +++++++++++++++++++++++++++++++++------ drivers/scsi/ufs/ufshpb.h | 1 + 2 files changed, 41 insertions(+), 7 deletions(-) diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 09db4b91ad36..9a4cb8dfaaca 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -693,7 +693,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) } static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, - int rgn_idx, enum req_opf dir) + int rgn_idx, enum req_opf dir, + bool atomic) { struct ufshpb_req *rq; struct request *req; @@ -707,7 +708,7 @@ retry: req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir, BLK_MQ_REQ_NOWAIT); - if ((PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) { + if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) { usleep_range(3000, 3100); goto retry; } @@ -738,7 +739,7 @@ static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, struct ufshpb_req *map_req; struct bio *bio; - map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_SCSI_IN); + map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_SCSI_IN, false); if (!map_req) return NULL; @@ -915,6 +916,8 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb, rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH; blk_execute_rq_nowait(req->q, NULL, req, 1, ufshpb_umap_req_compl_fn); + + hpb->stats.umap_req_cnt++; } static int ufshpb_execute_map_req(struct ufshpb_lu *hpb, @@ -1091,12 +1094,13 @@ static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb, } static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb, - struct ufshpb_region *rgn) + struct ufshpb_region *rgn, + bool atomic) { struct ufshpb_req *umap_req; int rgn_idx = rgn ? 
rgn->rgn_idx : 0; - umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_SCSI_OUT); + umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_SCSI_OUT, atomic); if (!umap_req) return -ENOMEM; @@ -1105,13 +1109,19 @@ static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb, return 0; } +static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + return ufshpb_issue_umap_req(hpb, rgn, true); +} + static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb) { - return ufshpb_issue_umap_req(hpb, NULL); + return ufshpb_issue_umap_req(hpb, NULL, false); } static void __ufshpb_evict_region(struct ufshpb_lu *hpb, - struct ufshpb_region *rgn) + struct ufshpb_region *rgn) { struct victim_select_info *lru_info; struct ufshpb_subregion *srgn; @@ -1145,6 +1155,14 @@ static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) goto out; } + if (hpb->is_hcm) { + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + ret = ufshpb_issue_umap_single_req(hpb, rgn); + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (ret) + goto out; + } + __ufshpb_evict_region(hpb, rgn); } out: @@ -1279,6 +1297,18 @@ static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) "LRU full (%d), choose victim %d\n", atomic_read(&lru_info->active_cnt), victim_rgn->rgn_idx); + + if (hpb->is_hcm) { + spin_unlock_irqrestore(&hpb->rgn_state_lock, + flags); + ret = ufshpb_issue_umap_single_req(hpb, + victim_rgn); + spin_lock_irqsave(&hpb->rgn_state_lock, + flags); + if (ret) + goto out; + } + __ufshpb_evict_region(hpb, victim_rgn); } @@ -1849,6 +1879,7 @@ ufshpb_sysfs_attr_show_func(rb_noti_cnt); ufshpb_sysfs_attr_show_func(rb_active_cnt); ufshpb_sysfs_attr_show_func(rb_inactive_cnt); ufshpb_sysfs_attr_show_func(map_req_cnt); +ufshpb_sysfs_attr_show_func(umap_req_cnt); static struct attribute *hpb_dev_stat_attrs[] = { &dev_attr_hit_cnt.attr, @@ -1857,6 +1888,7 @@ static struct attribute *hpb_dev_stat_attrs[] = { &dev_attr_rb_active_cnt.attr, &dev_attr_rb_inactive_cnt.attr, &dev_attr_map_req_cnt.attr, + &dev_attr_umap_req_cnt.attr, NULL, }; @@ -1982,6 +2014,7 @@ static void ufshpb_stat_init(struct ufshpb_lu *hpb) hpb->stats.rb_active_cnt = 0; hpb->stats.rb_inactive_cnt = 0; hpb->stats.map_req_cnt = 0; + hpb->stats.umap_req_cnt = 0; } static void ufshpb_param_init(struct ufshpb_lu *hpb) diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h index 33d163e76d41..0204e4fec6bc 100644 --- a/drivers/scsi/ufs/ufshpb.h +++ b/drivers/scsi/ufs/ufshpb.h @@ -191,6 +191,7 @@ struct ufshpb_stats { u64 rb_inactive_cnt; u64 map_req_cnt; u64 pre_req_cnt; + u64 umap_req_cnt; }; struct ufshpb_lu { From 992cbc0e10f3d37667d432d616c0fd68a7735d59 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Wed, 3 Jun 2020 18:01:07 +0300 Subject: [PATCH 36/62] FROMLIST: scsi: ufs: Add hpb dev reset response The spec does not define the host's recommended response when the device sends an HPB dev reset response (oper 0x2). We will update all active HPB regions: mark them, and refresh them on the next read.
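A condensed sketch of the resulting flow (locking elided for brevity; the hunks below hold the authoritative version):

        /* On an HPB dev reset response: mark every active region. */
        list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
                set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);

        /* On the next read that hits a marked region: requeue it for
         * activation so its subregion PPN tables get refreshed. */
        if (activate || test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags))
                ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);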
Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/lkml/20210607061401.58884-8-avri.altman@wdc.com/ Signed-off-by: Avri Altman Change-Id: Ibe87969a4130b4e77f5d163771648679bc5ac7e8 --- drivers/scsi/ufs/ufshpb.c | 32 +++++++++++++++++++++++++++++++- drivers/scsi/ufs/ufshpb.h | 1 + 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 9a4cb8dfaaca..95ad543edaf4 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -196,7 +196,8 @@ next_srgn: } spin_unlock(&rgn->rgn_lock); - if (activate) { + if (activate || + test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) { spin_lock_irqsave(&hpb->rsp_list_lock, flags); ufshpb_update_active_info(hpb, rgn_idx, srgn_idx); spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); @@ -1411,6 +1412,21 @@ out: queue_work(ufshpb_wq, &hpb->map_work); } +static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb) +{ + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn; + unsigned long flags; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) + set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags); + + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); +} + + /* * This function will parse recommended active subregion information in sense * data field of response UPIU with SAM_STAT_GOOD state. @@ -1485,6 +1501,18 @@ void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) case HPB_RSP_DEV_RESET: dev_warn(&hpb->sdev_ufs_lu->sdev_dev, "UFS device lost HPB information during PM.\n"); + + if (hpb->is_hcm) { + struct scsi_device *sdev; + + __shost_for_each_device(sdev, hba->host) { + struct ufshpb_lu *h = sdev->hostdata; + + if (h) + ufshpb_dev_reset_handler(h); + } + } + break; default: dev_notice(&hpb->sdev_ufs_lu->sdev_dev, @@ -1811,6 +1839,8 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb) } else { rgn->rgn_state = HPB_RGN_INACTIVE; } + + rgn->rgn_flags = 0; } return 0; diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h index 0204e4fec6bc..43a95c670763 100644 --- a/drivers/scsi/ufs/ufshpb.h +++ b/drivers/scsi/ufs/ufshpb.h @@ -127,6 +127,7 @@ struct ufshpb_region { struct list_head list_lru_rgn; unsigned long rgn_flags; #define RGN_FLAG_DIRTY 0 +#define RGN_FLAG_UPDATE 1 /* region reads - for host mode */ spinlock_t rgn_lock; From 76aa39a9f2d88e9ac124b4d0e8c828bafa5d67e7 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Thu, 4 Jun 2020 15:20:33 +0300 Subject: [PATCH 37/62] FROMLIST: scsi: ufs: Add "Cold" regions timer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order not to hang on to “cold” regions, we shall inactivate a region that has no READ access for a predefined amount of time - READ_TO_MS. For that purpose we shall monitor the active regions list, polling it every POLLING_INTERVAL_MS. On timeout expiry we shall add the region to the "to-be-inactivated" list, unless it is clean and has not exhausted its READ_TO_EXPIRIES - another parameter. All this does not apply to pinned regions.
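A condensed sketch of the per-region decision taken on every poll (assumes the fields added below; queue_rgn_for_inactivation() is a hypothetical stand-in for moving the region to the "to-be-inactivated" list):

        /* Runs every POLLING_INTERVAL_MS; pinned regions never sit on the LRU list. */
        if (ktime_after(ktime_get(), rgn->read_timeout)) {
                rgn->read_timeout_expiries--;
                if (is_rgn_dirty(rgn) || rgn->read_timeout_expiries == 0)
                        queue_rgn_for_inactivation(rgn);        /* hypothetical helper */
                else    /* clean and still has expiries left - re-wind the timer */
                        rgn->read_timeout = ktime_add_ms(ktime_get(), READ_TO_MS);
        }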
Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/lkml/20210607061401.58884-9-avri.altman@wdc.com/ Signed-off-by: Avri Altman Change-Id: I2d2efbbc612ccec6ef7036cc1e1d31bd8bfd4174 --- drivers/scsi/ufs/ufshpb.c | 73 +++++++++++++++++++++++++++++++++++++-- drivers/scsi/ufs/ufshpb.h | 8 +++++ 2 files changed, 79 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 95ad543edaf4..af1aa7790df1 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -18,6 +18,9 @@ #define ACTIVATION_THRESHOLD 8 /* 8 IOs */ #define EVICTION_THRESHOLD (ACTIVATION_THRESHOLD << 5) /* 256 IOs */ +#define READ_TO_MS 1000 +#define READ_TO_EXPIRIES 100 +#define POLLING_INTERVAL_MS 200 /* memory management */ static struct kmem_cache *ufshpb_mctx_cache; @@ -1033,12 +1036,63 @@ static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb, return 0; } +static void ufshpb_read_to_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, + ufshpb_read_to_work.work); + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn, *next_rgn; + unsigned long flags; + LIST_HEAD(expired_list); + + if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits)) + return; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn, + list_lru_rgn) { + bool timedout = ktime_after(ktime_get(), rgn->read_timeout); + + if (timedout) { + rgn->read_timeout_expiries--; + if (is_rgn_dirty(rgn) || + rgn->read_timeout_expiries == 0) + list_add(&rgn->list_expired_rgn, &expired_list); + else + rgn->read_timeout = ktime_add_ms(ktime_get(), + READ_TO_MS); + } + } + + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + list_for_each_entry_safe(rgn, next_rgn, &expired_list, + list_expired_rgn) { + list_del_init(&rgn->list_expired_rgn); + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_update_inactive_info(hpb, rgn->rgn_idx); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + } + + ufshpb_kick_map_work(hpb); + + clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits); + + schedule_delayed_work(&hpb->ufshpb_read_to_work, + msecs_to_jiffies(POLLING_INTERVAL_MS)); +} + static void ufshpb_add_lru_info(struct victim_select_info *lru_info, struct ufshpb_region *rgn) { rgn->rgn_state = HPB_RGN_ACTIVE; list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); atomic_inc(&lru_info->active_cnt); + if (rgn->hpb->is_hcm) { + rgn->read_timeout = ktime_add_ms(ktime_get(), READ_TO_MS); + rgn->read_timeout_expiries = READ_TO_EXPIRIES; + } } static void ufshpb_hit_lru_info(struct victim_select_info *lru_info, @@ -1820,6 +1874,7 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb) INIT_LIST_HEAD(&rgn->list_inact_rgn); INIT_LIST_HEAD(&rgn->list_lru_rgn); + INIT_LIST_HEAD(&rgn->list_expired_rgn); if (rgn_idx == hpb->rgns_per_lu - 1) { srgn_cnt = ((hpb->srgns_per_lu - 1) % @@ -1841,6 +1896,7 @@ static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb) } rgn->rgn_flags = 0; + rgn->hpb = hpb; } return 0; @@ -2065,9 +2121,12 @@ static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb) INIT_LIST_HEAD(&hpb->list_hpb_lu); INIT_WORK(&hpb->map_work, ufshpb_map_work_handler); - if (hpb->is_hcm) + if (hpb->is_hcm) { INIT_WORK(&hpb->ufshpb_normalization_work, ufshpb_normalization_work_handler); + INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work, + ufshpb_read_to_handler); + } 
hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache", sizeof(struct ufshpb_req), 0, 0, NULL); @@ -2101,6 +2160,10 @@ static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb) ufshpb_stat_init(hpb); ufshpb_param_init(hpb); + if (hpb->is_hcm) + schedule_delayed_work(&hpb->ufshpb_read_to_work, + msecs_to_jiffies(POLLING_INTERVAL_MS)); + return 0; release_pre_req_mempool: @@ -2167,8 +2230,10 @@ static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb) static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb) { - if (hpb->is_hcm) + if (hpb->is_hcm) { + cancel_delayed_work_sync(&hpb->ufshpb_read_to_work); cancel_work_sync(&hpb->ufshpb_normalization_work); + } cancel_work_sync(&hpb->map_work); } @@ -2276,6 +2341,10 @@ void ufshpb_resume(struct ufs_hba *hba) continue; ufshpb_set_state(hpb, HPB_PRESENT); ufshpb_kick_map_work(hpb); + if (hpb->is_hcm) + schedule_delayed_work(&hpb->ufshpb_read_to_work, + msecs_to_jiffies(POLLING_INTERVAL_MS)); + } } diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h index 43a95c670763..8309b59c7819 100644 --- a/drivers/scsi/ufs/ufshpb.h +++ b/drivers/scsi/ufs/ufshpb.h @@ -115,6 +115,7 @@ struct ufshpb_subregion { }; struct ufshpb_region { + struct ufshpb_lu *hpb; struct ufshpb_subregion *srgn_tbl; enum HPB_RGN_STATE rgn_state; int rgn_idx; @@ -132,6 +133,10 @@ struct ufshpb_region { /* region reads - for host mode */ spinlock_t rgn_lock; unsigned int reads; + /* region "cold" timer - for host mode */ + ktime_t read_timeout; + unsigned int read_timeout_expiries; + struct list_head list_expired_rgn; }; #define for_each_sub_region(rgn, i, srgn) \ @@ -223,6 +228,9 @@ struct ufshpb_lu { /* for selecting victim */ struct victim_select_info lru_info; struct work_struct ufshpb_normalization_work; + struct delayed_work ufshpb_read_to_work; + unsigned long work_data_bits; +#define TIMEOUT_WORK_RUNNING 0 /* pinned region information */ u32 lu_pinned_start; From 5e463110fb9369e09dedc5400260aaeda8a82842 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Mon, 1 Mar 2021 11:41:34 +0200 Subject: [PATCH 38/62] FROMLIST: scsi: ufs: Limit the number of inflight rb in host control mode The host is the originator of map requests. To not flood the device with map requests, use a simple throttling mechanism that limits the number of inflight map requests. Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/lkml/20210607061401.58884-10-avri.altman@wdc.com/ Signed-off-by: Avri Altman Change-Id: I75a5ced3be60569adcd75befa17d8a6340c147fd --- drivers/scsi/ufs/ufshpb.c | 11 +++++++++++ drivers/scsi/ufs/ufshpb.h | 1 + 2 files changed, 12 insertions(+) diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index af1aa7790df1..53e3f77c87e6 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -21,6 +21,7 @@ #define READ_TO_MS 1000 #define READ_TO_EXPIRIES 100 #define POLLING_INTERVAL_MS 200 +#define THROTTLE_MAP_REQ_DEFAULT 1 /* memory management */ static struct kmem_cache *ufshpb_mctx_cache; @@ -743,6 +744,14 @@ static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, struct ufshpb_req *map_req; struct bio *bio; + if (hpb->is_hcm && + hpb->num_inflight_map_req >= THROTTLE_MAP_REQ_DEFAULT) { + dev_info(&hpb->sdev_ufs_lu->sdev_dev, + "map_req throttle. 
inflight %d throttle %d", + hpb->num_inflight_map_req, THROTTLE_MAP_REQ_DEFAULT); + return NULL; + } + map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_SCSI_IN, false); if (!map_req) return NULL; @@ -757,6 +766,7 @@ static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, map_req->rb.srgn_idx = srgn->srgn_idx; map_req->rb.mctx = srgn->mctx; + hpb->num_inflight_map_req++; return map_req; } @@ -766,6 +776,7 @@ static void ufshpb_put_map_req(struct ufshpb_lu *hpb, { bio_put(map_req->bio); ufshpb_put_req(hpb, map_req); + hpb->num_inflight_map_req--; } static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb, diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h index 8309b59c7819..edf565e9036f 100644 --- a/drivers/scsi/ufs/ufshpb.h +++ b/drivers/scsi/ufs/ufshpb.h @@ -217,6 +217,7 @@ struct ufshpb_lu { struct ufshpb_req *pre_req; int num_inflight_pre_req; int throttle_pre_req; + int num_inflight_map_req; struct list_head lh_pre_req_free; int cur_read_id; int pre_req_min_tr_len; From dd41ce55815ae30eef561455c3c9783defa0fb7b Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Wed, 14 Apr 2021 12:10:35 +0300 Subject: [PATCH 39/62] FROMLIST: scsi: ufs: Do not send umap_all in host control mode HPB-WRITE-BUFFER with buffer-id = 0x3 is supported in device control mode only. Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/lkml/20210607061401.58884-11-avri.altman@wdc.com/ Signed-off-by: Avri Altman Change-Id: I719f8bb876270d960aa92ddc53f29c9fc863fda6 --- drivers/scsi/ufs/ufshpb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 53e3f77c87e6..cc81b8095c59 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -2460,7 +2460,8 @@ static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba) ufshpb_set_state(hpb, HPB_PRESENT); if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0) queue_work(ufshpb_wq, &hpb->map_work); - ufshpb_issue_umap_all_req(hpb); + if (!hpb->is_hcm) + ufshpb_issue_umap_all_req(hpb); } else { dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun); ufshpb_destroy_lu(hba, sdev); From fbf68bf104d77dc0958e63bd66750d0f7f70de46 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Wed, 2 Sep 2020 17:48:18 +0300 Subject: [PATCH 40/62] FROMLIST: scsi: ufs: Add support for HCM Support devices that report they are using host control mode.
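With the rejection below removed, the driver simply honours the mode reported in the HPB control field of the device descriptor. A sketch of the resulting logic (macro and field names as used elsewhere in this series; the exact assignment site is paraphrased):

        hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
        /* HPB_HOST_CONTROL selects host control mode (HCM) */
        hpb->is_hcm = (hpb_dev_info->control_mode == HPB_HOST_CONTROL);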
Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/lkml/20210607061401.58884-12-avri.altman@wdc.com/ Signed-off-by: Avri Altman Change-Id: I7cb80024255626feacfe4c344bbcc17727291a26 --- drivers/scsi/ufs/ufshpb.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index cc81b8095c59..555b7717eda3 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -2584,12 +2584,6 @@ void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW; hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL]; - if (hpb_dev_info->control_mode == HPB_HOST_CONTROL) { - dev_err(hba->dev, "%s: host control mode is not supported.\n", - __func__); - hpb_dev_info->hpb_disabled = true; - return; - } version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER); if ((version != HPB_SUPPORT_VERSION) && From d6a486a1940c69748b42c8a3000b46e229174cb0 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Mon, 1 Feb 2021 11:52:12 +0200 Subject: [PATCH 41/62] FROMLIST: scsi: ufs: Make HCM parameter configurable We can use this commit to elaborate some more on the host control mode logic, explaining the role that each variable plays. While at it, allow those parameters to be configurable. Bug: 183467926 Bug: 170940265 Bug: 183454255 Link: https://lore.kernel.org/lkml/20210607061401.58884-13-avri.altman@wdc.com/ Signed-off-by: Avri Altman Change-Id: Ib05c6643c69504b8d9442b0024cfe1b0b687a4ce --- Documentation/ABI/testing/sysfs-driver-ufs | 76 +++++- drivers/scsi/ufs/ufshpb.c | 289 +++++++++++++++++++-- drivers/scsi/ufs/ufshpb.h | 20 ++ 3 files changed, 368 insertions(+), 17 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs index 168023f9a240..f276b38287f8 100644 --- a/Documentation/ABI/testing/sysfs-driver-ufs +++ b/Documentation/ABI/testing/sysfs-driver-ufs @@ -1430,7 +1430,7 @@ Description: This entry shows the maximum HPB data size for using single HPB The file is read only. -What: /sys/bus/platform/drivers/ufshcd/*/flags/wb_enable +What: /sys/bus/platform/drivers/ufshcd/*/flags/hpb_enable Date: June 2021 Contact: Daejun Park Description: This entry shows the status of HPB. @@ -1441,3 +1441,77 @@ Description: This entry shows the status of HPB. == ============================ The file is read only. + +What: /sys/class/scsi_device/*/device/hpb_param_sysfs/activation_thld +Date: February 2021 +Contact: Avri Altman +Description: In host control mode, reads are the major source of activation + trials. Once this threshold has been met, the region is added to the + "to-be-activated" list. Since we reset the read counter upon + write, this includes sending an RB command updating the region + PPNs as well. + +What: /sys/class/scsi_device/*/device/hpb_param_sysfs/normalization_factor +Date: February 2021 +Contact: Avri Altman +Description: In host control mode, we think of the regions as "buckets". + Those buckets are filled with reads, and emptied on write. + We use entries_per_srgn - the number of blocks in a subregion - as + our bucket size. This applies because HPB1.0 only concerns + single-block reads. Once the bucket size is crossed, we trigger + normalization work - not only to avoid overflow, but mainly + because we want to keep those counters normalized, as we are + using those reads as a comparative score, to make various decisions.
+ The normalization divides (shifts right) the read counter by + the normalization_factor. If during consecutive normalizations + an active region has exhausted its reads - inactivate it. + +What: /sys/class/scsi_device/*/device/hpb_param_sysfs/eviction_thld_enter +Date: February 2021 +Contact: Avri Altman +Description: Region deactivation is often due to the fact that eviction took + place: a region becomes active at the expense of another. This + happens when the max-active-regions limit has been crossed. + In host mode, eviction is considered an extreme measure. We + want to verify that the entering region has enough reads, and + that the exiting region has far fewer reads. eviction_thld_enter is + the min reads that a region must have in order to be considered + as a candidate to evict another region. + +What: /sys/class/scsi_device/*/device/hpb_param_sysfs/eviction_thld_exit +Date: February 2021 +Contact: Avri Altman +Description: Same as above, for the exiting region. A region is considered + a candidate for eviction only if it has fewer reads than + eviction_thld_exit. + +What: /sys/class/scsi_device/*/device/hpb_param_sysfs/read_timeout_ms +Date: February 2021 +Contact: Avri Altman +Description: In order not to hang on to “cold” regions, we shall inactivate + a region that has no READ access for a predefined amount of + time - read_timeout_ms. If read_timeout_ms has expired, and the + region is dirty - it is less likely that we can make any use of + HPB-READing it. So we inactivate it. Still, deactivation has + its overhead, and we may still benefit from HPB-READing this + region if it is clean - see read_timeout_expiries. + +What: /sys/class/scsi_device/*/device/hpb_param_sysfs/read_timeout_expiries +Date: February 2021 +Contact: Avri Altman +Description: If the region's read timeout has expired, but the region is clean, + just rewind its timer for another spin. Do that as long as it + is clean and has not exhausted its read_timeout_expiries threshold. + +What: /sys/class/scsi_device/*/device/hpb_param_sysfs/timeout_polling_interval_ms +Date: February 2021 +Contact: Avri Altman +Description: The frequency at which the delayed worker that checks the + read timeouts is awakened. + +What: /sys/class/scsi_device/*/device/hpb_param_sysfs/inflight_map_req +Date: February 2021 +Contact: Avri Altman +Description: In host control mode, the host is the originator of map requests. + To not flood the device with map requests, use a simple throttling + mechanism that limits the number of inflight map requests. diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c index 555b7717eda3..cb4a34ae5be1 100644 --- a/drivers/scsi/ufs/ufshpb.c +++ b/drivers/scsi/ufs/ufshpb.c @@ -17,7 +17,6 @@ #include "../sd.h" #define ACTIVATION_THRESHOLD 8 /* 8 IOs */ -#define EVICTION_THRESHOLD (ACTIVATION_THRESHOLD << 5) /* 256 IOs */ #define READ_TO_MS 1000 #define READ_TO_EXPIRIES 100 #define POLLING_INTERVAL_MS 200 @@ -195,7 +194,7 @@ next_srgn: } else { srgn->reads++; rgn->reads++; - if (srgn->reads == ACTIVATION_THRESHOLD) + if (srgn->reads == hpb->params.activation_thld) activate = true; } spin_unlock(&rgn->rgn_lock); @@ -745,10 +744,11 @@ static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, struct bio *bio; if (hpb->is_hcm && - hpb->num_inflight_map_req >= THROTTLE_MAP_REQ_DEFAULT) { + hpb->num_inflight_map_req >= hpb->params.inflight_map_req) { dev_info(&hpb->sdev_ufs_lu->sdev_dev, "map_req throttle. 
inflight %d throttle %d", - hpb->num_inflight_map_req, THROTTLE_MAP_REQ_DEFAULT); + hpb->num_inflight_map_req, + hpb->params.inflight_map_req); return NULL; } @@ -1054,6 +1054,7 @@ static void ufshpb_read_to_handler(struct work_struct *work) struct victim_select_info *lru_info = &hpb->lru_info; struct ufshpb_region *rgn, *next_rgn; unsigned long flags; + unsigned int poll; LIST_HEAD(expired_list); if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits)) @@ -1072,7 +1073,7 @@ static void ufshpb_read_to_handler(struct work_struct *work) list_add(&rgn->list_expired_rgn, &expired_list); else rgn->read_timeout = ktime_add_ms(ktime_get(), - READ_TO_MS); + hpb->params.read_timeout_ms); } } @@ -1090,8 +1091,9 @@ static void ufshpb_read_to_handler(struct work_struct *work) clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits); + poll = hpb->params.timeout_polling_interval_ms; schedule_delayed_work(&hpb->ufshpb_read_to_work, - msecs_to_jiffies(POLLING_INTERVAL_MS)); + msecs_to_jiffies(poll)); } static void ufshpb_add_lru_info(struct victim_select_info *lru_info, @@ -1101,8 +1103,11 @@ static void ufshpb_add_lru_info(struct victim_select_info *lru_info, list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); atomic_inc(&lru_info->active_cnt); if (rgn->hpb->is_hcm) { - rgn->read_timeout = ktime_add_ms(ktime_get(), READ_TO_MS); - rgn->read_timeout_expiries = READ_TO_EXPIRIES; + rgn->read_timeout = + ktime_add_ms(ktime_get(), + rgn->hpb->params.read_timeout_ms); + rgn->read_timeout_expiries = + rgn->hpb->params.read_timeout_expiries; } } @@ -1131,7 +1136,8 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) * in host control mode, verify that the exiting region * has less reads */ - if (hpb->is_hcm && rgn->reads > (EVICTION_THRESHOLD >> 1)) + if (hpb->is_hcm && + rgn->reads > hpb->params.eviction_thld_exit) continue; victim_rgn = rgn; @@ -1346,7 +1352,8 @@ static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) * in host control mode, verify that the entering * region has enough reads */ - if (hpb->is_hcm && rgn->reads < EVICTION_THRESHOLD) { + if (hpb->is_hcm && + rgn->reads < hpb->params.eviction_thld_enter) { ret = -EACCES; goto out; } @@ -1697,6 +1704,7 @@ static void ufshpb_normalization_work_handler(struct work_struct *work) struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, ufshpb_normalization_work); int rgn_idx; + u8 factor = hpb->params.normalization_factor; for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx; @@ -1707,7 +1715,7 @@ static void ufshpb_normalization_work_handler(struct work_struct *work) for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) { struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx; - srgn->reads >>= 1; + srgn->reads >>= factor; rgn->reads += srgn->reads; } spin_unlock(&rgn->rgn_lock); @@ -2032,8 +2040,248 @@ requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RW(requeue_timeout_ms); +ufshpb_sysfs_param_show_func(activation_thld); +static ssize_t +activation_thld_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0) + return -EINVAL; + + hpb->params.activation_thld = val; + + return count; 
+} +static DEVICE_ATTR_RW(activation_thld); + +ufshpb_sysfs_param_show_func(normalization_factor); +static ssize_t +normalization_factor_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0 || val > ilog2(hpb->entries_per_srgn)) + return -EINVAL; + + hpb->params.normalization_factor = val; + + return count; +} +static DEVICE_ATTR_RW(normalization_factor); + +ufshpb_sysfs_param_show_func(eviction_thld_enter); +static ssize_t +eviction_thld_enter_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= hpb->params.eviction_thld_exit) + return -EINVAL; + + hpb->params.eviction_thld_enter = val; + + return count; +} +static DEVICE_ATTR_RW(eviction_thld_enter); + +ufshpb_sysfs_param_show_func(eviction_thld_exit); +static ssize_t +eviction_thld_exit_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= hpb->params.activation_thld) + return -EINVAL; + + hpb->params.eviction_thld_exit = val; + + return count; +} +static DEVICE_ATTR_RW(eviction_thld_exit); + +ufshpb_sysfs_param_show_func(read_timeout_ms); +static ssize_t +read_timeout_ms_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + /* read_timeout >> timeout_polling_interval */ + if (val < hpb->params.timeout_polling_interval_ms * 2) + return -EINVAL; + + hpb->params.read_timeout_ms = val; + + return count; +} +static DEVICE_ATTR_RW(read_timeout_ms); + +ufshpb_sysfs_param_show_func(read_timeout_expiries); +static ssize_t +read_timeout_expiries_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0) + return -EINVAL; + + hpb->params.read_timeout_expiries = val; + + return count; +} +static DEVICE_ATTR_RW(read_timeout_expiries); + +ufshpb_sysfs_param_show_func(timeout_polling_interval_ms); +static ssize_t +timeout_polling_interval_ms_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + /* timeout_polling_interval << 
read_timeout */ + if (val <= 0 || val > hpb->params.read_timeout_ms / 2) + return -EINVAL; + + hpb->params.timeout_polling_interval_ms = val; + + return count; +} +static DEVICE_ATTR_RW(timeout_polling_interval_ms); + +ufshpb_sysfs_param_show_func(inflight_map_req); +static ssize_t inflight_map_req_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1) + return -EINVAL; + + hpb->params.inflight_map_req = val; + + return count; +} +static DEVICE_ATTR_RW(inflight_map_req); + + +static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb) +{ + hpb->params.activation_thld = ACTIVATION_THRESHOLD; + hpb->params.normalization_factor = 1; + hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5); + hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4); + hpb->params.read_timeout_ms = READ_TO_MS; + hpb->params.read_timeout_expiries = READ_TO_EXPIRIES; + hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS; + hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT; +} + static struct attribute *hpb_dev_param_attrs[] = { &dev_attr_requeue_timeout_ms.attr, + &dev_attr_activation_thld.attr, + &dev_attr_normalization_factor.attr, + &dev_attr_eviction_thld_enter.attr, + &dev_attr_eviction_thld_exit.attr, + &dev_attr_read_timeout_ms.attr, + &dev_attr_read_timeout_expiries.attr, + &dev_attr_timeout_polling_interval_ms.attr, + &dev_attr_inflight_map_req.attr, NULL, }; @@ -2117,6 +2365,8 @@ static void ufshpb_stat_init(struct ufshpb_lu *hpb) static void ufshpb_param_init(struct ufshpb_lu *hpb) { hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS; + if (hpb->is_hcm) + ufshpb_hcm_param_init(hpb); } static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb) @@ -2171,9 +2421,13 @@ static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb) ufshpb_stat_init(hpb); ufshpb_param_init(hpb); - if (hpb->is_hcm) + if (hpb->is_hcm) { + unsigned int poll; + + poll = hpb->params.timeout_polling_interval_ms; schedule_delayed_work(&hpb->ufshpb_read_to_work, - msecs_to_jiffies(POLLING_INTERVAL_MS)); + msecs_to_jiffies(poll)); + } return 0; @@ -2352,10 +2606,13 @@ void ufshpb_resume(struct ufs_hba *hba) continue; ufshpb_set_state(hpb, HPB_PRESENT); ufshpb_kick_map_work(hpb); - if (hpb->is_hcm) - schedule_delayed_work(&hpb->ufshpb_read_to_work, - msecs_to_jiffies(POLLING_INTERVAL_MS)); + if (hpb->is_hcm) { + unsigned int poll = + hpb->params.timeout_polling_interval_ms; + schedule_delayed_work(&hpb->ufshpb_read_to_work, + msecs_to_jiffies(poll)); + } } } diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h index edf565e9036f..c74a6c35a446 100644 --- a/drivers/scsi/ufs/ufshpb.h +++ b/drivers/scsi/ufs/ufshpb.h @@ -185,8 +185,28 @@ struct victim_select_info { atomic_t active_cnt; }; +/** + * ufshpb_params - ufs hpb parameters + * @requeue_timeout_ms - requeue threshold of wb command (0x2) + * @activation_thld - min reads [IOs] to activate/update a region + * @normalization_factor - shift right the region's reads + * @eviction_thld_enter - min reads [IOs] for the entering region in eviction + * @eviction_thld_exit - max reads [IOs] for the exiting region in eviction + * @read_timeout_ms - timeout [ms] from the last read IO to the 
region + * @read_timeout_expiries - number of allowable timeout expiries + * @timeout_polling_interval_ms - frequency at which timeouts are checked + * @inflight_map_req - number of inflight map requests + */ struct ufshpb_params { unsigned int requeue_timeout_ms; + unsigned int activation_thld; + unsigned int normalization_factor; + unsigned int eviction_thld_enter; + unsigned int eviction_thld_exit; + unsigned int read_timeout_ms; + unsigned int read_timeout_expiries; + unsigned int timeout_polling_interval_ms; + unsigned int inflight_map_req; }; struct ufshpb_stats { From b971e8ab3251885d69f5e3ab643e6511bff6ddf5 Mon Sep 17 00:00:00 2001 From: Roman Kiryanov Date: Fri, 18 Jun 2021 23:52:29 -0700 Subject: [PATCH 42/62] ANDROID: Add CONFIG_CAN=y to gki_defconfig Required for Android Auto. Bug: 190375772 Signed-off-by: Roman Kiryanov Change-Id: I34ec0d68ce3ea1463738a16158adf854eebbb5af --- arch/arm64/configs/gki_defconfig | 1 + arch/x86/configs/gki_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig index 4dc06f9c2f37..0727b5d385ad 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -257,6 +257,7 @@ CONFIG_NET_ACT_MIRRED=y CONFIG_NET_ACT_SKBEDIT=y CONFIG_VSOCKETS=y CONFIG_BPF_JIT=y +CONFIG_CAN=y CONFIG_BT=y CONFIG_BT_RFCOMM=y CONFIG_BT_RFCOMM_TTY=y diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig index c8bf506d767e..746dff8c2d1f 100644 --- a/arch/x86/configs/gki_defconfig +++ b/arch/x86/configs/gki_defconfig @@ -233,6 +233,7 @@ CONFIG_NET_ACT_MIRRED=y CONFIG_NET_ACT_SKBEDIT=y CONFIG_VSOCKETS=y CONFIG_BPF_JIT=y +CONFIG_CAN=y CONFIG_BT=y CONFIG_BT_RFCOMM=y CONFIG_BT_RFCOMM_TTY=y From cebb002b99cf890af81c1dc0ad4e4c607d85f0b3 Mon Sep 17 00:00:00 2001 From: Todd Kjos Date: Wed, 30 Jun 2021 19:44:53 -0700 Subject: [PATCH 43/62] ANDROID: ABI: update virtual device symbols for CAN Need symbols for newly added CAN drivers Bug: 190375772 Signed-off-by: Todd Kjos Change-Id: Ibaa1c0963e2e5efb0cf77e6661a683cb00f095d9 --- android/abi_gki_aarch64_virtual_device | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/android/abi_gki_aarch64_virtual_device b/android/abi_gki_aarch64_virtual_device index cdca090375a8..0914089ac299 100644 --- a/android/abi_gki_aarch64_virtual_device +++ b/android/abi_gki_aarch64_virtual_device @@ -1325,3 +1325,28 @@ _raw_read_unlock _raw_write_lock _raw_write_unlock + +# required by gs_usb.ko + usb_kill_anchored_urbs + alloc_candev_mqs + register_candev + free_candev + can_change_mtu + open_candev + usb_anchor_urb + usb_unanchor_urb + alloc_can_skb + can_get_echo_skb + alloc_can_err_skb + close_candev + can_put_echo_skb + can_free_echo_skb + unregister_candev + +# required by vcan.ko + sock_efree + +# required by slcan.ko + tty_mode_ioctl + tty_hangup + hex_asc_upper From 3a0675c6ca5365dfd42098820815a2ca865b2b4f Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Wed, 26 May 2021 16:52:47 +0800 Subject: [PATCH 44/62] ANDROID: GKI: Add ANDROID_OEM_DATA in struct request_queue Add ANDROID_OEM_DATA for OEM implementations on GKI kernels Bug: 188749221 Change-Id: I96b1c690fda172d0c490e944557a674a37620742 Signed-off-by: Yang Yang --- include/linux/blkdev.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 668e6a732025..dcb7b342f0d3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -588,6 +588,8 @@ struct request_queue { #define BLK_MAX_WRITE_HINTS 5 u64 
write_hints[BLK_MAX_WRITE_HINTS]; + + ANDROID_OEM_DATA(1); }; /* Keep blk_queue_flag_name[] in sync with the definitions below */ From 5b388812e812b0824216a1b4e254fb1195665e22 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Thu, 27 May 2021 18:52:36 +0800 Subject: [PATCH 45/62] ANDROID: GKI: Add ANDROID_OEM_DATA in struct blk_mq_ctx Add ANDROID_OEM_DATA for OEM implementations on GKI kernels Bug: 188749221 Change-Id: Ide8378a898de01a34d8ca3c34472844cd4ffa71c Signed-off-by: Yang Yang --- block/blk-mq.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/block/blk-mq.h b/block/blk-mq.h index f792a0920ebb..d533ab59fcfd 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -35,6 +35,8 @@ struct blk_mq_ctx { struct request_queue *queue; struct blk_mq_ctxs *ctxs; struct kobject kobj; + + ANDROID_OEM_DATA_ARRAY(1, 2); } ____cacheline_aligned_in_smp; void blk_mq_exit_queue(struct request_queue *q); From 1f23e139ad237c5172716f351e846a3e4b955388 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Thu, 27 May 2021 18:53:41 +0800 Subject: [PATCH 46/62] ANDROID: GKI: Add ANDROID_OEM_DATA in struct blk_mq_tags Add ANDROID_OEM_DATA for OEM implementations on GKI kernels Bug: 188749221 Change-Id: I1feba2334aa34e3bc46eb9d0217118485405beb4 Signed-off-by: Yang Yang --- block/blk-mq-tag.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index f887988e5ef6..689cd1e9d7ed 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -26,6 +26,8 @@ struct blk_mq_tags { * request pool */ spinlock_t lock; + + ANDROID_OEM_DATA(1); }; extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, From 19316b4889f88088e9ffc94817d36be5639ca348 Mon Sep 17 00:00:00 2001 From: Yang Yang Date: Wed, 16 Jun 2021 18:06:17 +0800 Subject: [PATCH 47/62] ANDROID: GKI: enable CONFIG_BLK_CGROUP_IOCOST Enable CONFIG_BLK_CGROUP_IOCOST to help control IO resources. Bug: 188749221 Change-Id: I611b3ff5929d0a998fa6241967887803636b7588 Signed-off-by: Yang Yang --- arch/arm64/configs/gki_defconfig | 1 + arch/x86/configs/gki_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig index 0727b5d385ad..fe79f0148936 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -97,6 +97,7 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SCMVERSION=y +CONFIG_BLK_CGROUP_IOCOST=y CONFIG_BLK_INLINE_ENCRYPTION=y CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_IOSCHED_BFQ=y diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig index 746dff8c2d1f..286f2c64a111 100644 --- a/arch/x86/configs/gki_defconfig +++ b/arch/x86/configs/gki_defconfig @@ -73,6 +73,7 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SCMVERSION=y +CONFIG_BLK_CGROUP_IOCOST=y CONFIG_BLK_INLINE_ENCRYPTION=y CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y CONFIG_IOSCHED_BFQ=y From f9fcdaeab7006daef51a87b61801264805a7a729 Mon Sep 17 00:00:00 2001 From: Shaleen Agrawal Date: Wed, 23 Jun 2021 12:50:51 -0700 Subject: [PATCH 48/62] ANDROID: sched: remove regular vendor hooks for 32bit execve As restricted hooks have been introduced, regular vendor hooks are no longer necessary. 
Bug: 187917024 Change-Id: Ia70e9dd1bd7373e19bdc82e90a2384201076bc0b Signed-off-by: Shaleen Agrawal --- android/abi_gki_aarch64_qcom | 4 ---- drivers/android/vendor_hooks.c | 2 -- include/trace/hooks/sched.h | 8 -------- kernel/sched/core.c | 2 -- 4 files changed, 16 deletions(-) diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index 9ca7f943d6b9..a23291abd129 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -2480,8 +2480,6 @@ __traceiter_android_vh_cpuidle_psci_enter __traceiter_android_vh_cpuidle_psci_exit __traceiter_android_vh_dump_throttled_rt_tasks - __traceiter_android_vh_force_compatible_post - __traceiter_android_vh_force_compatible_pre __traceiter_android_vh_freq_table_limits __traceiter_android_vh_ftrace_dump_buffer __traceiter_android_vh_ftrace_format_check @@ -2589,8 +2587,6 @@ __tracepoint_android_vh_cpuidle_psci_enter __tracepoint_android_vh_cpuidle_psci_exit __tracepoint_android_vh_dump_throttled_rt_tasks - __tracepoint_android_vh_force_compatible_post - __tracepoint_android_vh_force_compatible_pre __tracepoint_android_vh_freq_table_limits __tracepoint_android_vh_ftrace_dump_buffer __tracepoint_android_vh_ftrace_format_check diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c index 423bcb7121c6..0a632d52d2a6 100644 --- a/drivers/android/vendor_hooks.c +++ b/drivers/android/vendor_hooks.c @@ -321,8 +321,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_v4l2subdev_set_fmt); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_v4l2subdev_set_frame_interval); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_scmi_timeout_sync); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_new_ilb); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_force_compatible_pre); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_force_compatible_post); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_uid); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_user); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_qos_add_request); diff --git a/include/trace/hooks/sched.h b/include/trace/hooks/sched.h index 914f5fdc4709..3b7d022c4328 100644 --- a/include/trace/hooks/sched.h +++ b/include/trace/hooks/sched.h @@ -370,14 +370,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_find_new_ilb, TP_PROTO(struct cpumask *nohz_idle_cpus_mask, int *ilb), TP_ARGS(nohz_idle_cpus_mask, ilb), 1); -DECLARE_HOOK(android_vh_force_compatible_pre, - TP_PROTO(void *unused), - TP_ARGS(unused)); - -DECLARE_HOOK(android_vh_force_compatible_post, - TP_PROTO(void *unused), - TP_ARGS(unused)); - DECLARE_RESTRICTED_HOOK(android_rvh_force_compatible_pre, TP_PROTO(void *unused), TP_ARGS(unused), 1); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 37c6ca73b000..36521a857126 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2107,7 +2107,6 @@ void force_compatible_cpus_allowed_ptr(struct task_struct *p) * offlining of the chosen destination CPU, so take the hotplug * lock to ensure that the migration succeeds. 
*/ - trace_android_vh_force_compatible_pre(NULL); trace_android_rvh_force_compatible_pre(NULL); cpus_read_lock(); if (!cpumask_available(new_mask)) @@ -2133,7 +2132,6 @@ out_set_mask: WARN_ON(set_cpus_allowed_ptr(p, override_mask)); out_free_mask: cpus_read_unlock(); - trace_android_vh_force_compatible_post(NULL); trace_android_rvh_force_compatible_post(NULL); free_cpumask_var(new_mask); } From 4b7c035626241d98f5c63e84150b0a54404cb9fa Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Fri, 11 Jun 2021 09:54:42 +0800 Subject: [PATCH 49/62] UPSTREAM: mm: relocate 'write_protect_seq' in struct mm_struct 0day robot reported a 9.2% regression for will-it-scale mmap1 test case[1], caused by commit 57efa1fe5957 ("mm/gup: prevent gup_fast from racing with COW during fork"). Further debugging shows the regression is due to that commit changing the offset of the hot field 'mmap_lock' inside structure 'mm_struct', and thus its cache alignment. From the perf data, the contention for 'mmap_lock' is very severe and takes around 95% cpu cycles, and it is a rw_semaphore struct rw_semaphore { atomic_long_t count; /* 8 bytes */ atomic_long_t owner; /* 8 bytes */ struct optimistic_spin_queue osq; /* spinner MCS lock */ ... Before commit 57efa1fe5957 adds the 'write_protect_seq', it happens to have a very optimal cache alignment layout, as Linus explained: "and before the addition of the 'write_protect_seq' field, the mmap_sem was at offset 120 in 'struct mm_struct'. Which meant that count and owner were in two different cachelines, and then when you have contention and spend time in rwsem_down_write_slowpath(), this is probably *exactly* the kind of layout you want. Because first the rwsem_write_trylock() will do a cmpxchg on the first cacheline (for the optimistic fast-path), and then in the case of contention, rwsem_down_write_slowpath() will just access the second cacheline. Which is probably just optimal for a load that spends a lot of time contended - new waiters touch that first cacheline, and then they queue themselves up on the second cacheline." After the commit, the rw_semaphore is at offset 128, which means the 'count' and 'owner' fields are now in the same cacheline, and causes more cache bouncing. Currently there are 3 "#ifdef CONFIG_XXX" before 'mmap_lock' which will affect its offset: CONFIG_MMU CONFIG_MEMBARRIER CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES The layout above is on 64 bits system with 0day's default kernel config (similar to RHEL-8.3's config), in which all these 3 options are 'y'. And the layout can vary with different kernel configs. Relayouting a structure is usually a double-edged sword, as sometimes it can help one case, but hurt other cases. For this case, one solution is, as the newly added 'write_protect_seq' is a 4 bytes long seqcount_t (when CONFIG_DEBUG_LOCK_ALLOC=n), placing it into an existing 4 bytes hole in 'mm_struct' will not change other fields' alignment, while restoring the regression. 
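One way to sanity-check the resulting field offsets on a given config is pahole against a vmlinux built with debug info (illustrative invocation):

        $ pahole -C mm_struct vmlinux | grep -n -A 3 mmap_lock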
Link: https://lore.kernel.org/lkml/20210525031636.GB7744@xsang-OptiPlex-9020/ [1] Reported-by: kernel test robot Signed-off-by: Feng Tang Reviewed-by: John Hubbard Reviewed-by: Jason Gunthorpe Cc: Peter Xu Signed-off-by: Linus Torvalds Bug: 161946584 (cherry picked from commit 2e3025434a6ba090c85871a1d4080ff784109e1f) Signed-off-by: Greg Kroah-Hartman Change-Id: I9142789c5d57d167e5bb1f450d914bf2111894a2 --- include/linux/mm_types.h | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 50648bc12864..4398d84d2f8b 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -468,13 +468,6 @@ struct mm_struct { */ atomic_t has_pinned; - /** - * @write_protect_seq: Locked when any thread is write - * protecting pages mapped by this mm to enforce a later COW, - * for instance during page table copying for fork(). - */ - seqcount_t write_protect_seq; - #ifdef CONFIG_MMU atomic_long_t pgtables_bytes; /* PTE page table pages */ #endif @@ -483,6 +476,18 @@ struct mm_struct { spinlock_t page_table_lock; /* Protects page tables and some * counters */ + /* + * With some kernel config, the current mmap_lock's offset + * inside 'mm_struct' is at 0x120, which is very optimal, as + * its two hot fields 'count' and 'owner' sit in 2 different + * cachelines, and when mmap_lock is highly contended, both + * of the 2 fields will be accessed frequently, current layout + * will help to reduce cache bouncing. + * + * So please be careful with adding new fields before + * mmap_lock, which can easily push the 2 fields into one + * cacheline. + */ struct rw_semaphore mmap_lock; struct list_head mmlist; /* List of maybe swapped mm's. These @@ -503,7 +508,15 @@ struct mm_struct { unsigned long stack_vm; /* VM_STACK */ unsigned long def_flags; + /** + * @write_protect_seq: Locked when any thread is write + * protecting pages mapped by this mm to enforce a later COW, + * for instance during page table copying for fork(). + */ + seqcount_t write_protect_seq; + spinlock_t arg_lock; /* protect the below fields */ + unsigned long start_code, end_code, start_data, end_data; unsigned long start_brk, brk, start_stack; unsigned long arg_start, arg_end, env_start, env_end; From 3ad47a04c95bf75a174f6c86e4edd5920d210050 Mon Sep 17 00:00:00 2001 From: zhang chuang Date: Fri, 18 Jun 2021 13:24:52 +0800 Subject: [PATCH 50/62] ANDROID: Add oem data array for async binder transaction We need to obtain the pid and tid of the caller of an async binder transaction, so add room for that information to the async binder transaction. Bug: 190413570 Signed-off-by: zhang chuang Change-Id: If67c972aa53196d626ccfeb46b6b61e43ddc57ae --- drivers/android/binder_internal.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index 177ffc2918dc..2ddbec09d2f9 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -572,6 +572,7 @@ struct binder_transaction { */ spinlock_t lock; ANDROID_VENDOR_DATA(1); + ANDROID_OEM_DATA_ARRAY(1, 2); }; /** From df80ec7469958eb2c7a95e3211b110610fec1250 Mon Sep 17 00:00:00 2001 From: Rick Yiu Date: Thu, 24 Jun 2021 18:52:06 +0800 Subject: [PATCH 51/62] ANDROID: sched: Add vendor data in struct cfs_rq For vendor-specific data in struct cfs_rq. 
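These reserved fields are opaque padding; assuming the usual definitions in include/linux/android_vendor.h, they expand roughly as: ANDROID_VENDOR_DATA_ARRAY(1, 16) -> u64 android_vendor_data1[16]; and ANDROID_OEM_DATA_ARRAY(1, 2) -> u64 android_oem_data1[2];. A hypothetical OEM vendor-hook handler could then stash the async caller's ids in the binder_transaction slots added above - the slot layout is the OEM's choice and is shown only as an example:

        static void oem_binder_txn_hook(void *unused, struct binder_transaction *t)
        {
                t->android_oem_data1[0] = task_tgid_nr(current); /* caller pid */
                t->android_oem_data1[1] = task_pid_nr(current);  /* caller tid */
        }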
Bug: 188947181 Signed-off-by: Rick Yiu Change-Id: I7c322c6812829c19014426b5721cd1fb0c37a53f --- kernel/sched/sched.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index bcd1b1d9ebb8..e21ffcae3508 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -613,6 +613,8 @@ struct cfs_rq { int throttle_count; struct list_head throttled_list; #endif /* CONFIG_CFS_BANDWIDTH */ + + ANDROID_VENDOR_DATA_ARRAY(1, 16); #endif /* CONFIG_FAIR_GROUP_SCHED */ }; From 019990276b252fe8df496939455034c2c9d79696 Mon Sep 17 00:00:00 2001 From: Siddharth Gupta Date: Fri, 25 Jun 2021 11:58:39 -0700 Subject: [PATCH 52/62] ANDROID: rproc: Add vendor hook for recovery Add vendor hook for rproc recovery to allow vendor enhancements. Bug: 188764827 Change-Id: If6f0846c141a4ad40748d552002f65b94d9c52d5 Signed-off-by: Siddharth Gupta --- drivers/android/vendor_hooks.c | 2 ++ drivers/remoteproc/remoteproc_core.c | 2 ++ include/trace/hooks/remoteproc.h | 21 +++++++++++++++++++++ 3 files changed, 25 insertions(+) create mode 100644 include/trace/hooks/remoteproc.h diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c index 0a632d52d2a6..fd57c2ed2fb0 100644 --- a/drivers/android/vendor_hooks.c +++ b/drivers/android/vendor_hooks.c @@ -63,6 +63,7 @@ #include #include #include +#include /* * Export tracepoints that act as a bare tracehook (ie: have no trace event * associated with them) @@ -337,3 +338,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_force_compatible_pre); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_force_compatible_post); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_print_transaction_info); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_setscheduler_uclamp); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rproc_recovery); diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 9107ec4ca0d7..5ec0502c04c4 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -39,6 +39,7 @@ #include #include #include +#include #include "remoteproc_internal.h" @@ -1725,6 +1726,7 @@ int rproc_trigger_recovery(struct rproc *rproc) release_firmware(firmware_p); unlock_mutex: + trace_android_vh_rproc_recovery(rproc); mutex_unlock(&rproc->lock); return ret; } diff --git a/include/trace/hooks/remoteproc.h b/include/trace/hooks/remoteproc.h new file mode 100644 index 000000000000..7cc5e93d2ebe --- /dev/null +++ b/include/trace/hooks/remoteproc.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM remoteproc + +#define TRACE_INCLUDE_PATH trace/hooks + +#if !defined(_TRACE_HOOK_RPROC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HOOK_RPROC_H + +#include +#include + +struct rproc; + +DECLARE_HOOK(android_vh_rproc_recovery, + TP_PROTO(struct rproc *rproc), + TP_ARGS(rproc)); + +#endif /* _TRACE_HOOK_RPROC_H */ +/* This part must be outside protection */ +#include From 3136de9b19f43bc8284a8038f8dcf14d7d159851 Mon Sep 17 00:00:00 2001 From: Siddharth Gupta Date: Fri, 25 Jun 2021 11:59:35 -0700 Subject: [PATCH 53/62] ANDROID: GKI: Add remoteproc vendor hook symbols Add the symbols exported by the remoteproc vendor hook to the qcom symbol list. 
Change-Id: Iffd58aa5d367141de1c065488519b29fb802fd86 Signed-off-by: Siddharth Gupta --- android/abi_gki_aarch64_qcom | 2 ++ 1 file changed, 2 insertions(+) diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index a23291abd129..f18f7aff7377 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -2494,6 +2494,7 @@ __traceiter_android_vh_logbuf __traceiter_android_vh_logbuf_pr_cont __traceiter_android_vh_printk_hotplug + __traceiter_android_vh_rproc_recovery __traceiter_android_vh_scheduler_tick __traceiter_android_vh_show_max_freq __traceiter_android_vh_show_resume_epoch_val @@ -2605,6 +2606,7 @@ __tracepoint_android_vh_process_killed __tracepoint_android_vh_psi_event __tracepoint_android_vh_psi_group + __tracepoint_android_vh_rproc_recovery __tracepoint_android_vh_scheduler_tick __tracepoint_android_vh_show_max_freq __tracepoint_android_vh_show_resume_epoch_val From 00d9f57a6966d5800b8a429ac70a56df10fe7288 Mon Sep 17 00:00:00 2001 From: Siddharth Gupta Date: Tue, 29 Jun 2021 12:18:25 -0700 Subject: [PATCH 54/62] ANDROID: GKI: Add rproc coredump APIs to symbol list This change adds the rproc_coredump() and rproc_coredump_cleanup() APIs to the qcom symbol list. Bug: 188764827 Change-Id: I32a56f5d3caabc61ed94f6de0d7daa29becb490d Signed-off-by: Siddharth Gupta --- android/abi_gki_aarch64_qcom | 2 ++ 1 file changed, 2 insertions(+) diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index f18f7aff7377..e10238d2debb 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -2015,8 +2015,10 @@ rproc_add_subdev rproc_alloc rproc_boot + rproc_coredump rproc_coredump_add_custom_segment rproc_coredump_add_segment + rproc_coredump_cleanup rproc_coredump_set_elf_info rproc_coredump_using_sections rproc_del From f45304d0919d8306870297826d342ca3f010823b Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Wed, 30 Jun 2021 11:04:47 +0530 Subject: [PATCH 55/62] ANDROID: GKI: mmc: add Android ABI padding to some structures Add ABI padding to some of the data structures to accommodate the enablement of new eMMC features later. 
Bug: 192337957 Change-Id: Ica3f96ea004fb89e4b46ef9734864c655cdcd277 Signed-off-by: Sahitya Tummala --- drivers/mmc/core/core.h | 2 ++ include/linux/mmc/card.h | 2 ++ include/linux/mmc/host.h | 1 + 3 files changed, 5 insertions(+) diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index db3c9c68875d..dbe7ee457bed 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -30,6 +30,8 @@ struct mmc_bus_ops { int (*hw_reset)(struct mmc_host *); int (*sw_reset)(struct mmc_host *); bool (*cache_enabled)(struct mmc_host *); + + ANDROID_VENDOR_DATA_ARRAY(1, 2); }; void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 42df06c6b19c..6f13f15e95f0 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -313,6 +313,8 @@ struct mmc_card { unsigned int bouncesz; /* Bounce buffer size */ struct workqueue_struct *complete_wq; /* Private workqueue */ + + ANDROID_VENDOR_DATA(1); }; static inline bool mmc_large_sector(struct mmc_card *card) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 511b14d76b7c..7891e314599d 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -485,6 +485,7 @@ struct mmc_host { /* Host Software Queue support */ bool hsq_enabled; + ANDROID_VENDOR_DATA(1); ANDROID_OEM_DATA(1); unsigned long private[] ____cacheline_aligned; From 32f0fa685cf9ddcfbbb30c7add0edea71103a9b6 Mon Sep 17 00:00:00 2001 From: Kalesh Singh Date: Wed, 30 Jun 2021 17:10:41 +0000 Subject: [PATCH 56/62] Revert "ANDROID: dma-buf: Don't change vm_ops if vm_file changes" This reverts commit fca37c251af3c542ba8b9786143188ce9cd51989. Reason for revert: mmap_count is no longer used for reporting dma-bufs and introduces subtle bugs related to changing the vm_ops Bug: 192459295 Signed-off-by: Kalesh Singh Change-Id: I52fb55e1048a151fae7641c9646a231d59b3224d --- drivers/dma-buf/dma-buf.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 9ca8f84732b3..0ab865543d1f 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -173,17 +173,12 @@ static void dma_buf_vma_close(struct vm_area_struct *vma) static int dma_buf_do_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) { - int ret; - struct file *orig_vm_file = vma->vm_file; - /* call this first because the exporter might override vma->vm_ops */ - ret = dmabuf->ops->mmap(dmabuf, vma); + int ret = dmabuf->ops->mmap(dmabuf, vma); + if (ret) return ret; - if (orig_vm_file != vma->vm_file) - return 0; - /* save the exporter provided vm_ops */ dmabuf->exp_vm_ops = vma->vm_ops; dmabuf->vm_ops = *(dmabuf->exp_vm_ops); From 25c500f2dc5216e5e8cfe05d21d4e156bd4bdbe2 Mon Sep 17 00:00:00 2001 From: Kalesh Singh Date: Wed, 30 Jun 2021 13:14:17 -0400 Subject: [PATCH 57/62] Revert "ANDROID: dmabuf: Add mmap_count to struct dmabuf" This reverts commit 9132fbe545925a36b45c0738d1f9aa5cecb86050. 
Reason for revert: mmap_count is no longer used for reporting dma-bufs and introduces subtle bugs related to changing the vm_ops Bug: 192459295 Signed-off-by: Kalesh Singh Change-Id: Id07802e5a3e18918c5c46e31b73be4a594f7dc26 --- .../ABI/testing/sysfs-kernel-dmabuf-buffers | 7 --- drivers/dma-buf/dma-buf-sysfs-stats.c | 10 ---- drivers/dma-buf/dma-buf.c | 50 +------------------ include/linux/dma-buf.h | 6 --- 4 files changed, 1 insertion(+), 72 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers b/Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers index 721d486a186b..6f7c65209f07 100644 --- a/Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers +++ b/Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers @@ -50,10 +50,3 @@ KernelVersion: v5.12 Contact: Hridya Valsaraju Description: This file is read-only and contains a map_counter indicating the number of distinct device mappings of the attachment. - -What: /sys/kernel/dmabuf/buffers//mmap_count -Date: January 2021 -KernelVersion: v5.10 -Contact: Kalesh Singh -Description: This file is read-only and contains a counter indicating the - number of times the buffer has been mmap(). diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c index 943e395d1807..e5f9e33044ed 100644 --- a/drivers/dma-buf/dma-buf-sysfs-stats.c +++ b/drivers/dma-buf/dma-buf-sysfs-stats.c @@ -52,13 +52,6 @@ static ssize_t exporter_name_show(struct dma_buf *dmabuf, return sysfs_emit(buf, "%s\n", dmabuf->exp_name); } -static ssize_t mmap_count_show(struct dma_buf *dmabuf, - struct dma_buf_stats_attribute *attr, - char *buf) -{ - return sysfs_emit(buf, "%d\n", dmabuf->mmap_count); -} - static ssize_t size_show(struct dma_buf *dmabuf, struct dma_buf_stats_attribute *attr, char *buf) @@ -69,13 +62,10 @@ static ssize_t size_show(struct dma_buf *dmabuf, static struct dma_buf_stats_attribute exporter_name_attribute = __ATTR_RO(exporter_name); static struct dma_buf_stats_attribute size_attribute = __ATTR_RO(size); -static struct dma_buf_stats_attribute mmap_count_attribute = - __ATTR_RO(mmap_count); static struct attribute *dma_buf_stats_default_attrs[] = { &exporter_name_attribute.attr, &size_attribute.attr, - &mmap_count_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(dma_buf_stats_default); diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 0ab865543d1f..eb8b66b569c8 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -149,54 +149,6 @@ static struct file_system_type dma_buf_fs_type = { .kill_sb = kill_anon_super, }; -#ifdef CONFIG_DMABUF_SYSFS_STATS -static void dma_buf_vma_open(struct vm_area_struct *vma) -{ - struct dma_buf *dmabuf = vma->vm_file->private_data; - - dmabuf->mmap_count++; - /* call the heap provided vma open() op */ - if (dmabuf->exp_vm_ops->open) - dmabuf->exp_vm_ops->open(vma); -} - -static void dma_buf_vma_close(struct vm_area_struct *vma) -{ - struct dma_buf *dmabuf = vma->vm_file->private_data; - - if (dmabuf->mmap_count) - dmabuf->mmap_count--; - /* call the heap provided vma close() op */ - if (dmabuf->exp_vm_ops->close) - dmabuf->exp_vm_ops->close(vma); -} - -static int dma_buf_do_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) -{ - /* call this first because the exporter might override vma->vm_ops */ - int ret = dmabuf->ops->mmap(dmabuf, vma); - - if (ret) - return ret; - - /* save the exporter provided vm_ops */ - dmabuf->exp_vm_ops = vma->vm_ops; - dmabuf->vm_ops = *(dmabuf->exp_vm_ops); - /* override open() and close() to provide buffer 
mmap count */ - dmabuf->vm_ops.open = dma_buf_vma_open; - dmabuf->vm_ops.close = dma_buf_vma_close; - vma->vm_ops = &dmabuf->vm_ops; - dmabuf->mmap_count++; - - return ret; -} -#else /* CONFIG_DMABUF_SYSFS_STATS */ -static int dma_buf_do_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) -{ - return dmabuf->ops->mmap(dmabuf, vma); -} -#endif /* CONFIG_DMABUF_SYSFS_STATS */ - static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) { struct dma_buf *dmabuf; @@ -215,7 +167,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) dmabuf->size >> PAGE_SHIFT) return -EINVAL; - return dma_buf_do_mmap(dmabuf, vma); + return dmabuf->ops->mmap(dmabuf, vma); } static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index f1242b50f627..303f559d7b30 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -381,9 +381,6 @@ struct dma_buf_ops { * @sysfs_entry: for exposing information about this buffer in sysfs. * The attachment_uid member of @sysfs_entry is protected by dma_resv lock * and is incremented on each attach. - * @mmap_count: number of times buffer has been mmapped. - * @exp_vm_ops: the vm ops provided by the buffer exporter. - * @vm_ops: the overridden vm_ops used to track mmap_count of the buffer. * * This represents a shared buffer, created by calling dma_buf_export(). The * userspace representation is a normal file descriptor, which can be created by @@ -427,9 +424,6 @@ struct dma_buf { unsigned int attachment_uid; struct kset *attach_stats_kset; } *sysfs_entry; - int mmap_count; - const struct vm_operations_struct *exp_vm_ops; - struct vm_operations_struct vm_ops; #endif }; From ee12781f0c77113a93329a8c16b13a253e1bf54b Mon Sep 17 00:00:00 2001 From: Todd Kjos Date: Tue, 22 Jun 2021 11:53:38 -0700 Subject: [PATCH 58/62] Revert "ANDROID: GKI: Enable some necessary CFG80211 configs for WIFI" This reverts commit bba0d8a87e2679388c1c09bd47be0274fbe6a69a. CFG80211 is changing to a module, so these configs go into device-specific defconfig fragments.
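As a hedged sketch of where such configs land now, a board that still wants the reverted options would carry them in its own fragment; the path below is hypothetical, but patches 59 and 60 later in this series add real blocks of the same shape to db845c_gki.fragment and hikey960_gki.fragment:

# arch/arm64/configs/exampleboard_gki.fragment (hypothetical)
CONFIG_CFG80211=m
CONFIG_NL80211_TESTMODE=y
# Options reverted out of gki_defconfig above, re-enabled per device:
CONFIG_CFG80211_CERTIFICATION_ONUS=y
CONFIG_CFG80211_REG_CELLULAR_HINTS=y
CONFIG_MAC80211=m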
Bug: 189918667 Change-Id: Ie4b70407369da3c865541e4857c3ba18fec24587 --- arch/arm64/configs/gki_defconfig | 4 ++-- arch/x86/configs/gki_defconfig | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig index fe79f0148936..5ae9733b5cfa 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -270,8 +270,8 @@ CONFIG_BT_HCIUART_BCM=y CONFIG_BT_HCIUART_QCA=y CONFIG_CFG80211=y CONFIG_NL80211_TESTMODE=y -CONFIG_CFG80211_CERTIFICATION_ONUS=y -CONFIG_CFG80211_REG_CELLULAR_HINTS=y +# CONFIG_CFG80211_DEFAULT_PS is not set +# CONFIG_CFG80211_CRDA_SUPPORT is not set CONFIG_MAC80211=y CONFIG_RFKILL=y CONFIG_PCI=y diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig index 286f2c64a111..0b110382103f 100644 --- a/arch/x86/configs/gki_defconfig +++ b/arch/x86/configs/gki_defconfig @@ -246,8 +246,6 @@ CONFIG_BT_HCIUART_BCM=y CONFIG_BT_HCIUART_QCA=y CONFIG_CFG80211=y CONFIG_NL80211_TESTMODE=y -CONFIG_CFG80211_CERTIFICATION_ONUS=y -CONFIG_CFG80211_REG_CELLULAR_HINTS=y # CONFIG_CFG80211_DEFAULT_PS is not set # CONFIG_CFG80211_CRDA_SUPPORT is not set CONFIG_MAC80211=y From 7d8c327a64d94ee2679fc6fbf5e46b13738cf2eb Mon Sep 17 00:00:00 2001 From: Todd Kjos Date: Thu, 10 Jun 2021 10:55:31 -0700 Subject: [PATCH 59/62] ANDROID: ABI: gki_defconfig: Make cfg80211 and mac80211 modules Remove CONFIG_CFG80211 and CONFIG_MAC80211 from gki_defconfig to allow vendors to incorporate features that have landed upstream. Also need to update symbol lists since the related 80211 symbols are no longer exported from the core kernel. Bug: 189918667 Signed-off-by: Todd Kjos Change-Id: Iab3971cd0d78d669536b8eb0505c60caa3aafeee --- android/abi_gki_aarch64_db845c | 314 +++++++++++++++---------- android/abi_gki_aarch64_generic | 38 --- android/abi_gki_aarch64_hikey960 | 49 ---- android/abi_gki_aarch64_mtk | 41 ---- android/abi_gki_aarch64_qcom | 39 --- android/abi_gki_aarch64_unisoc | 37 --- android/abi_gki_aarch64_virtual_device | 38 --- arch/arm64/configs/db845c_gki.fragment | 5 + arch/arm64/configs/gki_defconfig | 5 - arch/x86/configs/gki_defconfig | 5 - 10 files changed, 201 insertions(+), 370 deletions(-) diff --git a/android/abi_gki_aarch64_db845c b/android/abi_gki_aarch64_db845c index ef65c2cad42b..86ef49b9a976 100644 --- a/android/abi_gki_aarch64_db845c +++ b/android/abi_gki_aarch64_db845c @@ -2,19 +2,25 @@ # commonly used symbols add_uevent_var alloc_io_pgtable_ops + __alloc_skb alloc_workqueue __arch_copy_from_user __arch_copy_to_user arm64_const_caps_ready arm64_use_ng_mappings + bcmp blocking_notifier_call_chain blocking_notifier_chain_register blocking_notifier_chain_unregister + bpf_trace_run1 + bpf_trace_run2 bpf_trace_run3 bpf_trace_run4 bpf_trace_run5 + bpf_trace_run6 bus_register bus_unregister + cancel_delayed_work cancel_delayed_work_sync cancel_work_sync capable @@ -23,6 +29,8 @@ cdev_init __cfi_slowpath __check_object_size + __class_register + class_unregister clk_bulk_disable clk_bulk_enable clk_bulk_prepare @@ -54,7 +62,9 @@ cpumask_next cpu_number __cpu_online_mask + __cpu_possible_mask crc32_le + _ctype debugfs_create_dir debugfs_create_file debugfs_create_u32 @@ -62,8 +72,10 @@ debugfs_remove default_llseek delayed_work_timer_fn + del_timer del_timer_sync destroy_workqueue + dev_close dev_coredumpv dev_driver_string _dev_err @@ -108,6 +120,7 @@ devm_pinctrl_register devm_platform_ioremap_resource devm_regmap_add_irq_chip + devm_regmap_field_alloc __devm_regmap_init 
__devm_regmap_init_i2c __devm_regmap_init_mmio_clk @@ -175,15 +188,20 @@ drm_helper_probe_single_connector_modes drm_mode_vrefresh enable_irq + eth_mac_addr eth_platform_get_mac_address + ethtool_op_get_link + eth_type_trans eth_validate_addr event_triggers_call find_next_bit find_next_zero_bit finish_wait + flush_work flush_workqueue free_io_pgtable_ops free_irq + gcd generic_handle_irq generic_mii_ioctl get_device @@ -232,12 +250,13 @@ idr_alloc_cyclic idr_destroy idr_find + idr_for_each idr_get_next idr_remove - ieee80211_get_channel_khz init_net __init_swait_queue_head init_timer_key + init_uts_ns init_wait_entry __init_waitqueue_head iomem_resource @@ -276,6 +295,8 @@ irq_to_desc is_vmalloc_addr jiffies + jiffies_to_msecs + jiffies_to_usecs kasan_flag_enabled kasprintf kernel_connect @@ -284,6 +305,7 @@ kernel_sendmsg kfree kfree_const + kfree_sensitive kfree_skb __kmalloc kmalloc_caches @@ -297,8 +319,12 @@ ktime_get ktime_get_mono_fast_ns ktime_get_real_ts64 + kvfree + kvfree_call_rcu + kvmalloc_node __list_add_valid __list_del_entry_valid + __local_bh_enable_ip __log_post_read_mmio __log_read_mmio __log_write_mmio @@ -313,10 +339,12 @@ memremap memset memstart_addr + memunmap mii_ethtool_gset mii_nway_restart misc_deregister misc_register + mod_delayed_work_on mod_timer module_layout __msecs_to_jiffies @@ -333,10 +361,15 @@ netdev_err netdev_info netdev_warn + netif_carrier_on netif_napi_add __netif_napi_del + __nla_parse + nla_put no_llseek nr_cpu_ids + nvmem_cell_get + nvmem_cell_put nvmem_cell_read of_address_to_resource of_alias_get_id @@ -348,6 +381,7 @@ of_device_is_compatible of_device_uevent_modalias of_dma_configure_id + of_find_device_by_node of_find_property of_fwnode_ops of_genpd_add_provider_onecell @@ -380,8 +414,11 @@ of_property_read_u32_index of_property_read_variable_u32_array of_property_read_variable_u8_array + of_prop_next_u32 of_reserved_mem_lookup param_ops_bool + param_ops_charp + param_ops_int param_ops_uint __pci_register_driver pci_unregister_driver @@ -419,10 +456,12 @@ __pm_runtime_set_status __pm_runtime_suspend __pm_runtime_use_autosuspend + preempt_schedule preempt_schedule_notrace prepare_to_wait_event printk pskb_expand_head + __pskb_pull_tail put_device __put_task_struct qcom_smem_state_register @@ -451,10 +490,13 @@ regcache_cache_only regcache_mark_dirty regcache_sync + register_netdevice_notifier register_reboot_notifier __register_rpmsg_driver regmap_bulk_read regmap_bulk_write + regmap_field_read + regmap_field_update_bits_base __regmap_init regmap_irq_get_virq regmap_multi_reg_write @@ -480,6 +522,7 @@ reset_control_assert reset_control_deassert reset_control_reset + round_jiffies_up rpmsg_register_device rpmsg_send rpmsg_unregister_device @@ -490,6 +533,9 @@ rproc_del rproc_free rproc_remove_subdev + rtnl_is_locked + rtnl_lock + rtnl_unlock schedule schedule_timeout scnprintf @@ -507,10 +553,14 @@ single_open single_release skb_clone + skb_copy + skb_copy_bits + skb_copy_expand skb_dequeue skb_pull skb_push skb_put + skb_queue_head skb_queue_purge skb_queue_tail skb_trim @@ -557,16 +607,22 @@ strncpy strpbrk strsep + __sw_hweight16 __sw_hweight32 __sw_hweight64 + __sw_hweight8 synchronize_irq synchronize_net + synchronize_rcu syscon_node_to_regmap syscon_regmap_lookup_by_phandle sysfs_create_link sysfs_remove_link sysrq_mask + system_power_efficient_wq system_wq + tasklet_init + tasklet_kill __tasklet_schedule thermal_cooling_device_unregister trace_event_buffer_commit @@ -597,6 +653,7 @@ uart_write_wakeup __udelay unregister_chrdev_region 
+ unregister_netdevice_notifier unregister_reboot_notifier unregister_rpmsg_driver usb_deregister @@ -623,6 +680,7 @@ usbnet_write_cmd_async usbnet_write_cmd_nopm usb_register_driver + __usecs_to_jiffies usleep_range vabits_actual vfree @@ -657,12 +715,10 @@ iommu_group_ref_get iommu_put_dma_cookie of_dma_is_coherent - param_ops_int pci_bus_type pci_device_group # required by asix.ko - eth_mac_addr genphy_resume mdiobus_alloc_size mdiobus_free @@ -679,7 +735,6 @@ phy_print_status phy_start phy_stop - skb_copy_expand usbnet_change_mtu usbnet_get_drvinfo usbnet_get_link @@ -687,98 +742,23 @@ usbnet_set_link_ksettings usbnet_unlink_rx_urbs -# required by ath.ko - freq_reg_info - reg_initiator_name - wiphy_apply_custom_regulatory - # required by ath10k_core.ko - bcmp - cancel_delayed_work - __cfg80211_alloc_event_skb - __cfg80211_alloc_reply_skb - cfg80211_calculate_bitrate - cfg80211_find_elem_match - cfg80211_find_vendor_elem - cfg80211_get_bss - cfg80211_put_bss - __cfg80211_send_event_skb - cfg80211_vendor_cmd_reply cpu_latency_qos_add_request cpu_latency_qos_remove_request device_get_mac_address device_set_wakeup_enable firmware_request_nowarn guid_gen - idr_for_each - ieee80211_alloc_hw_nm - ieee80211_beacon_cntdwn_is_complete - ieee80211_beacon_get_template - ieee80211_beacon_get_tim - ieee80211_beacon_loss - ieee80211_beacon_update_cntdwn - ieee80211_bss_get_elem - ieee80211_channel_to_freq_khz - ieee80211_connection_loss - ieee80211_csa_finish - ieee80211_find_sta - ieee80211_find_sta_by_ifaddr - ieee80211_free_hw - ieee80211_free_txskb - ieee80211_hdrlen - ieee80211_iterate_active_interfaces_atomic - ieee80211_iterate_stations_atomic - ieee80211_iter_chan_contexts_atomic - ieee80211_manage_rx_ba_offl - ieee80211_next_txq - ieee80211_proberesp_get - ieee80211_queue_delayed_work - ieee80211_queue_work - ieee80211_radar_detected - ieee80211_ready_on_channel - ieee80211_register_hw - ieee80211_remain_on_channel_expired - ieee80211_report_low_ack - ieee80211_restart_hw - ieee80211_rx_napi - ieee80211_scan_completed - __ieee80211_schedule_txq - ieee80211_sta_register_airtime - ieee80211_stop_queue - ieee80211_stop_queues - ieee80211_tdls_oper_request - ieee80211_tx_dequeue - ieee80211_txq_get_depth - ieee80211_txq_may_transmit - ieee80211_txq_schedule_start - ieee80211_tx_rate_update - ieee80211_tx_status - ieee80211_tx_status_irqsafe - ieee80211_unregister_hw - ieee80211_wake_queue - ieee80211_wake_queues init_dummy_netdev - init_uts_ns __kfifo_alloc __kfifo_free - __local_bh_enable_ip - __nla_parse - nla_put param_ops_ulong - regulatory_hint - skb_copy skb_dequeue_tail - skb_queue_head skb_realloc_headroom strlcat strscpy - __sw_hweight16 - __sw_hweight8 thermal_cooling_device_register vzalloc - wiphy_read_of_freq_limits - wiphy_rfkill_set_hw_state - wiphy_to_ieee80211_hw # required by ath10k_pci.ko pci_clear_master @@ -801,11 +781,9 @@ iommu_map # required by ax88179_178a.ko - ethtool_op_get_link ethtool_op_get_ts_info mii_ethtool_get_link_ksettings mii_ethtool_set_link_ksettings - netif_carrier_on # required by bam_dma.ko dma_async_device_register @@ -815,7 +793,6 @@ of_dma_controller_free of_dma_controller_register pm_runtime_irq_safe - tasklet_kill tasklet_setup vchan_dma_desc_free_list vchan_find_desc @@ -823,6 +800,59 @@ vchan_tx_desc_free vchan_tx_submit +# required by cfg80211.ko + bpf_trace_run10 + bpf_trace_run7 + debugfs_rename + dev_change_net_namespace + __dev_get_by_index + dev_get_by_index + device_add + device_del + device_rename + genlmsg_multicast_allns + genlmsg_put + 
genl_register_family + genl_unregister_family + get_net_ns_by_fd + get_net_ns_by_pid + inet_csk_get_port + key_create_or_update + key_put + keyring_alloc + ktime_get_coarse_with_offset + memcmp + netif_rx_ni + netlink_broadcast + netlink_register_notifier + netlink_unicast + netlink_unregister_notifier + net_ns_type_operations + nla_find + nla_memcpy + nla_put_64bit + nla_reserve + __nla_validate + __put_net + rb_erase + rb_insert_color + register_pernet_device + request_firmware_nowait + rfkill_alloc + rfkill_blocked + rfkill_destroy + rfkill_pause_polling + rfkill_register + rfkill_resume_polling + rfkill_set_hw_state + rfkill_unregister + skb_add_rx_frag + __sock_create + trace_print_array_seq + unregister_pernet_device + verify_pkcs7_signature + wireless_nlevent_flush + # required by clk-qcom.ko __clk_determine_rate clk_fixed_factor_ops @@ -836,7 +866,6 @@ __clk_mux_determine_rate_closest divider_ro_round_rate_parent of_find_node_opts_by_path - of_prop_next_u32 pm_genpd_remove_subdomain # required by clk-rpmh.ko @@ -858,7 +887,6 @@ gpiod_get_value_cansleep gpiod_set_debounce gpiod_to_irq - system_power_efficient_wq # required by fastrpc.ko dma_buf_attach @@ -925,9 +953,6 @@ i2c_put_dma_safe_msg_buf of_machine_is_compatible -# required by i2c-qup.ko - __usecs_to_jiffies - # required by i2c-rk3x.ko clk_notifier_register clk_notifier_unregister @@ -953,12 +978,88 @@ mipi_dsi_device_unregister of_find_mipi_dsi_host_by_node +# required by mac80211.ko + alloc_netdev_mqs + __alloc_percpu_gfp + arc4_crypt + arc4_setkey + call_rcu + crc32_be + crypto_aead_decrypt + crypto_aead_encrypt + crypto_aead_setauthsize + crypto_aead_setkey + crypto_alloc_aead + crypto_alloc_shash + crypto_alloc_skcipher + crypto_destroy_tfm + __crypto_memneq + crypto_shash_digest + crypto_shash_finup + crypto_shash_setkey + crypto_shash_update + crypto_skcipher_decrypt + crypto_skcipher_encrypt + crypto_skcipher_setkey + __crypto_xor + dev_alloc_name + dev_fetch_sw_netstats + dev_printk + dev_queue_xmit + ether_setup + flush_delayed_work + free_netdev + free_percpu + get_random_u32 + __hw_addr_init + __hw_addr_sync + __hw_addr_unsync + kernel_param_lock + kernel_param_unlock + kfree_skb_list + ktime_get_seconds + ktime_get_with_offset + napi_gro_receive + netdev_set_default_ethtool_ops + netif_carrier_off + netif_receive_skb + netif_receive_skb_list + netif_rx + netif_tx_stop_all_queues + netif_tx_wake_queue + net_ratelimit + __per_cpu_offset + prandom_bytes + prandom_u32 + ___pskb_trim + rcu_barrier + register_inet6addr_notifier + register_inetaddr_notifier + register_netdevice + rhashtable_free_and_destroy + rhashtable_insert_slow + rhltable_init + __rht_bucket_nested + rht_bucket_nested + rht_bucket_nested_insert + round_jiffies + round_jiffies_relative + sg_init_one + skb_checksum_help + skb_clone_sk + skb_complete_wifi_ack + skb_ensure_writable + __skb_get_hash + __skb_gso_segment + system_freezable_wq + unregister_inet6addr_notifier + unregister_inetaddr_notifier + unregister_netdevice_many + unregister_netdevice_queue + # required by msm.ko __bitmap_andnot __bitmap_weight - bpf_trace_run1 - bpf_trace_run2 - bpf_trace_run6 bpf_trace_run8 clk_get_parent __clk_hw_register_divider @@ -975,10 +1076,8 @@ component_master_add_with_match component_master_del component_unbind_all - _ctype debugfs_create_bool debugfs_create_u64 - del_timer dev_coredumpm devfreq_recommended_opp devfreq_resume_device @@ -1198,12 +1297,9 @@ kthread_create_worker kthread_destroy_worker kthread_queue_work - kvfree - kvmalloc_node 
llist_add_batch memdup_user_nul memparse - memunmap mipi_dsi_create_packet mipi_dsi_host_register mipi_dsi_host_unregister @@ -1211,20 +1307,16 @@ mutex_lock_interruptible mutex_trylock_recursive nsecs_to_jiffies - nvmem_cell_get - nvmem_cell_put of_clk_hw_onecell_get of_device_is_available of_drm_find_bridge of_drm_find_panel - of_find_device_by_node of_find_matching_node_and_match of_get_compatible_child of_graph_get_endpoint_by_regs of_graph_get_next_endpoint of_graph_get_remote_port_parent of_icc_get - param_ops_charp phy_calibrate phy_configure pid_task @@ -1240,7 +1332,6 @@ regulator_get regulator_put reservation_ww_class - round_jiffies_up sched_set_fifo schedule_timeout_interruptible __sg_page_iter_dma_next @@ -1280,7 +1371,6 @@ dma_pool_create dma_pool_destroy dma_pool_free - flush_work free_pages gen_pool_dma_alloc_align gen_pool_dma_zalloc_align @@ -1391,7 +1481,6 @@ cpufreq_get_driver_data cpufreq_register_driver cpufreq_unregister_driver - __cpu_possible_mask dev_pm_opp_adjust_voltage dev_pm_opp_disable dev_pm_opp_enable @@ -1450,9 +1539,6 @@ # required by qcom_hwspinlock.ko devm_hwspin_lock_register - devm_regmap_field_alloc - regmap_field_read - regmap_field_update_bits_base # required by qcom_pil_info.ko __memset_io @@ -1476,7 +1562,6 @@ __num_online_cpus # required by qcom_spmi-regulator.ko - jiffies_to_usecs regulator_disable_regmap regulator_enable_regmap regulator_is_enabled_regmap @@ -1486,14 +1571,16 @@ rproc_get_by_child try_wait_for_completion -# required by qrtr-smd.ko - __pskb_pull_tail +# required by qcom_tsens.ko + debugfs_lookup + devm_thermal_zone_of_sensor_register + thermal_zone_device_update + thermal_zone_get_slope # required by qrtr-tun.ko _copy_to_iter # required by qrtr.ko - __alloc_skb autoremove_wake_function datagram_poll do_wait_intr_irq @@ -1507,7 +1594,6 @@ refcount_dec_and_mutex_lock release_sock sk_alloc - skb_copy_bits skb_copy_datagram_iter skb_free_datagram __skb_pad @@ -1526,7 +1612,6 @@ sock_queue_rcv_skb sock_register sock_unregister - synchronize_rcu # required by reboot-mode.ko devres_add @@ -1545,8 +1630,6 @@ # required by rmtfs_mem.ko alloc_chrdev_region - __class_register - class_unregister # required by rtc-pm8xxx.ko devm_request_any_context_irq @@ -1605,9 +1688,6 @@ snd_soc_of_parse_aux_devs snd_soc_of_parse_card_name -# required by snd-soc-rl6231.ko - gcd - # required by snd-soc-rt5663.ko regcache_cache_bypass snd_soc_add_component_controls @@ -1666,7 +1746,6 @@ spi_delay_exec spi_finalize_current_message spi_get_next_queued_message - tasklet_init # required by spmi-pmic-arb.ko irq_domain_set_info @@ -1686,7 +1765,6 @@ dma_sync_sg_for_cpu dma_sync_sg_for_device __free_pages - preempt_schedule __sg_page_iter_next # required by ufs_qcom.ko diff --git a/android/abi_gki_aarch64_generic b/android/abi_gki_aarch64_generic index 98995368def0..20a0ee2d7683 100644 --- a/android/abi_gki_aarch64_generic +++ b/android/abi_gki_aarch64_generic @@ -115,35 +115,6 @@ cdev_device_add cdev_device_del cdev_init - __cfg80211_alloc_event_skb - __cfg80211_alloc_reply_skb - cfg80211_chandef_create - cfg80211_ch_switch_notify - cfg80211_connect_done - cfg80211_del_sta_sinfo - cfg80211_disconnected - cfg80211_external_auth_request - cfg80211_find_elem_match - cfg80211_get_bss - cfg80211_ibss_joined - cfg80211_inform_bss_frame_data - cfg80211_mgmt_tx_status - cfg80211_michael_mic_failure - cfg80211_new_sta - cfg80211_port_authorized - cfg80211_put_bss - cfg80211_ready_on_channel - cfg80211_remain_on_channel_expired - cfg80211_roamed - cfg80211_rx_mgmt_khz - 
cfg80211_scan_done - cfg80211_sched_scan_results - cfg80211_sched_scan_stopped - cfg80211_sched_scan_stopped_rtnl - __cfg80211_send_event_skb - cfg80211_unlink_bss - cfg80211_unregister_wdev - cfg80211_vendor_cmd_reply __cfi_slowpath __check_object_size __class_create @@ -887,9 +858,6 @@ idr_for_each idr_preload idr_remove - ieee80211_channel_to_freq_khz - ieee80211_freq_khz_to_channel - ieee80211_get_channel_khz iio_device_unregister import_iovec in6_pton @@ -1503,7 +1471,6 @@ regulator_set_voltage regulator_set_voltage_sel_regmap regulator_unregister - regulatory_hint release_firmware __release_region remap_pfn_range @@ -2195,11 +2162,6 @@ watchdog_register_device watchdog_set_restart_priority watchdog_unregister_device - wiphy_apply_custom_regulatory - wiphy_free - wiphy_new_nm - wiphy_register - wiphy_unregister woken_wake_function work_busy __xfrm_state_destroy diff --git a/android/abi_gki_aarch64_hikey960 b/android/abi_gki_aarch64_hikey960 index 2515273bd123..93f48f3642bd 100644 --- a/android/abi_gki_aarch64_hikey960 +++ b/android/abi_gki_aarch64_hikey960 @@ -166,12 +166,6 @@ ida_alloc_range ida_destroy ida_free - ieee80211_channel_to_freq_khz - ieee80211_connection_loss - ieee80211_find_sta - ieee80211_get_hdrlen_from_skb - ieee80211_queue_delayed_work - ieee80211_stop_rx_ba_session __init_swait_queue_head init_timer_key init_wait_entry @@ -1225,17 +1219,10 @@ tcpci_unregister_port # required by wl18xx.ko - __cfg80211_alloc_event_skb - __cfg80211_send_event_skb - ieee80211_radar_detected kstrtou8_from_user # required by wlcore.ko bcmp - __cfg80211_alloc_reply_skb - cfg80211_find_elem_match - cfg80211_find_vendor_elem - cfg80211_vendor_cmd_reply complete_all consume_skb device_create_bin_file @@ -1244,40 +1231,6 @@ dev_pm_set_dedicated_wake_irq disable_irq_nosync get_random_u32 - ieee80211_alloc_hw_nm - ieee80211_ap_probereq_get - ieee80211_beacon_get_tim - ieee80211_chswitch_done - ieee80211_cqm_beacon_loss_notify - ieee80211_cqm_rssi_notify - ieee80211_csa_finish - ieee80211_free_hw - ieee80211_free_txskb - ieee80211_freq_khz_to_channel - ieee80211_hdrlen - ieee80211_iterate_active_interfaces_atomic - ieee80211_iterate_interfaces - ieee80211_nullfunc_get - ieee80211_probereq_get - ieee80211_proberesp_get - ieee80211_pspoll_get - ieee80211_queue_work - ieee80211_ready_on_channel - ieee80211_register_hw - ieee80211_remain_on_channel_expired - ieee80211_report_low_ack - ieee80211_restart_hw - ieee80211_rx_napi - ieee80211_scan_completed - ieee80211_sched_scan_results - ieee80211_sched_scan_stopped - ieee80211_sta_ps_transition - ieee80211_stop_queue - ieee80211_stop_queues - ieee80211_tx_status - ieee80211_unregister_hw - ieee80211_wake_queue - ieee80211_wake_queues jiffies_to_msecs jiffies_to_usecs __local_bh_enable_ip @@ -1286,14 +1239,12 @@ no_seek_end_llseek _raw_spin_trylock request_firmware_nowait - rfc1042_header skb_dequeue skb_push skb_put skb_queue_head skb_trim vscnprintf - wiphy_to_ieee80211_hw # required by wlcore_sdio.ko platform_device_add diff --git a/android/abi_gki_aarch64_mtk b/android/abi_gki_aarch64_mtk index 15b9ba8265c0..3514b621e32a 100644 --- a/android/abi_gki_aarch64_mtk +++ b/android/abi_gki_aarch64_mtk @@ -92,38 +92,6 @@ cdev_device_add cdev_device_del cdev_init - __cfg80211_alloc_event_skb - __cfg80211_alloc_reply_skb - cfg80211_cac_event - cfg80211_chandef_create - cfg80211_ch_switch_notify - cfg80211_classify8021d - cfg80211_connect_done - cfg80211_del_sta_sinfo - cfg80211_disconnected - cfg80211_external_auth_request - cfg80211_find_elem_match - 
cfg80211_ft_event - cfg80211_get_bss - cfg80211_inform_bss_data - cfg80211_inform_bss_frame_data - cfg80211_mgmt_tx_status - cfg80211_michael_mic_failure - cfg80211_new_sta - cfg80211_pmksa_candidate_notify - cfg80211_put_bss - cfg80211_radar_event - cfg80211_ready_on_channel - cfg80211_remain_on_channel_expired - cfg80211_roamed - cfg80211_rx_mgmt_khz - cfg80211_scan_done - cfg80211_sched_scan_results - cfg80211_sched_scan_stopped - __cfg80211_send_event_skb - cfg80211_tdls_oper_request - cfg80211_unlink_bss - cfg80211_vendor_cmd_reply __cfi_slowpath __check_object_size check_preempt_curr @@ -829,9 +797,6 @@ idr_for_each idr_get_next idr_remove - ieee80211_channel_to_freq_khz - ieee80211_freq_khz_to_channel - ieee80211_get_channel_khz iio_alloc_pollfunc iio_buffer_init iio_buffer_put @@ -1482,7 +1447,6 @@ regulator_set_voltage_time regulator_set_voltage_time_sel regulator_sync_voltage - regulatory_hint release_firmware release_pages __release_region @@ -2179,11 +2143,6 @@ __warn_printk watchdog_init_timeout watchdog_set_restart_priority - wiphy_apply_custom_regulatory - wiphy_free - wiphy_new_nm - wiphy_register - wiphy_unregister wireless_send_event woken_wake_function work_busy diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index e10238d2debb..8b10073acacb 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -121,37 +121,6 @@ cdev_device_add cdev_device_del cdev_init - __cfg80211_alloc_event_skb - __cfg80211_alloc_reply_skb - cfg80211_calculate_bitrate - cfg80211_chandef_create - cfg80211_ch_switch_notify - cfg80211_connect_done - cfg80211_del_sta_sinfo - cfg80211_disconnected - cfg80211_external_auth_request - cfg80211_ft_event - cfg80211_get_bss - cfg80211_gtk_rekey_notify - cfg80211_inform_bss_frame_data - cfg80211_mgmt_tx_status - cfg80211_michael_mic_failure - cfg80211_new_sta - cfg80211_pmksa_candidate_notify - cfg80211_put_bss - cfg80211_ready_on_channel - cfg80211_remain_on_channel_expired - cfg80211_roamed - cfg80211_rx_mgmt_khz - cfg80211_rx_unprot_mlme_mgmt - cfg80211_scan_done - cfg80211_sched_scan_results - __cfg80211_send_event_skb - cfg80211_stop_iface - cfg80211_tdls_oper_request - cfg80211_unlink_bss - cfg80211_update_owe_info_event - cfg80211_vendor_cmd_reply __cfi_slowpath cgroup_path_ns cgroup_taskset_first @@ -1084,9 +1053,6 @@ idr_preload idr_remove idr_replace - ieee80211_freq_khz_to_channel - ieee80211_get_channel_khz - ieee80211_hdrlen iio_channel_get_all iio_read_channel_processed import_iovec @@ -1960,7 +1926,6 @@ regulator_set_mode regulator_set_voltage regulator_unregister_notifier - regulatory_set_wiphy_regd release_firmware __release_region release_sock @@ -2923,10 +2888,6 @@ wakeup_source_register wakeup_source_unregister __warn_printk - wiphy_free - wiphy_new_nm - wiphy_register - wiphy_unregister wireless_send_event woken_wake_function work_busy diff --git a/android/abi_gki_aarch64_unisoc b/android/abi_gki_aarch64_unisoc index c1c6e5e983bc..7387328527ff 100644 --- a/android/abi_gki_aarch64_unisoc +++ b/android/abi_gki_aarch64_unisoc @@ -317,9 +317,6 @@ idr_find idr_for_each idr_remove - ieee80211_channel_to_freq_khz - ieee80211_freq_khz_to_channel - ieee80211_get_channel_khz iget_failed iget5_locked ignore_console_lock_warning @@ -1956,40 +1953,11 @@ # required by sprdwl_ng.ko bcmp build_skb - __cfg80211_alloc_event_skb - __cfg80211_alloc_reply_skb - cfg80211_chandef_create - cfg80211_ch_switch_notify - cfg80211_connect_done - cfg80211_cqm_rssi_notify - cfg80211_del_sta_sinfo - cfg80211_disconnected - 
cfg80211_find_elem_match - cfg80211_get_bss - cfg80211_ibss_joined - cfg80211_inform_bss_data - cfg80211_mgmt_tx_status - cfg80211_michael_mic_failure - cfg80211_new_sta - cfg80211_put_bss - cfg80211_ready_on_channel - cfg80211_remain_on_channel_expired - cfg80211_roamed - cfg80211_rx_mgmt - cfg80211_rx_unprot_mlme_mgmt - cfg80211_scan_done - cfg80211_sched_scan_results - __cfg80211_send_event_skb - cfg80211_tdls_oper_request - cfg80211_unlink_bss - cfg80211_unregister_wdev - cfg80211_vendor_cmd_reply console_printk consume_skb _ctype dev_get_by_index down_timeout - freq_reg_info genlmsg_put jiffies_to_usecs kfree_skb_list @@ -2007,7 +1975,6 @@ register_inet6addr_notifier register_inetaddr_notifier register_netdevice - regulatory_hint rtnl_lock rtnl_unlock simple_open @@ -2017,10 +1984,6 @@ unregister_inet6addr_notifier unregister_inetaddr_notifier unregister_netdevice_queue - wiphy_free - wiphy_new_nm - wiphy_register - wiphy_unregister # required by sunwave_fp.ko input_unregister_device diff --git a/android/abi_gki_aarch64_virtual_device b/android/abi_gki_aarch64_virtual_device index 0914089ac299..724632832596 100644 --- a/android/abi_gki_aarch64_virtual_device +++ b/android/abi_gki_aarch64_virtual_device @@ -34,8 +34,6 @@ cancel_delayed_work_sync cancel_work_sync capable - cfg80211_inform_bss_data - cfg80211_put_bss __cfi_slowpath __check_object_size __class_create @@ -565,10 +563,6 @@ # required by mac80211_hwsim.ko alloc_netdev_mqs - __cfg80211_alloc_event_skb - __cfg80211_alloc_reply_skb - __cfg80211_send_event_skb - cfg80211_vendor_cmd_reply debugfs_attr_read debugfs_attr_write dev_alloc_name @@ -583,28 +577,6 @@ hrtimer_forward hrtimer_init hrtimer_start_range_ns - ieee80211_alloc_hw_nm - ieee80211_beacon_cntdwn_is_complete - ieee80211_beacon_get_tim - ieee80211_csa_finish - ieee80211_free_hw - ieee80211_free_txskb - ieee80211_get_buffered_bc - ieee80211_get_tx_rates - ieee80211_iterate_active_interfaces_atomic - ieee80211_probereq_get - ieee80211_queue_delayed_work - ieee80211_radar_detected - ieee80211_ready_on_channel - ieee80211_register_hw - ieee80211_remain_on_channel_expired - ieee80211_rx_irqsafe - ieee80211_scan_completed - ieee80211_stop_queues - ieee80211_stop_tx_ba_cb_irqsafe - ieee80211_tx_status_irqsafe - ieee80211_unregister_hw - ieee80211_wake_queues init_net __netdev_alloc_skb netif_rx @@ -619,7 +591,6 @@ nla_put param_ops_ushort register_pernet_device - regulatory_hint rhashtable_destroy rhashtable_init rhashtable_insert_slow @@ -635,7 +606,6 @@ skb_trim skb_unlink unregister_pernet_device - wiphy_apply_custom_regulatory # required by md-mod.ko ack_all_badblocks @@ -940,9 +910,6 @@ devm_mfd_add_devices # required by virt_wifi.ko - cfg80211_connect_done - cfg80211_disconnected - cfg80211_scan_done __dev_get_by_index dev_printk __module_get @@ -952,13 +919,8 @@ rtnl_link_unregister skb_clone unregister_netdevice_many - wiphy_free - wiphy_new_nm - wiphy_register - wiphy_unregister # required by virt_wifi_sim.ko - ieee80211_get_channel_khz release_firmware request_firmware diff --git a/arch/arm64/configs/db845c_gki.fragment b/arch/arm64/configs/db845c_gki.fragment index d487757cfa3c..e7c8f04055e6 100644 --- a/arch/arm64/configs/db845c_gki.fragment +++ b/arch/arm64/configs/db845c_gki.fragment @@ -1,3 +1,8 @@ +CONFIG_CFG80211=m +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEFAULT_PS is not set +# CONFIG_CFG80211_CRDA_SUPPORT is not set +CONFIG_MAC80211=m CONFIG_QRTR=m CONFIG_QRTR_TUN=m CONFIG_SCSI_UFS_QCOM=m diff --git a/arch/arm64/configs/gki_defconfig 
b/arch/arm64/configs/gki_defconfig index 5ae9733b5cfa..230fe3b32171 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -268,11 +268,6 @@ CONFIG_BT_HCIUART=y CONFIG_BT_HCIUART_LL=y CONFIG_BT_HCIUART_BCM=y CONFIG_BT_HCIUART_QCA=y -CONFIG_CFG80211=y -CONFIG_NL80211_TESTMODE=y -# CONFIG_CFG80211_DEFAULT_PS is not set -# CONFIG_CFG80211_CRDA_SUPPORT is not set -CONFIG_MAC80211=y CONFIG_RFKILL=y CONFIG_PCI=y CONFIG_PCIEPORTBUS=y diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig index 0b110382103f..fe73eef03f0f 100644 --- a/arch/x86/configs/gki_defconfig +++ b/arch/x86/configs/gki_defconfig @@ -244,11 +244,6 @@ CONFIG_BT_HCIUART=y CONFIG_BT_HCIUART_LL=y CONFIG_BT_HCIUART_BCM=y CONFIG_BT_HCIUART_QCA=y -CONFIG_CFG80211=y -CONFIG_NL80211_TESTMODE=y -# CONFIG_CFG80211_DEFAULT_PS is not set -# CONFIG_CFG80211_CRDA_SUPPORT is not set -CONFIG_MAC80211=y CONFIG_RFKILL=y CONFIG_PCI=y CONFIG_PCIEPORTBUS=y From 6ae626fd8e09c52a976c004a5379e0e30cbc38df Mon Sep 17 00:00:00 2001 From: John Stultz Date: Tue, 22 Jun 2021 22:00:39 +0000 Subject: [PATCH 60/62] ANDROID: ABI: hikey960_gki.fragment: Add cfg80211 and mac80211 as modules In change Iab3971cd0d78d669536b8eb0505c60caa3aafeee the cfg80211 and mac80211 drivers were switched to modules, so we need to add them as such to the hikey960_gki.fragment. With this change, hikey960 boots and wifi comes up. Bug: 189918667 Fixes: Iab3971cd0d78d669536b8eb0505c60caa3aafeee Signed-off-by: John Stultz Change-Id: I8cd3dd3dc76852e270b7b4ba518323af92ff6dda --- arch/arm64/configs/hikey960_gki.fragment | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/configs/hikey960_gki.fragment b/arch/arm64/configs/hikey960_gki.fragment index ba7e473b55ae..fcc01e20dc7f 100644 --- a/arch/arm64/configs/hikey960_gki.fragment +++ b/arch/arm64/configs/hikey960_gki.fragment @@ -50,3 +50,8 @@ CONFIG_PHY_HI3660_USB=m CONFIG_PINCTRL_SINGLE=m CONFIG_DMABUF_HEAPS_CMA=m CONFIG_DMABUF_HEAPS_SYSTEM=m +CONFIG_CFG80211=m +CONFIG_NL80211_TESTMODE=y +# CONFIG_CFG80211_DEFAULT_PS is not set +# CONFIG_CFG80211_CRDA_SUPPORT is not set +CONFIG_MAC80211=m From 4e90d52c8272015d89b0ffac5f0acc5d2ac954cf Mon Sep 17 00:00:00 2001 From: Todd Kjos Date: Fri, 25 Jun 2021 15:56:40 -0700 Subject: [PATCH 61/62] ANDROID: GKI: 7/2/2021 KMI update Set KMI_GENERATION=8 for 7/2 KMI update Leaf changes summary: 3604 artifacts changed (2 filtered out) Changed leaf types summary: 18 (2 filtered out) leaf types changed Removed/Changed/Added functions summary: 121 Removed, 3342 Changed, 55 Added functions Removed/Changed/Added variables summary: 3 Removed, 62 Changed, 3 Added variables 121 Removed functions: [D] 'function sk_buff* __cfg80211_alloc_event_skb(wiphy*, wireless_dev*, nl80211_commands, nl80211_attrs, unsigned int, int, int, gfp_t)' [D] 'function sk_buff* __cfg80211_alloc_reply_skb(wiphy*, nl80211_commands, nl80211_attrs, int)' [D] 'function void __cfg80211_send_event_skb(sk_buff*, gfp_t)' [D] 'function void __ieee80211_schedule_txq(ieee80211_hw*, ieee80211_txq*, bool)' [D] 'function int __traceiter_android_vh_force_compatible_post(void*, void*)' [D] 'function int __traceiter_android_vh_force_compatible_pre(void*, void*)' [D] 'function void cfg80211_cac_event(net_device*, const cfg80211_chan_def*, nl80211_radar_event, gfp_t)' [D] 'function u32 cfg80211_calculate_bitrate(rate_info*)' [D] 'function void cfg80211_ch_switch_notify(net_device*, cfg80211_chan_def*)' [D] 'function void cfg80211_chandef_create(cfg80211_chan_def*, ieee80211_channel*, 
nl80211_channel_type)' [D] 'function unsigned int cfg80211_classify8021d(sk_buff*, cfg80211_qos_map*)' [D] 'function void cfg80211_connect_done(net_device*, cfg80211_connect_resp_params*, gfp_t)' [D] 'function void cfg80211_del_sta_sinfo(net_device*, const u8*, station_info*, gfp_t)' [D] 'function void cfg80211_disconnected(net_device*, u16, const u8*, size_t, bool, gfp_t)' [D] 'function int cfg80211_external_auth_request(net_device*, cfg80211_external_auth_params*, gfp_t)' [D] 'function const element* cfg80211_find_elem_match(u8, const u8*, unsigned int, const u8*, unsigned int, unsigned int)' [D] 'function const element* cfg80211_find_vendor_elem(unsigned int, int, const u8*, unsigned int)' [D] 'function void cfg80211_ft_event(net_device*, cfg80211_ft_event_params*)' [D] 'function cfg80211_bss* cfg80211_get_bss(wiphy*, ieee80211_channel*, const u8*, const u8*, size_t, ieee80211_bss_type, ieee80211_privacy)' [D] 'function void cfg80211_gtk_rekey_notify(net_device*, const u8*, const u8*, gfp_t)' [D] 'function void cfg80211_ibss_joined(net_device*, const u8*, ieee80211_channel*, gfp_t)' [D] 'function cfg80211_bss* cfg80211_inform_bss_data(wiphy*, cfg80211_inform_bss*, cfg80211_bss_frame_type, const u8*, u64, u16, u16, const u8*, size_t, gfp_t)' [D] 'function cfg80211_bss* cfg80211_inform_bss_frame_data(wiphy*, cfg80211_inform_bss*, ieee80211_mgmt*, size_t, gfp_t)' [D] 'function void cfg80211_mgmt_tx_status(wireless_dev*, u64, const u8*, size_t, bool, gfp_t)' [D] 'function void cfg80211_michael_mic_failure(net_device*, const u8*, nl80211_key_type, int, const u8*, gfp_t)' [D] 'function void cfg80211_new_sta(net_device*, const u8*, station_info*, gfp_t)' [D] 'function void cfg80211_pmksa_candidate_notify(net_device*, int, const u8*, bool, gfp_t)' [D] 'function void cfg80211_port_authorized(net_device*, const u8*, gfp_t)' [D] 'function void cfg80211_put_bss(wiphy*, cfg80211_bss*)' [D] 'function void cfg80211_radar_event(wiphy*, cfg80211_chan_def*, gfp_t)' [D] 'function void cfg80211_ready_on_channel(wireless_dev*, u64, ieee80211_channel*, unsigned int, gfp_t)' [D] 'function void cfg80211_remain_on_channel_expired(wireless_dev*, u64, ieee80211_channel*, gfp_t)' [D] 'function void cfg80211_roamed(net_device*, cfg80211_roam_info*, gfp_t)' [D] 'function bool cfg80211_rx_mgmt_khz(wireless_dev*, int, int, const u8*, size_t, u32)' [D] 'function void cfg80211_rx_unprot_mlme_mgmt(net_device*, const u8*, size_t)' [D] 'function void cfg80211_scan_done(cfg80211_scan_request*, cfg80211_scan_info*)' [D] 'function void cfg80211_sched_scan_results(wiphy*, u64)' [D] 'function void cfg80211_sched_scan_stopped(wiphy*, u64)' [D] 'function void cfg80211_sched_scan_stopped_rtnl(wiphy*, u64)' [D] 'function void cfg80211_stop_iface(wiphy*, wireless_dev*, gfp_t)' [D] 'function void cfg80211_tdls_oper_request(net_device*, const u8*, nl80211_tdls_operation, u16, gfp_t)' [D] 'function void cfg80211_unlink_bss(wiphy*, cfg80211_bss*)' [D] 'function void cfg80211_unregister_wdev(wireless_dev*)' [D] 'function void cfg80211_update_owe_info_event(net_device*, cfg80211_update_owe_info*, gfp_t)' [D] 'function int cfg80211_vendor_cmd_reply(sk_buff*)' [D] 'function const ieee80211_reg_rule* freq_reg_info(wiphy*, u32)' [D] 'function ieee80211_hw* ieee80211_alloc_hw_nm(size_t, const ieee80211_ops*, const char*)' [D] 'function sk_buff* ieee80211_ap_probereq_get(ieee80211_hw*, ieee80211_vif*)' [D] 'function bool ieee80211_beacon_cntdwn_is_complete(ieee80211_vif*)' [D] 'function sk_buff* ieee80211_beacon_get_template(ieee80211_hw*, 
ieee80211_vif*, ieee80211_mutable_offsets*)' [D] 'function sk_buff* ieee80211_beacon_get_tim(ieee80211_hw*, ieee80211_vif*, u16*, u16*)' [D] 'function void ieee80211_beacon_loss(ieee80211_vif*)' [D] 'function u8 ieee80211_beacon_update_cntdwn(ieee80211_vif*)' [D] 'function const element* ieee80211_bss_get_elem(cfg80211_bss*, u8)' [D] 'function u32 ieee80211_channel_to_freq_khz(int, nl80211_band)' [D] 'function void ieee80211_chswitch_done(ieee80211_vif*, bool)' [D] 'function void ieee80211_connection_loss(ieee80211_vif*)' [D] 'function void ieee80211_cqm_beacon_loss_notify(ieee80211_vif*, gfp_t)' [D] 'function void ieee80211_cqm_rssi_notify(ieee80211_vif*, nl80211_cqm_rssi_threshold_event, s32, gfp_t)' [D] 'function void ieee80211_csa_finish(ieee80211_vif*)' [D] 'function ieee80211_sta* ieee80211_find_sta(ieee80211_vif*, const u8*)' [D] 'function ieee80211_sta* ieee80211_find_sta_by_ifaddr(ieee80211_hw*, const u8*, const u8*)' [D] 'function void ieee80211_free_hw(ieee80211_hw*)' [D] 'function void ieee80211_free_txskb(ieee80211_hw*, sk_buff*)' [D] 'function int ieee80211_freq_khz_to_channel(u32)' [D] 'function sk_buff* ieee80211_get_buffered_bc(ieee80211_hw*, ieee80211_vif*)' [D] 'function ieee80211_channel* ieee80211_get_channel_khz(wiphy*, u32)' [D] 'function unsigned int ieee80211_get_hdrlen_from_skb(const sk_buff*)' [D] 'function void ieee80211_get_tx_rates(ieee80211_vif*, ieee80211_sta*, sk_buff*, ieee80211_tx_rate*, int)' [D] 'function unsigned int ieee80211_hdrlen(__le16)' [D] 'function void ieee80211_iter_chan_contexts_atomic(ieee80211_hw*, void (ieee80211_hw*, ieee80211_chanctx_conf*, void*)*, void*)' [D] 'function void ieee80211_iterate_active_interfaces_atomic(ieee80211_hw*, u32, void (void*, u8*, ieee80211_vif*)*, void*)' [D] 'function void ieee80211_iterate_interfaces(ieee80211_hw*, u32, void (void*, u8*, ieee80211_vif*)*, void*)' [D] 'function void ieee80211_iterate_stations_atomic(ieee80211_hw*, void (void*, ieee80211_sta*)*, void*)' [D] 'function void ieee80211_manage_rx_ba_offl(ieee80211_vif*, const u8*, unsigned int)' [D] 'function ieee80211_txq* ieee80211_next_txq(ieee80211_hw*, u8)' [D] 'function sk_buff* ieee80211_nullfunc_get(ieee80211_hw*, ieee80211_vif*, bool)' [D] 'function sk_buff* ieee80211_probereq_get(ieee80211_hw*, const u8*, const u8*, size_t, size_t)' [D] 'function sk_buff* ieee80211_proberesp_get(ieee80211_hw*, ieee80211_vif*)' [D] 'function sk_buff* ieee80211_pspoll_get(ieee80211_hw*, ieee80211_vif*)' [D] 'function void ieee80211_queue_delayed_work(ieee80211_hw*, delayed_work*, unsigned long int)' [D] 'function void ieee80211_queue_work(ieee80211_hw*, work_struct*)' [D] 'function void ieee80211_radar_detected(ieee80211_hw*)' [D] 'function void ieee80211_ready_on_channel(ieee80211_hw*)' [D] 'function int ieee80211_register_hw(ieee80211_hw*)' [D] 'function void ieee80211_remain_on_channel_expired(ieee80211_hw*)' [D] 'function void ieee80211_report_low_ack(ieee80211_sta*, u32)' [D] 'function void ieee80211_restart_hw(ieee80211_hw*)' [D] 'function void ieee80211_rx_irqsafe(ieee80211_hw*, sk_buff*)' [D] 'function void ieee80211_rx_napi(ieee80211_hw*, ieee80211_sta*, sk_buff*, napi_struct*)' [D] 'function void ieee80211_scan_completed(ieee80211_hw*, cfg80211_scan_info*)' [D] 'function void ieee80211_sched_scan_results(ieee80211_hw*)' [D] 'function void ieee80211_sched_scan_stopped(ieee80211_hw*)' [D] 'function int ieee80211_sta_ps_transition(ieee80211_sta*, bool)' [D] 'function void ieee80211_sta_register_airtime(ieee80211_sta*, u8, u32, u32)' [D] 'function void 
ieee80211_stop_queue(ieee80211_hw*, int)' [D] 'function void ieee80211_stop_queues(ieee80211_hw*)' [D] 'function void ieee80211_stop_rx_ba_session(ieee80211_vif*, u16, const u8*)' [D] 'function void ieee80211_stop_tx_ba_cb_irqsafe(ieee80211_vif*, const u8*, u16)' [D] 'function void ieee80211_tdls_oper_request(ieee80211_vif*, const u8*, nl80211_tdls_operation, u16, gfp_t)' [D] 'function sk_buff* ieee80211_tx_dequeue(ieee80211_hw*, ieee80211_txq*)' [D] 'function void ieee80211_tx_rate_update(ieee80211_hw*, ieee80211_sta*, ieee80211_tx_info*)' [D] 'function void ieee80211_tx_status(ieee80211_hw*, sk_buff*)' [D] 'function void ieee80211_tx_status_irqsafe(ieee80211_hw*, sk_buff*)' [D] 'function void ieee80211_txq_get_depth(ieee80211_txq*, unsigned long int*, unsigned long int*)' [D] 'function bool ieee80211_txq_may_transmit(ieee80211_hw*, ieee80211_txq*)' [D] 'function void ieee80211_txq_schedule_start(ieee80211_hw*, u8)' [D] 'function void ieee80211_unregister_hw(ieee80211_hw*)' [D] 'function void ieee80211_wake_queue(ieee80211_hw*, int)' [D] 'function void ieee80211_wake_queues(ieee80211_hw*)' [D] 'function const char* reg_initiator_name(nl80211_reg_initiator)' [D] 'function int regulatory_hint(wiphy*, const char*)' [D] 'function int regulatory_set_wiphy_regd(wiphy*, ieee80211_regdomain*)' [D] 'function void wiphy_apply_custom_regulatory(wiphy*, const ieee80211_regdomain*)' [D] 'function void wiphy_free(wiphy*)' [D] 'function wiphy* wiphy_new_nm(const cfg80211_ops*, int, const char*)' [D] 'function void wiphy_read_of_freq_limits(wiphy*)' [D] 'function int wiphy_register(wiphy*)' [D] 'function void wiphy_rfkill_set_hw_state(wiphy*, bool)' [D] 'function ieee80211_hw* wiphy_to_ieee80211_hw(wiphy*)' [D] 'function void wiphy_unregister(wiphy*)' 55 Added functions: [A] 'function void __hw_addr_init(netdev_hw_addr_list*)' [A] 'function int __hw_addr_sync(netdev_hw_addr_list*, netdev_hw_addr_list*, int)' [A] 'function void __hw_addr_unsync(netdev_hw_addr_list*, netdev_hw_addr_list*, int)' [A] 'function void __put_net(net*)' [A] 'function int __sock_create(net*, int, int, int, socket**, int)' [A] 'function int __traceiter_android_vh_rproc_recovery(void*, rproc*)' [A] 'function sk_buff* alloc_can_err_skb(net_device*, can_frame**)' [A] 'function sk_buff* alloc_can_skb(net_device*, can_frame**)' [A] 'function net_device* alloc_candev_mqs(int, unsigned int, unsigned int, unsigned int)' [A] 'function void arc4_crypt(arc4_ctx*, u8*, const u8*, unsigned int)' [A] 'function int arc4_setkey(arc4_ctx*, const u8*, unsigned int)' [A] 'function int can_change_mtu(net_device*, int)' [A] 'function void can_free_echo_skb(net_device*, unsigned int)' [A] 'function unsigned int can_get_echo_skb(net_device*, unsigned int)' [A] 'function int can_put_echo_skb(sk_buff*, net_device*, unsigned int)' [A] 'function void close_candev(net_device*)' [A] 'function u32 crc32_be(u32, const unsigned char*, size_t)' [A] 'function dentry* debugfs_rename(dentry*, dentry*, dentry*, const char*)' [A] 'function int dev_change_net_namespace(net_device*, net*, const char*)' [A] 'function void dev_fetch_sw_netstats(rtnl_link_stats64*, const pcpu_sw_netstats*)' [A] 'function void free_candev(net_device*)' [A] 'function int genlmsg_multicast_allns(const genl_family*, sk_buff*, u32, unsigned int, gfp_t)' [A] 'function net* get_net_ns_by_fd(int)' [A] 'function net* get_net_ns_by_pid(pid_t)' [A] 'function int inet_csk_get_port(sock*, unsigned short int)' [A] 'function void kernel_param_lock(module*)' [A] 'function void kernel_param_unlock(module*)' 
[A] 'function key_ref_t key_create_or_update(key_ref_t, const char*, const char*, void*, size_t, key_perm_t, unsigned long int)' [A] 'function void key_put(key*)' [A] 'function key* keyring_alloc(const char*, kuid_t, kgid_t, const cred*, key_perm_t, unsigned long int, key_restriction*, key*)' [A] 'function ktime_t ktime_get_coarse_with_offset(tk_offsets)' [A] 'function void netdev_set_default_ethtool_ops(net_device*, const ethtool_ops*)' [A] 'function int open_candev(net_device*)' [A] 'function int register_candev(net_device*)' [A] 'function bool rfkill_blocked(rfkill*)' [A] 'function void rfkill_pause_polling(rfkill*)' [A] 'function void rfkill_resume_polling(rfkill*)' [A] 'function bool rfkill_set_hw_state(rfkill*, bool)' [A] 'function void rhashtable_free_and_destroy(rhashtable*, void (void*, void*)*, void*)' [A] 'function int rhltable_init(rhltable*, const rhashtable_params*)' [A] 'function unsigned long int round_jiffies(unsigned long int)' [A] 'function void rproc_coredump(rproc*)' [A] 'function void rproc_coredump_cleanup(rproc*)' [A] 'function int skb_checksum_help(sk_buff*)' [A] 'function sk_buff* skb_clone_sk(sk_buff*)' [A] 'function void skb_complete_wifi_ack(sk_buff*, bool)' [A] 'function void sock_efree(sk_buff*)' [A] 'function void tty_hangup(tty_struct*)' [A] 'function int tty_mode_ioctl(tty_struct*, file*, unsigned int, unsigned long int)' [A] 'function void unregister_candev(net_device*)' [A] 'function void usb_anchor_urb(urb*, usb_anchor*)' [A] 'function void usb_kill_anchored_urbs(usb_anchor*)' [A] 'function void usb_unanchor_urb(urb*)' [A] 'function int verify_pkcs7_signature(void*, size_t, void*, size_t, key*, key_being_used_for, int (void*, void*, typedef size_t, typedef size_t)*, void*)' [A] 'function void wireless_nlevent_flush()' 3342 functions with some sub-type change: [C] 'function void* PDE_DATA(const inode*)' at generic.c:799:1 has some sub-type changes: CRC (modversions) changed from 0x1c3ef274 to 0xf1429f7e [C] 'function void __ClearPageMovable(page*)' at compaction.c:138:1 has some sub-type changes: CRC (modversions) changed from 0xdc28d620 to 0xd8bcfba7 [C] 'function void __SetPageMovable(page*, address_space*)' at compaction.c:130:1 has some sub-type changes: CRC (modversions) changed from 0xd7b7b883 to 0x7389406a ... 3339 omitted; 3342 symbols have only CRC changes 3 Removed variables: [D] 'tracepoint __tracepoint_android_vh_force_compatible_post' [D] 'tracepoint __tracepoint_android_vh_force_compatible_pre' [D] 'const unsigned char rfc1042_header[6]' 3 Added variables: [A] 'tracepoint __tracepoint_android_vh_rproc_recovery' [A] 'const char hex_asc_upper[17]' [A] 'const kobj_ns_type_operations net_ns_type_operations' 62 Changed variables: [C] 'net init_net' was changed at net_namespace.c:47:1: size of symbol changed from 4416 to 4544 CRC (modversions) changed from 0x144cd521 to 0x661d880a type of variable changed: type size changed from 35328 to 36352 (in bits) 1 data member insertion: 'netns_can can', at offset 34368 (in bits) at net_namespace.h:183:1 there are data member changes: 2 ('netns_xdp xdp' .. 
          'sock* diag_nlsk') offsets changed (by +1408 bits)
      3735 impacted interfaces
  [C] 'rq runqueues' was changed at core.c:56:1:
    size of symbol changed from 4544 to 4672
    CRC (modversions) changed from 0x3e534b4f to 0x27777ba4
    type of variable changed:
      type size changed from 36352 to 37376 (in bits)
      there are data member changes:
        type 'struct cfs_rq' of 'rq::cfs' changed:
          type size changed from 3072 to 4096 (in bits)
          1 data member insertion:
            'u64 android_vendor_data1[16]', at offset 2752 (in bits) at sched.h:617:1
          3735 impacted interfaces
        61 ('rt_rq rt' .. 'u64 android_kabi_reserved4') offsets changed (by +1024 bits)
      3735 impacted interfaces

  [C] 'bus_type amba_bustype' was changed at bus.c:215:1:
    CRC (modversions) changed from 0x13c06cac to 0x1f5d98ee
  [C] 'neigh_table arp_tbl' was changed at arp.c:152:1:
    CRC (modversions) changed from 0x56697f62 to 0x16be2042
  [C] 'const address_space_operations balloon_aops' was changed at balloon_compaction.c:253:1:
    CRC (modversions) changed from 0xf0207a10 to 0xf3018b56
  ... 57 omitted; 60 symbols have only CRC changes

  'enum attr_idn at ufs.h:128:1' changed:
    type size hasn't changed
    1 enumerator deletion:
      'attr_idn::QUERY_ATTR_IDN_RESERVED' value '1'
    1 enumerator insertion:
      'attr_idn::QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD' value '1'
    2 impacted interfaces

  'struct binder_transaction at binder_internal.h:547:1' changed:
    type size changed from 1280 to 1408 (in bits)
    1 data member insertion:
      'u64 android_oem_data1[2]', at offset 1280 (in bits) at binder_internal.h:575:1
    4 impacted interfaces

  'struct bio at blk_types.h:205:1' changed:
    type size changed from 1152 to 1216 (in bits)
    1 data member insertion:
      'u64 bi_iocost_cost', at offset 704 (in bits) at blk_types.h:234:1
    there are data member changes:
      2 ('bio_crypt_ctx* bi_crypt_context' .. 'bool bi_skip_dm_default_key') offsets changed (by +64 bits)
      anonymous data member 'union {}' offset changed from 776 to 840 (in bits) (by +64 bits)
      8 ('unsigned short int bi_vcnt' .. 'bio_vec bi_inline_vecs[]') offsets changed (by +64 bits)
    3735 impacted interfaces

  'struct blk_mq_ctx at blk-mq.h:18:1' changed:
    type size changed from 2048 to 2560 (in bits)
    1 data member insertion:
      'u64 android_oem_data1[2]', at offset 2048 (in bits) at blk-mq.h:39:1
    3735 impacted interfaces

  'struct blk_mq_hw_ctx at blk-mq.h:16:1' changed:
    type size hasn't changed
    1 data member deletion:
      'atomic_t elevator_queued', at offset 3424 (in bits) at blk-mq.h:145:1
    3735 impacted interfaces

  'struct blk_mq_tags at blk-mq-tag.h:8:1' changed:
    type size changed from 1600 to 1664 (in bits)
    1 data member insertion:
      'u64 android_oem_data1', at offset 1600 (in bits) at blk-mq-tag.h:30:1
    3735 impacted interfaces

  'struct cfs_rq at sched.h:530:1' changed:
    details were reported earlier

  'struct dma_buf at dma-buf.h:397:1' changed:
    type size changed from 3520 to 2240 (in bits)
    3 data member deletions:
      'int mmap_count', at offset 2240 (in bits) at dma-buf.h:430:1
      'const vm_operations_struct* exp_vm_ops', at offset 2304 (in bits) at dma-buf.h:431:1
      'vm_operations_struct vm_ops', at offset 2368 (in bits) at dma-buf.h:432:1
    456 impacted interfaces

  'struct mm_struct at mm_types.h:407:1' changed:
    type size changed from 7424 to 7360 (in bits)
    there are data member changes:
      anonymous data member at offset 0 (in bits) changed from:
        struct {vm_area_struct* mmap; rb_root mm_rb; u64 vmacache_seqnum; rwlock_t mm_rb_lock; unsigned long int (file*, unsigned long int, unsigned long int, unsigned long int, unsigned long int)* get_unmapped_area; unsigned long int mmap_base; unsigned long int mmap_legacy_base; unsigned long int task_size; unsigned long int highest_vm_end; pgd_t* pgd; atomic_t membarrier_state; atomic_t mm_users; atomic_t mm_count; atomic_t has_pinned; seqcount_t write_protect_seq; atomic_long_t pgtables_bytes; int map_count; spinlock_t page_table_lock; rw_semaphore mmap_lock; list_head mmlist; unsigned long int hiwater_rss; unsigned long int hiwater_vm; unsigned long int total_vm; unsigned long int locked_vm; atomic64_t pinned_vm; unsigned long int data_vm; unsigned long int exec_vm; unsigned long int stack_vm; unsigned long int def_flags; spinlock_t arg_lock; unsigned long int start_code; unsigned long int end_code; unsigned long int start_data; unsigned long int end_data; unsigned long int start_brk; unsigned long int brk; unsigned long int start_stack; unsigned long int arg_start; unsigned long int arg_end; unsigned long int env_start; unsigned long int env_end; unsigned long int saved_auxv[46]; mm_rss_stat rss_stat; linux_binfmt* binfmt; mm_context_t context; unsigned long int flags; core_state* core_state; spinlock_t ioctx_lock; kioctx_table* ioctx_table; user_namespace* user_ns; file* exe_file; mmu_notifier_subscriptions* notifier_subscriptions; atomic_t tlb_flush_pending; uprobes_state uprobes_state; work_struct async_put_work; u32 pasid;}
      to:
        struct {vm_area_struct* mmap; rb_root mm_rb; u64 vmacache_seqnum; rwlock_t mm_rb_lock; unsigned long int (file*, unsigned long int, unsigned long int, unsigned long int, unsigned long int)* get_unmapped_area; unsigned long int mmap_base; unsigned long int mmap_legacy_base; unsigned long int task_size; unsigned long int highest_vm_end; pgd_t* pgd; atomic_t membarrier_state; atomic_t mm_users; atomic_t mm_count; atomic_t has_pinned; atomic_long_t pgtables_bytes; int map_count; spinlock_t page_table_lock; rw_semaphore mmap_lock; list_head mmlist; unsigned long int hiwater_rss; unsigned long int hiwater_vm; unsigned long int total_vm; unsigned long int locked_vm; atomic64_t pinned_vm; unsigned long int data_vm; unsigned long int exec_vm; unsigned long int stack_vm; unsigned long int def_flags; seqcount_t write_protect_seq; spinlock_t arg_lock; unsigned long int start_code; unsigned long int end_code; unsigned long int start_data; unsigned long int end_data; unsigned long int start_brk; unsigned long int brk; unsigned long int start_stack; unsigned long int arg_start; unsigned long int arg_end; unsigned long int env_start; unsigned long int env_end; unsigned long int saved_auxv[46]; mm_rss_stat rss_stat; linux_binfmt* binfmt; mm_context_t context; unsigned long int flags; core_state* core_state; spinlock_t ioctx_lock; kioctx_table* ioctx_table; user_namespace* user_ns; file* exe_file; mmu_notifier_subscriptions* notifier_subscriptions; atomic_t tlb_flush_pending; uprobes_state uprobes_state; work_struct async_put_work; u32 pasid;}
      and size changed from 7424 to 7360 (in bits) (by -64 bits)
      'unsigned long int cpu_bitmap[]' offset changed (by -64 bits)
    3735 impacted interfaces

  'struct mmc_bus_ops at core.h:20:1' changed:
    type size changed from 768 to 896 (in bits)
    1 data member insertion:
      'u64 android_vendor_data1[2]', at offset 768 (in bits) at core.h:34:1
    89 impacted interfaces

  'struct mmc_card at card.h:244:1' changed:
    type size changed from 13504 to 13568 (in bits)
    1 data member insertion:
      'u64 android_vendor_data1', at offset 13504 (in bits) at card.h:317:1
    89 impacted interfaces

  'struct mmc_host at host.h:279:1' changed:
    type size hasn't changed
    1 data member insertion:
      'u64 android_vendor_data1', at offset 13440 (in bits) at host.h:488:1
    there are data member changes:
      'u64 android_oem_data1' offset changed (by +64 bits)
    89 impacted interfaces

  'struct net at net_namespace.h:56:1' changed:
    details were reported earlier

  'struct request at blkdev.h:127:1' changed:
    type size changed from 2304 to 2368 (in bits)
    1 data member insertion:
      'u64 alloc_time_ns', at offset 1408 (in bits) at blkdev.h:194:1
    there are data member changes:
      12 ('u64 start_time_ns' .. 'unsigned long int deadline') offsets changed (by +64 bits)
      anonymous data member 'union {__call_single_data csd; u64 fifo_time;}' offset changed from 1920 to 1984 (in bits) (by +64 bits)
      2 ('rq_end_io_fn* end_io' .. 'void* end_io_data') offsets changed (by +64 bits)
    3735 impacted interfaces

  'struct request_queue at blkdev.h:396:1' changed:
    type size changed from 15616 to 15680 (in bits)
    1 data member insertion:
      'u64 android_oem_data1', at offset 15616 (in bits) at blkdev.h:592:1
    3735 impacted interfaces

  'struct rq at sched.h:914:1' changed (indirectly):
    details were reported earlier

  'struct ufs_dev_info at ufs.h:529:1' changed:
    type size hasn't changed
    1 data member insertion:
      'bool hpb_enabled', at offset 304 (in bits) at ufs.h:596:1
    38 impacted interfaces

  'struct ufs_hba at ufshcd.h:737:1' changed (indirectly):
    type size hasn't changed
    there are data member changes:
      type 'struct ufs_dev_info' of 'ufs_hba::dev_info' changed, as reported earlier
    38 impacted interfaces

Bug: 192107071
Signed-off-by: Todd Kjos
Change-Id: Ib0c3e59b2e33922b00a374801a8aa5d544047848
---
 android/abi_gki_aarch64.xml | 21589 +++++++++++-----------------
 build.config.common         |     2 +-
 2 files changed, 6792 insertions(+), 14799 deletions(-)

diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml
index f1b0315ecb69..330ebb81d91e 100755
--- a/android/abi_gki_aarch64.xml
+++ b/android/abi_gki_aarch64.xml
[diff body not reproduced: the XML element text of each changed line was lost in extraction, leaving only bare +/- markers; per the diffstat above, 6792 insertions(+) and 14799 deletions(-)]
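To make the arithmetic in the report above concrete: inserting 'u64 android_vendor_data1[16]' adds 16 * 64 = 1024 bits, which is exactly the reported growth of struct cfs_rq from 3072 to 4096 bits, and every member after the insertion point shifts by the same amount, which is why symbol CRCs change even though no pre-existing member was edited. The following stand-alone C sketch uses stand-in layouts and field counts, not the kernel's real struct cfs_rq, to reproduce those numbers:

/* Illustrative only -- stand-in layouts, not the kernel's definitions. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cfs_rq_old {
	uint64_t members[47];  /* pre-existing fields before the insertion point */
	uint64_t tail;         /* one field after the insertion point */
};                             /* total: 48 * 64 = 3072 bits */

struct cfs_rq_new {
	uint64_t members[47];
	uint64_t android_vendor_data1[16];  /* the inserted reserved space */
	uint64_t tail;
};                             /* total: 64 * 64 = 4096 bits */

int main(void)
{
	/* The sizes match the abidiff report above. */
	static_assert(sizeof(struct cfs_rq_old) * 8 == 3072, "old size");
	static_assert(sizeof(struct cfs_rq_new) * 8 == 4096, "new size");
	/* Every member after the insertion point moves by 1024 bits. */
	printf("tail shifted by %zu bits\n",
	       (offsetof(struct cfs_rq_new, tail) -
	        offsetof(struct cfs_rq_old, tail)) * 8);
	return 0;
}

Because these offset shifts change the KMI, the same patch bumps KMI_GENERATION from 7 to 8 in the build.config.common hunk below.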
diff --git a/build.config.common b/build.config.common
index 1bf50ec77079..778c4a7af699 100644
--- a/build.config.common
+++ b/build.config.common
@@ -1,5 +1,5 @@
 BRANCH=android12-5.10
-KMI_GENERATION=7
+KMI_GENERATION=8
 LLVM=1
 DEPMOD=depmod

From 00aec39e2e6f8e8367e8bc37574a240194548d8d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Maciej=20=C5=BBenczykowski?=
Date: Wed, 16 Jun 2021 17:09:52 -0700
Subject: [PATCH 62/62] FROMGIT: bpf: Support all gso types in bpf_skb_change_proto()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Since we no longer modify gso_size, it is now theoretically safe to not
set SKB_GSO_DODGY and reset gso_segs to zero.

This also means the skb_is_gso_tcp() check should no longer be necessary.

Unfortunately we cannot remove the skb_{decrease,increase}_gso_size()
helpers, as they are still used elsewhere:

  bpf_skb_net_grow() without BPF_F_ADJ_ROOM_FIXED_GSO
  bpf_skb_net_shrink() without BPF_F_ADJ_ROOM_FIXED_GSO
  net/core/lwt_bpf.c's handle_gso_type()

Signed-off-by: Maciej Żenczykowski
Signed-off-by: Daniel Borkmann
Cc: Dongseok Yi
Cc: Willem de Bruijn
Link: https://lore.kernel.org/bpf/20210617000953.2787453-3-zenczykowski@gmail.com
(cherry picked from commit 0bc919d3e0b8149a60d2444c6a8e2b5974556522
 https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git/commit/?id=0bc919d3e0b8149a60d2444c6a8e2b5974556522)
Test: builds, TreeHugger
Bug: 188690383
Change-Id: I46036bbacae9d1af7364ec0623dd75f0df5845fa
---
 net/core/filter.c | 22 ++--------------------
 1 file changed, 2 insertions(+), 20 deletions(-)

diff --git a/net/core/filter.c b/net/core/filter.c
index 7ea752af7894..035d66227ae2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3244,9 +3244,6 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
 	u32 off = skb_mac_header_len(skb);
 	int ret;
 
-	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
-		return -ENOTSUPP;
-
 	ret = skb_cow(skb, len_diff);
 	if (unlikely(ret < 0))
 		return ret;
@@ -3258,17 +3255,11 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
 	if (skb_is_gso(skb)) {
 		struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-		/* SKB_GSO_TCPV4 needs to be changed into
-		 * SKB_GSO_TCPV6.
-		 */
+		/* SKB_GSO_TCPV4 needs to be changed into SKB_GSO_TCPV6. */
 		if (shinfo->gso_type & SKB_GSO_TCPV4) {
 			shinfo->gso_type &= ~SKB_GSO_TCPV4;
 			shinfo->gso_type |= SKB_GSO_TCPV6;
 		}
-
-		/* Header must be checked, and gso_segs recomputed. */
-		shinfo->gso_type |= SKB_GSO_DODGY;
-		shinfo->gso_segs = 0;
 	}
 
 	skb->protocol = htons(ETH_P_IPV6);
@@ -3283,9 +3274,6 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
 	u32 off = skb_mac_header_len(skb);
 	int ret;
 
-	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
-		return -ENOTSUPP;
-
 	ret = skb_unclone(skb, GFP_ATOMIC);
 	if (unlikely(ret < 0))
 		return ret;
@@ -3297,17 +3285,11 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
 	if (skb_is_gso(skb)) {
 		struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-		/* SKB_GSO_TCPV6 needs to be changed into
-		 * SKB_GSO_TCPV4.
-		 */
+		/* SKB_GSO_TCPV6 needs to be changed into SKB_GSO_TCPV4. */
 		if (shinfo->gso_type & SKB_GSO_TCPV6) {
 			shinfo->gso_type &= ~SKB_GSO_TCPV6;
 			shinfo->gso_type |= SKB_GSO_TCPV4;
 		}
-
-		/* Header must be checked, and gso_segs recomputed. */
-		shinfo->gso_type |= SKB_GSO_DODGY;
-		shinfo->gso_segs = 0;
 	}
 
 	skb->protocol = htons(ETH_P_IP);
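As a non-authoritative sketch of how the relaxed helper is exercised (the program name, section name, and drop policy below are illustrative assumptions; only bpf_skb_change_proto() itself is the API this patch touches), a minimal tc-BPF program that, after this change, also works for non-TCP GSO packets such as UDP GSO:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only -- not part of this patch. */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int proto_4_to_6(struct __sk_buff *skb)
{
	/* Make room for and switch skb->protocol to IPv6. Before this
	 * patch the helper returned -ENOTSUPP for GSO skbs that were
	 * not TCP; now any gso_type is accepted. The flags argument
	 * must currently be zero.
	 */
	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
		return TC_ACT_SHOT;

	/* A real translator must now rewrite the L2/L3 headers, e.g.
	 * with bpf_skb_store_bytes(); omitted here for brevity.
	 */
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";

With a libbpf-compatible toolchain this could be attached with, e.g., tc filter add dev eth0 ingress bpf da obj prog.o sec tc.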