Snap for 8275767 from 84600b344e to android12-5.10-keystone-qcom-release

Change-Id: I32aed3ea200bd12faba65df475fa5c677938a05b
Android Build Coastguard Worker committed 2022-03-09 01:00:51 +00:00
5 changed files with 40 additions and 21 deletions

drivers/usb/host/xhci-hub.c

@@ -680,7 +680,7 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
 	}
 	pm_runtime_allow(xhci_to_hcd(xhci)->self.controller);
 	xhci->test_mode = 0;
-	return xhci_reset(xhci);
+	return xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 }
 
 void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,

drivers/usb/host/xhci-mem.c

@@ -2695,7 +2695,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
 fail:
 	xhci_halt(xhci);
-	xhci_reset(xhci);
+	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 	xhci_mem_cleanup(xhci);
 	return -ENOMEM;
 }

drivers/usb/host/xhci.c

@@ -65,7 +65,7 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
  * handshake done). There are two failure modes: "usec" have passed (major
  * hardware flakeout), or the register reads as all-ones (hardware removed).
  */
-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
 {
 	u32 result;
 	int ret;
@@ -73,7 +73,7 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
 	ret = readl_poll_timeout_atomic(ptr, result,
 				(result & mask) == done ||
 				result == U32_MAX,
-				1, usec);
+				1, timeout_us);
 
 	if (result == U32_MAX)		/* card removed */
 		return -ENODEV;
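
xhci_handshake() is a thin wrapper around readl_poll_timeout_atomic() from <linux/iopoll.h>, and this change only widens its timeout argument to a u64 count of microseconds. A minimal user-space sketch of the same polling pattern (reg_read() and handshake_sketch() are illustrative names, not kernel APIs):

#include <errno.h>
#include <stdint.h>
#include <unistd.h>

/* Stand-in for an MMIO read; the kernel would use readl() on an __iomem pointer. */
static uint32_t reg_read(const volatile uint32_t *reg)
{
	return *reg;
}

/*
 * Poll until (value & mask) == done, give up after roughly timeout_us
 * microseconds, and treat an all-ones read as "hardware removed", matching
 * the two failure modes described in the comment above.
 */
static int handshake_sketch(const volatile uint32_t *reg, uint32_t mask,
			    uint32_t done, uint64_t timeout_us)
{
	uint64_t waited_us = 0;

	for (;;) {
		uint32_t val = reg_read(reg);

		if (val == UINT32_MAX)
			return -ENODEV;		/* register reads as all-ones */
		if ((val & mask) == done)
			return 0;		/* handshake completed */
		if (waited_us >= timeout_us)
			return -ETIMEDOUT;	/* timeout budget exhausted */
		usleep(1);			/* roughly 1 us between reads, like the "1" above */
		waited_us++;
	}
}
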
@@ -162,7 +162,7 @@ int xhci_start(struct xhci_hcd *xhci)
  * Transactions will be terminated immediately, and operational registers
  * will be set to their defaults.
  */
-int xhci_reset(struct xhci_hcd *xhci)
+int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
 {
 	u32 command;
 	u32 state;
@@ -195,8 +195,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 	if (xhci->quirks & XHCI_INTEL_HOST)
 		udelay(1000);
 
-	ret = xhci_handshake(&xhci->op_regs->command,
-			CMD_RESET, 0, 10 * 1000 * 1000);
+	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
 	if (ret)
 		return ret;
 
@@ -209,8 +208,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 	 * xHCI cannot write to any doorbells or operational registers other
 	 * than status until the "Controller Not Ready" flag is cleared.
 	 */
-	ret = xhci_handshake(&xhci->op_regs->status,
-			STS_CNR, 0, 10 * 1000 * 1000);
+	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
 
 	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
 	xhci->usb2_rhub.bus_state.suspended_ports = 0;
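
Taken together, the two hunks above bound the whole reset sequence by one caller-supplied timeout instead of a hard-coded 10 seconds. A condensed paraphrase of that flow, not a verbatim copy of the driver code (xhci_reset_flow() is an illustrative name):

/* Paraphrase of xhci_reset() after this change; error handling and quirks trimmed. */
static int xhci_reset_flow(struct xhci_hcd *xhci, u64 timeout_us)
{
	int ret;

	/* Wait for the controller to clear CMD_RESET in the command register. */
	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
	if (ret)
		return ret;

	/*
	 * Then wait for "Controller Not Ready" to clear; doorbells and other
	 * operational registers must not be touched before that.
	 */
	return xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
}
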
@@ -731,7 +729,7 @@ static void xhci_stop(struct usb_hcd *hcd)
 	xhci->xhc_state |= XHCI_STATE_HALTED;
 	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
 	xhci_halt(xhci);
-	xhci_reset(xhci);
+	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 	spin_unlock_irq(&xhci->lock);
 
 	xhci_cleanup_msix(xhci);
@@ -784,7 +782,7 @@ void xhci_shutdown(struct usb_hcd *hcd)
 	xhci_halt(xhci);
 	/* Workaround for spurious wakeups at shutdown with HSW */
 	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
-		xhci_reset(xhci);
+		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 	spin_unlock_irq(&xhci->lock);
 
 	xhci_cleanup_msix(xhci);
@@ -1091,6 +1089,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 	int			retval = 0;
 	bool			comp_timer_running = false;
 	bool			pending_portevent = false;
+	bool			reinit_xhc = false;
 
 	if (!hcd->state)
 		return 0;
@@ -1107,10 +1106,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
 
 	spin_lock_irq(&xhci->lock);
-	if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
-		hibernated = true;
 
-	if (!hibernated) {
+	if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
+		reinit_xhc = true;
+
+	if (!reinit_xhc) {
 		/*
 		 * Some controllers might lose power during suspend, so wait
 		 * for controller not ready bit to clear, just as in xHC init.
@@ -1143,12 +1143,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 			spin_unlock_irq(&xhci->lock);
 			return -ETIMEDOUT;
 		}
-		temp = readl(&xhci->op_regs->status);
 	}
 
-	/* If restore operation fails, re-initialize the HC during resume */
-	if ((temp & STS_SRE) || hibernated) {
+	temp = readl(&xhci->op_regs->status);
+
+	/* re-initialize the HC on Restore Error, or Host Controller Error */
+	if (temp & (STS_SRE | STS_HCE)) {
+		reinit_xhc = true;
+		xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+	}
 
+	if (reinit_xhc) {
 		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
 				!(xhci_all_ports_seen_u0(xhci))) {
 			del_timer_sync(&xhci->comp_mode_recovery_timer);
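
The resume path now funnels every reason for a full re-initialization into one flag. A small condensation of that decision as shown in the two hunks above (xhci_resume_needs_reinit() is a hypothetical helper, not a function in the driver):

/* Hypothetical condensation of the reinit_xhc logic in xhci_resume(). */
static bool xhci_resume_needs_reinit(struct xhci_hcd *xhci, bool hibernated, u32 usbsts)
{
	/* Decided up front: hibernation, a reset-on-resume quirk, or a broken suspend. */
	if (hibernated || (xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
		return true;

	/* Decided from hardware state: Restore Error or Host Controller Error in USBSTS. */
	return (usbsts & (STS_SRE | STS_HCE)) != 0;
}
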
@@ -1163,7 +1168,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		xhci_dbg(xhci, "Stop HCD\n");
 		xhci_halt(xhci);
 		xhci_zero_64b_regs(xhci);
-		retval = xhci_reset(xhci);
+		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
 		spin_unlock_irq(&xhci->lock);
 		if (retval)
 			return retval;
@@ -5268,7 +5273,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 
 	xhci_dbg(xhci, "Resetting HCD\n");
 	/* Reset the internal HC memory state and registers. */
-	retval = xhci_reset(xhci);
+	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
 	if (retval)
 		return retval;
 	xhci_dbg(xhci, "Reset complete\n");

drivers/usb/host/xhci.h

@@ -230,6 +230,9 @@ struct xhci_op_regs {
 #define CMD_ETE		(1 << 14)
 /* bits 15:31 are reserved (and should be preserved on writes). */
 
+#define XHCI_RESET_LONG_USEC		(10 * 1000 * 1000)
+#define XHCI_RESET_SHORT_USEC		(250 * 1000)
+
 /* IMAN - Interrupt Management Register */
 #define IMAN_IE		(1 << 1)
 #define IMAN_IP		(1 << 0)
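
Both new constants are microsecond values, matching the u64 timeout_us parameter. The call sites in the hunks above show which paths use which budget; summarized as a comment:

/*
 * XHCI_RESET_LONG_USEC  = 10 * 1000 * 1000 us = 10 s    (xhci_gen_setup(), xhci_resume())
 * XHCI_RESET_SHORT_USEC = 250 * 1000 us       = 250 ms  (xhci_stop(), xhci_shutdown(),
 *                                                        xhci_exit_test_mode(),
 *                                                        xhci_mem_init() failure path)
 */
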
@@ -2097,11 +2100,11 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci,
 
 /* xHCI host controller glue */
 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec);
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);
 void xhci_quiesce(struct xhci_hcd *xhci);
 int xhci_halt(struct xhci_hcd *xhci);
 int xhci_start(struct xhci_hcd *xhci);
-int xhci_reset(struct xhci_hcd *xhci);
+int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us);
 int xhci_run(struct usb_hcd *hcd);
 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
 void xhci_shutdown(struct usb_hcd *hcd);

mm/memory.c

@@ -5033,11 +5033,15 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
 		goto out_walk;
 
 	p4d = p4d_offset(pgd, address);
+	if (pgd_val(READ_ONCE(*pgd)) != pgd_val(pgdval))
+		goto out_walk;
 	p4dval = READ_ONCE(*p4d);
 	if (p4d_none(p4dval) || unlikely(p4d_bad(p4dval)))
 		goto out_walk;
 
 	vmf.pud = pud_offset(p4d, address);
+	if (p4d_val(READ_ONCE(*p4d)) != p4d_val(p4dval))
+		goto out_walk;
 	pudval = READ_ONCE(*vmf.pud);
 	if (pud_none(pudval) || unlikely(pud_bad(pudval)))
 		goto out_walk;
@@ -5047,6 +5051,8 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
 		goto out_walk;
 
 	vmf.pmd = pmd_offset(vmf.pud, address);
+	if (pud_val(READ_ONCE(*vmf.pud)) != pud_val(pudval))
+		goto out_walk;
 	vmf.orig_pmd = READ_ONCE(*vmf.pmd);
 	/*
 	 * pmd_none could mean that a hugepage collapse is in progress
@@ -5074,6 +5080,11 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
 	 */
 	vmf.pte = pte_offset_map(vmf.pmd, address);
+	if (pmd_val(READ_ONCE(*vmf.pmd)) != pmd_val(vmf.orig_pmd)) {
+		pte_unmap(vmf.pte);
+		vmf.pte = NULL;
+		goto out_walk;
+	}
 	vmf.orig_pte = READ_ONCE(*vmf.pte);
 	barrier(); /* See comment in handle_pte_fault() */
 	if (pte_none(vmf.orig_pte)) {
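
The mm/memory.c hunks all apply the same lockless-walk rule: after computing the next-level pointer, re-read the parent entry and abandon the speculative fault if it changed, since no page-table locks are held. A minimal user-space sketch of that re-validation pattern (illustrative only; struct level, descend_sketch() and the zero-means-invalid convention are assumptions, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* A toy table level: each entry either points somewhere or is 0 (invalid). */
struct level {
	_Atomic uint64_t entry;
};

/*
 * Read the child entry, but only trust it if the parent entry used to find
 * the child still holds the value seen earlier (parentval). This mirrors the
 * "if (pgd_val(READ_ONCE(*pgd)) != pgd_val(pgdval)) goto out_walk;" checks.
 */
static bool descend_sketch(struct level *parent, uint64_t parentval,
			   struct level *child, uint64_t *childval)
{
	if (atomic_load(&parent->entry) != parentval)
		return false;			/* parent changed under us: abandon the walk */

	*childval = atomic_load(&child->entry);	/* like READ_ONCE(*p4d) */
	if (*childval == 0)
		return false;			/* none/bad entry: abandon the walk */

	return true;
}
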