Merge keystone/android12-5.10-keystone-qcom-release.81+ (e4225e5) into msm-5.10

* refs/heads/tmp-e4225e5:
  ANDROID: mm: Fix page table lookup in speculative fault path
  UPSTREAM: xhci: re-initialize the HC during resume if HCE was set
  FROMGIT: xhci: make xhci_handshake timeout for xhci_reset() adjustable
  ANDROID: vendor_hooks: Add hooks for __alloc_pages_direct_reclaim
  ANDROID: dma-direct: Document disable_dma32
  ANDROID: dma-direct: Make DMA32 disablement work for CONFIG_NUMA
  UPSTREAM: mmc: block: fix read single on recovery logic
  UPSTREAM: fget: check that the fd still exists after getting a ref to it
  ANDROID: GKI: Update symbols to symbol list

Change-Id: Ic2917d956f70e01cc21c45edd3c7526b5aa8dc6b
Signed-off-by: Sivasri Kumar, Vanka <quic_svanka@quicinc.com>
@@ -943,6 +943,10 @@
 			can be useful when debugging issues that require an SLB
 			miss to occur.
 
+	disable_dma32=	[KNL]
+			Dynamically disable ZONE_DMA32 on kernels compiled with
+			CONFIG_ZONE_DMA32=y.
+
 	stress_slb	[PPC]
 			Limits the number of kernel SLB entries, and flushes
 			them frequently to increase the rate of SLB faults
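The new parameter takes effect at boot. As a hedged illustration (not the actual patch, whose accepted values are not shown in this hunk), a flag like this is typically wired up with early_param():

#include <linux/init.h>
#include <linux/string.h>

/* Hypothetical sketch of command-line plumbing for disable_dma32=.
 * The accepted value ("on") is an assumption for illustration. */
static bool dma32_disabled;

static int __init parse_disable_dma32(char *buf)
{
	if (buf && !strcmp(buf, "on"))
		dma32_disabled = true;
	return 0;
}
early_param("disable_dma32", parse_disable_dma32);

Booting with a setting like disable_dma32=on would then leave ZONE_DMA32 empty, which is what the kernel/dma/direct.c hunks further down test for before falling back to GFP_DMA32.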
@@ -1 +1 @@
-f2d0c305766eb3fe9aeef48ec1f6794e5ddb218a
+a817d6ed8746d9de8960acde229c3a9dfa4d465a
File diff suppressed because it is too large.
@@ -2692,6 +2692,7 @@
   __traceiter_android_vh_tune_inactive_ratio
   __traceiter_android_vh_tune_scan_type
   __traceiter_android_vh_tune_swappiness
+  __traceiter_android_vh_page_referenced_check_bypass
   __traceiter_android_vh_ufs_compl_command
   __traceiter_android_vh_ufs_send_command
   __traceiter_android_vh_ufs_send_tm_command
@@ -2894,6 +2895,7 @@
   __tracepoint_android_vh_tune_inactive_ratio
   __tracepoint_android_vh_tune_scan_type
   __tracepoint_android_vh_tune_swappiness
+  __tracepoint_android_vh_page_referenced_check_bypass
   __tracepoint_android_vh_ufs_compl_command
   __tracepoint_android_vh_ufs_send_command
   __tracepoint_android_vh_ufs_send_tm_command
@@ -292,6 +292,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_scan_type);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_swappiness);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_slab_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_check_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_all_pages_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_event);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_group);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuset_fork);
@@ -1652,32 +1652,32 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
 	struct mmc_card *card = mq->card;
 	struct mmc_host *host = card->host;
 	blk_status_t error = BLK_STS_OK;
-	int retries = 0;
 
 	do {
 		u32 status;
 		int err;
+		int retries = 0;
 
-		mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+		while (retries++ <= MMC_READ_SINGLE_RETRIES) {
+			mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
 
-		mmc_wait_for_req(host, mrq);
+			mmc_wait_for_req(host, mrq);
 
-		err = mmc_send_status(card, &status);
-		if (err)
-			goto error_exit;
-
-		if (!mmc_host_is_spi(host) &&
-		    !mmc_ready_for_data(status)) {
-			err = mmc_blk_fix_state(card, req);
+			err = mmc_send_status(card, &status);
 			if (err)
 				goto error_exit;
-		}
 
-		if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
-			continue;
+			if (!mmc_host_is_spi(host) &&
+			    !mmc_ready_for_data(status)) {
+				err = mmc_blk_fix_state(card, req);
+				if (err)
+					goto error_exit;
+			}
 
-		retries = 0;
+			if (!mrq->cmd->error)
+				break;
+		}
 
 		if (mrq->cmd->error ||
 		    mrq->data->error ||
 		    (!mmc_host_is_spi(host) &&
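What the hunk above changes, in short: the retry counter moves inside the do-loop so it restarts at zero for every sector, and the prep/send/status sequence is wrapped in a while loop that breaks on success instead of retrying via a continue at the bottom of the outer loop. A minimal standalone sketch of the corrected shape (read_one_sector() is a hypothetical stand-in for the prep/wait/status steps, not the driver code):

#include <stdbool.h>

#define MMC_READ_SINGLE_RETRIES	2

/* stand-in for mmc_blk_rw_rq_prep() + mmc_wait_for_req() + status checks */
static bool read_one_sector(int sector)
{
	return sector % 7 != 0;	/* pretend some sectors fail */
}

static bool read_single(int sector)
{
	int retries = 0;	/* per-sector, as in the fixed code */

	while (retries++ <= MMC_READ_SINGLE_RETRIES) {
		if (read_one_sector(sector))
			return true;	/* success: stop retrying */
	}
	return false;	/* only this sector is reported as failed */
}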
@@ -680,7 +680,7 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
 	}
 	pm_runtime_allow(xhci_to_hcd(xhci)->self.controller);
 	xhci->test_mode = 0;
-	return xhci_reset(xhci);
+	return xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 }
 
 void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
@@ -2695,7 +2695,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
 fail:
 	xhci_halt(xhci);
-	xhci_reset(xhci);
+	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 	xhci_mem_cleanup(xhci);
 	return -ENOMEM;
 }
@@ -65,7 +65,7 @@ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
  * handshake done). There are two failure modes: "usec" have passed (major
  * hardware flakeout), or the register reads as all-ones (hardware removed).
  */
-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
 {
 	u32 result;
 	int ret;
@@ -73,7 +73,7 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
 	ret = readl_poll_timeout_atomic(ptr, result,
 					(result & mask) == done ||
 					result == U32_MAX,
-					1, usec);
+					1, timeout_us);
 	if (result == U32_MAX)		/* card removed */
 		return -ENODEV;
 
@@ -162,7 +162,7 @@ int xhci_start(struct xhci_hcd *xhci)
  * Transactions will be terminated immediately, and operational registers
  * will be set to their defaults.
  */
-int xhci_reset(struct xhci_hcd *xhci)
+int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
 {
 	u32 command;
 	u32 state;
@@ -195,8 +195,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 	if (xhci->quirks & XHCI_INTEL_HOST)
 		udelay(1000);
 
-	ret = xhci_handshake(&xhci->op_regs->command,
-			CMD_RESET, 0, 10 * 1000 * 1000);
+	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
 	if (ret)
 		return ret;
 
@@ -209,8 +208,7 @@ int xhci_reset(struct xhci_hcd *xhci)
 	 * xHCI cannot write to any doorbells or operational registers other
 	 * than status until the "Controller Not Ready" flag is cleared.
 	 */
-	ret = xhci_handshake(&xhci->op_regs->status,
-			STS_CNR, 0, 10 * 1000 * 1000);
+	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);
 
 	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
 	xhci->usb2_rhub.bus_state.suspended_ports = 0;
@@ -731,7 +729,7 @@ static void xhci_stop(struct usb_hcd *hcd)
 	xhci->xhc_state |= XHCI_STATE_HALTED;
 	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
 	xhci_halt(xhci);
-	xhci_reset(xhci);
+	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 	spin_unlock_irq(&xhci->lock);
 
 	xhci_cleanup_msix(xhci);
@@ -784,7 +782,7 @@ void xhci_shutdown(struct usb_hcd *hcd)
 	xhci_halt(xhci);
 	/* Workaround for spurious wakeups at shutdown with HSW */
 	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
-		xhci_reset(xhci);
+		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 	spin_unlock_irq(&xhci->lock);
 
 	xhci_cleanup_msix(xhci);
@@ -1091,6 +1089,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 	int			retval = 0;
 	bool			comp_timer_running = false;
 	bool			pending_portevent = false;
+	bool			reinit_xhc = false;
 
 	if (!hcd->state)
 		return 0;
@@ -1107,10 +1106,11 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
 
 	spin_lock_irq(&xhci->lock);
-	if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
-		hibernated = true;
 
-	if (!hibernated) {
+	if (hibernated || xhci->quirks & XHCI_RESET_ON_RESUME || xhci->broken_suspend)
+		reinit_xhc = true;
+
+	if (!reinit_xhc) {
 		/*
 		 * Some controllers might lose power during suspend, so wait
 		 * for controller not ready bit to clear, just as in xHC init.
@@ -1143,12 +1143,17 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 			spin_unlock_irq(&xhci->lock);
 			return -ETIMEDOUT;
 		}
-		temp = readl(&xhci->op_regs->status);
 	}
 
-	/* If restore operation fails, re-initialize the HC during resume */
-	if ((temp & STS_SRE) || hibernated) {
+	temp = readl(&xhci->op_regs->status);
+
+	/* re-initialize the HC on Restore Error, or Host Controller Error */
+	if (temp & (STS_SRE | STS_HCE)) {
+		reinit_xhc = true;
+		xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
+	}
 
+	if (reinit_xhc) {
 		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
 		    !(xhci_all_ports_seen_u0(xhci))) {
 			del_timer_sync(&xhci->comp_mode_recovery_timer);
@@ -1163,7 +1168,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		xhci_dbg(xhci, "Stop HCD\n");
 		xhci_halt(xhci);
 		xhci_zero_64b_regs(xhci);
-		retval = xhci_reset(xhci);
+		retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
 		spin_unlock_irq(&xhci->lock);
 		if (retval)
 			return retval;
@@ -5268,7 +5273,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 
 	xhci_dbg(xhci, "Resetting HCD\n");
 	/* Reset the internal HC memory state and registers. */
-	retval = xhci_reset(xhci);
+	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
 	if (retval)
 		return retval;
 	xhci_dbg(xhci, "Reset complete\n");
 
@@ -230,6 +230,9 @@ struct xhci_op_regs {
 #define CMD_ETE		(1 << 14)
 /* bits 15:31 are reserved (and should be preserved on writes). */
 
+#define XHCI_RESET_LONG_USEC		(10 * 1000 * 1000)
+#define XHCI_RESET_SHORT_USEC		(250 * 1000)
+
 /* IMAN - Interrupt Management Register */
 #define IMAN_IE		(1 << 1)
 #define IMAN_IP		(1 << 0)
@@ -2097,11 +2100,11 @@ void xhci_free_container_ctx(struct xhci_hcd *xhci,
 
 /* xHCI host controller glue */
 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
-int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec);
+int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);
 void xhci_quiesce(struct xhci_hcd *xhci);
 int xhci_halt(struct xhci_hcd *xhci);
 int xhci_start(struct xhci_hcd *xhci);
-int xhci_reset(struct xhci_hcd *xhci);
+int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us);
 int xhci_run(struct usb_hcd *hcd);
 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
 void xhci_shutdown(struct usb_hcd *hcd);
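Net effect of the xhci series above: xhci_reset() and xhci_handshake() now take the bound from the caller. Init and resume paths keep the generous XHCI_RESET_LONG_USEC (10 * 1000 * 1000 us = 10 s), while stop/shutdown paths use XHCI_RESET_SHORT_USEC (250 * 1000 us = 250 ms) so a wedged controller cannot stall a reboot for ten seconds. The handshake itself is a bounded poll; a user-space sketch of the same pattern (illustrative only, not the kernel helper):

#include <stdint.h>
#include <errno.h>

/* Poll *reg until (value & mask) == done, the register reads back
 * all-ones (device removed), or roughly timeout_us microseconds pass.
 * Each iteration models the 1 us poll interval used by the driver. */
static int handshake(volatile uint32_t *reg, uint32_t mask, uint32_t done,
		     uint64_t timeout_us)
{
	uint64_t elapsed_us = 0;

	for (;;) {
		uint32_t val = *reg;

		if (val == UINT32_MAX)
			return -ENODEV;		/* hardware removed */
		if ((val & mask) == done)
			return 0;
		if (elapsed_us++ >= timeout_us)
			return -ETIMEDOUT;
		/* a 1 us delay would go here in real code */
	}
}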
@@ -834,6 +834,10 @@ loop:
 			file = NULL;
 		else if (!get_file_rcu_many(file, refs))
 			goto loop;
+		else if (__fcheck_files(files, fd) != file) {
+			fput_many(file, refs);
+			goto loop;
+		}
 	}
 	rcu_read_unlock();
 
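The four added lines close a race with dup2(): between the RCU lookup and the refcount bump, the descriptor slot can be repointed at a different struct file, so the slot is read again after taking the reference and the reference is dropped on mismatch. The same lookup/ref/revalidate idiom, reduced to a standalone sketch (hypothetical types, not the VFS code):

#include <stdatomic.h>
#include <stddef.h>

struct object { atomic_int refcount; };

static struct object *slot_lookup(struct object *_Atomic *slot)
{
	for (;;) {
		struct object *obj = atomic_load(slot);

		if (!obj)
			return NULL;
		atomic_fetch_add(&obj->refcount, 1);
		/* Revalidate: if the slot was repointed while we were
		 * taking the reference, undo and retry. */
		if (atomic_load(slot) == obj)
			return obj;
		atomic_fetch_sub(&obj->refcount, 1);
	}
}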
@@ -37,11 +37,16 @@ static inline bool zone_dma32_is_empty(int node)
 
 static inline bool zone_dma32_are_empty(void)
 {
+#ifdef CONFIG_NUMA
 	int node;
 
 	for_each_node(node)
 		if (!zone_dma32_is_empty(node))
 			return false;
+#else
+	if (!zone_dma32_is_empty(numa_node_id()))
+		return false;
+#endif
 
 	return true;
 }
@@ -117,6 +117,11 @@ DECLARE_HOOK(android_vh_mmap_region,
 DECLARE_HOOK(android_vh_try_to_unmap_one,
 	TP_PROTO(struct vm_area_struct *vma, struct page *page, unsigned long addr, bool ret),
 	TP_ARGS(vma, page, addr, ret));
+DECLARE_HOOK(android_vh_drain_all_pages_bypass,
+	TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long alloc_flags,
+		int migratetype, unsigned long did_some_progress,
+		bool *bypass),
+	TP_ARGS(gfp_mask, order, alloc_flags, migratetype, did_some_progress, bypass));
 struct device;
 DECLARE_HOOK(android_vh_subpage_dma_contig_alloc,
 	TP_PROTO(bool *allow_subpage_alloc, struct device *dev, size_t *size),
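DECLARE_HOOK() is built on tracepoints, so a vendor module attaches to the new hook through the generated register_trace_android_vh_* helper; the probe receives the tracepoint's void *data cookie followed by the TP_PROTO arguments. A possible consumer might look like this (the handler, its bypass policy, and the include path are hypothetical):

#include <linux/module.h>
#include <linux/gfp.h>
#include <trace/hooks/mm.h>	/* assumed location of the hook */

static void vendor_drain_bypass(void *unused, gfp_t gfp_mask,
		unsigned int order, unsigned long alloc_flags,
		int migratetype, unsigned long did_some_progress,
		bool *bypass)
{
	/* Example policy only: skip the per-CPU drain for costly orders. */
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		*bypass = true;
}

static int __init vendor_hook_init(void)
{
	return register_trace_android_vh_drain_all_pages_bypass(
			vendor_drain_bypass, NULL);
}
module_init(vendor_hook_init);
MODULE_LICENSE("GPL");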
@@ -62,7 +62,7 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
 		return GFP_DMA;
 	if (*phys_limit <= DMA_BIT_MASK(32) &&
-	    !zone_dma32_is_empty(dev_to_node(dev)))
+	    !zone_dma32_are_empty())
 		return GFP_DMA32;
 	return 0;
 }
@@ -103,7 +103,7 @@ again:
 	if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
 	    phys_limit < DMA_BIT_MASK(64) &&
 	    !(gfp & (GFP_DMA32 | GFP_DMA)) &&
-	    !zone_dma32_is_empty(node)) {
+	    !zone_dma32_are_empty()) {
 		gfp |= GFP_DMA32;
 		goto again;
 	}
mm/memory.c
@@ -5033,11 +5033,15 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
 		goto out_walk;
 
 	p4d = p4d_offset(pgd, address);
+	if (pgd_val(READ_ONCE(*pgd)) != pgd_val(pgdval))
+		goto out_walk;
 	p4dval = READ_ONCE(*p4d);
 	if (p4d_none(p4dval) || unlikely(p4d_bad(p4dval)))
 		goto out_walk;
 
 	vmf.pud = pud_offset(p4d, address);
+	if (p4d_val(READ_ONCE(*p4d)) != p4d_val(p4dval))
+		goto out_walk;
 	pudval = READ_ONCE(*vmf.pud);
 	if (pud_none(pudval) || unlikely(pud_bad(pudval)))
 		goto out_walk;
@@ -5047,6 +5051,8 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
 		goto out_walk;
 
 	vmf.pmd = pmd_offset(vmf.pud, address);
+	if (pud_val(READ_ONCE(*vmf.pud)) != pud_val(pudval))
+		goto out_walk;
 	vmf.orig_pmd = READ_ONCE(*vmf.pmd);
 	/*
 	 * pmd_none could mean that a hugepage collapse is in progress
@@ -5074,6 +5080,11 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
 	 */
 
 	vmf.pte = pte_offset_map(vmf.pmd, address);
+	if (pmd_val(READ_ONCE(*vmf.pmd)) != pmd_val(vmf.orig_pmd)) {
+		pte_unmap(vmf.pte);
+		vmf.pte = NULL;
+		goto out_walk;
+	}
 	vmf.orig_pte = READ_ONCE(*vmf.pte);
 	barrier(); /* See comment in handle_pte_fault() */
 	if (pte_none(vmf.orig_pte)) {
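All three hunks apply one rule: after computing a lower-level pointer from a snapshot of the parent entry, re-read the parent and bail out to the non-speculative path if it changed, so the lockless walk never dereferences a table that may just have been freed or repointed. A toy illustration of that ordering (standalone sketch, not kernel code):

#include <stdatomic.h>
#include <stdint.h>
#include <stddef.h>

typedef _Atomic uintptr_t pt_entry;	/* one "page-table entry" */

struct table { pt_entry entry[512]; };

/* Step from a parent entry to its child table; -1 means "fall back to
 * the locked walk". Mirrors the patch: snapshot, derive the child,
 * re-check the parent, and only then read through the child pointer. */
static int step_down(pt_entry *parent, unsigned int index, uintptr_t *out)
{
	uintptr_t snapshot = atomic_load(parent);
	struct table *child = (struct table *)snapshot;

	if (!child)
		return -1;
	if (atomic_load(parent) != snapshot)	/* changed under us */
		return -1;
	*out = atomic_load(&child->entry[index]);
	return 0;
}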
@@ -4505,6 +4505,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 	struct page *page = NULL;
 	unsigned long pflags;
 	bool drained = false;
+	bool skip_pcp_drain = false;
 
 	psi_memstall_enter(&pflags);
 	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
@@ -4521,7 +4522,10 @@ retry:
 	 */
 	if (!page && !drained) {
 		unreserve_highatomic_pageblock(ac, false);
-		drain_all_pages(NULL);
+		trace_android_vh_drain_all_pages_bypass(gfp_mask, order,
+			alloc_flags, ac->migratetype, *did_some_progress, &skip_pcp_drain);
+		if (!skip_pcp_drain)
+			drain_all_pages(NULL);
 		drained = true;
 		goto retry;
 	}