Merge branch 'android12-5.10' into android12-5.10-lts
Sync up with android12-5.10 for the following commits:

591f4296cc UPSTREAM: f2fs: fix UAF in f2fs_available_free_memory
cd5f87fade FROMGIT: regmap-irq: Update interrupt clear register for proper reset
5501913544 UPSTREAM: iommu: Fix potential use-after-free during probe
4c47eaa7c8 BACKPORT: sched/fair: Fix fault in reweight_entity
c3daae52af UPSTREAM: rcu/exp: Mark current CPU as exp-QS in IPI loop second pass
cb7e10d31b ANDROID: vendor_hooks: Add hooks for binder proc transaction
16d19b6561 UPSTREAM: usb: gadget: rndis: check size of RNDIS_MSG_SET command
c7732dbce5 UPSTREAM: USB: gadget: validate interface OS descriptor requests
6f915dd2af ANDROID: incremental-fs: remove index and incomplete dir on umount
cbac4c1652 ANDROID: GKI: rockchip: Update symbol need by system heap
64fe36c410 UPSTREAM: kfence: fix memory leak when cat kfence objects
7a1e7dc41e UPSTREAM: arm64: mte: DC {GVA,GZVA} shouldn't be used when DCZID_EL0.DZP == 1
132cc28d20 UPSTREAM: dma-buf: system_heap: Use 'for_each_sgtable_sg' in pages free flow
01e13a46e4 BACKPORT: arm64: uaccess: avoid blocking within critical sections
e97e339a68 UPSTREAM: arm64: arm64_ftr_reg->name may not be a human-readable string
c6672561bc UPSTREAM: net: add and use skb_unclone_keeptruesize() helper
a5c4e6ce74 UPSTREAM: mm/userfaultfd: selftests: fix memory corruption with thp enabled
a90890b4c7 UPSTREAM: KVM: arm64: Fix host stage-2 PGD refcount
f6f03a70c2 UPSTREAM: remoteproc: Fix the wrong default value of is_iomem
cf59c9b9b2 UPSTREAM: remoteproc: elf_loader: Fix loading segment when is_iomem true
35c4c40dbb UPSTREAM: scsi: ufs: core: Unbreak the reset handler
81ec07b6b9 UPSTREAM: blkcg: fix memory leak in blk_iolatency_init
234844b9fe UPSTREAM: arm64: dts: qcom: ipq8074: remove USB tx-fifo-resize property
afb9df4c90 UPSTREAM: usb: xhci-mtk: fix issue of out-of-bounds array access
900c38d4ed UPSTREAM: mm/slub: fix endianness bug for alloc/free_traces attributes
e4f41530d4 UPSTREAM: Revert "usb: dwc3: dwc3-qcom: Enable tx-fifo-resize property by default"
e4757e9070 UPSTREAM: usb: dwc3: core: Revise GHWPARAMS9 offset
f8b20495b7 UPSTREAM: firmware: arm_scmi: Fix type error assignment in voltage protocol
d7ba0f636d UPSTREAM: firmware: arm_scmi: Fix type error in sensor protocol
986262ed83 UPSTREAM: coresight: trbe: Fix incorrect access of the sink specific data
de27f42b19 UPSTREAM: device property: Add missed header in fwnode.h
5be4ad1d99 UPSTREAM: usb: typec: tcpci: don't handle vSafe0V event if it's not enabled
cac9433c3a UPSTREAM: driver core: fw_devlink: Improve handling of cyclic dependencies
4137188c10 UPSTREAM: tracing/boot: Fix to loop on only subkeys
e44b1adb9e BACKPORT: mm/memory_hotplug: fix potential permanent lru cache disable
1d3cff0b48 UPSTREAM: usb: gadget: f_serial: Ensure gserial disconnected during unbind
a8f9df1ffc FROMGIT: scsi: ufs: Fix a deadlock in the error handler
fb0fa7dc29 UPSTREAM: scsi: ufs: Use DECLARE_COMPLETION_ONSTACK() where appropriate
18d48b7c6d ANDROID: GKI: Enable CONFIG_SERIAL_8250_RUNTIME_UARTS=0
8f280376b4 BACKPORT: f2fs: fix up f2fs_lookup tracepoints
233aba68e8 UPSTREAM: tipc: improve size validations for received domain records
944437cac9 ANDROID: gki_defconfig: Enable NET_ACT_BPF

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I02bf56df8b0fc823b4f445603ced4adf53021ef1
@@ -4949,6 +4949,7 @@
     <elf-symbol name='submit_bio_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x32a9deed'/>
     <elf-symbol name='subsys_system_register' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0xecd1be91'/>
     <elf-symbol name='suspend_set_ops' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x1ab0c7e0'/>
+    <elf-symbol name='swiotlb_max_segment' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x5b6b0329'/>
     <elf-symbol name='swiotlb_nr_tbl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x5e51cd74'/>
     <elf-symbol name='sync_blockdev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x219fe0be'/>
     <elf-symbol name='sync_dirty_buffer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x87e19c21'/>
@@ -140169,6 +140170,9 @@
       <parameter type-id='9d109fcf' name='ops' filepath='kernel/power/suspend.c' line='205' column='1'/>
       <return type-id='48b5725f'/>
     </function-decl>
+    <function-decl name='swiotlb_max_segment' mangled-name='swiotlb_max_segment' filepath='kernel/dma/swiotlb.c' line='138' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='swiotlb_max_segment'>
+      <return type-id='f0981eeb'/>
+    </function-decl>
     <function-decl name='swiotlb_nr_tbl' mangled-name='swiotlb_nr_tbl' filepath='kernel/dma/swiotlb.c' line='132' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='swiotlb_nr_tbl'>
       <return type-id='7359adad'/>
     </function-decl>
@@ -1952,6 +1952,7 @@
  dma_heap_get_dev
  __sg_page_iter_next
  __sg_page_iter_start
+ swiotlb_max_segment
 
 # required by tcpci_husb311.ko
  i2c_smbus_read_word_data
@@ -433,7 +433,6 @@
         interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
         phys = <&qusb_phy_0>, <&usb0_ssphy>;
         phy-names = "usb2-phy", "usb3-phy";
-        tx-fifo-resize;
         snps,is-utmi-l1-suspend;
         snps,hird-threshold = /bits/ 8 <0x0>;
         snps,dis_u2_susphy_quirk;
@@ -474,7 +473,6 @@
         interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
         phys = <&qusb_phy_1>, <&usb1_ssphy>;
         phy-names = "usb2-phy", "usb3-phy";
-        tx-fifo-resize;
         snps,is-utmi-l1-suspend;
         snps,hird-threshold = /bits/ 8 <0x0>;
         snps,dis_u2_susphy_quirk;
@@ -259,6 +259,7 @@ CONFIG_NET_ACT_POLICE=y
 CONFIG_NET_ACT_GACT=y
 CONFIG_NET_ACT_MIRRED=y
 CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_NET_ACT_BPF=y
 CONFIG_VSOCKETS=y
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
@@ -368,6 +369,7 @@ CONFIG_SERIAL_8250=y
 # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
 CONFIG_SERIAL_8250_CONSOLE=y
 # CONFIG_SERIAL_8250_EXAR is not set
+CONFIG_SERIAL_8250_RUNTIME_UARTS=0
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
@@ -84,10 +84,12 @@ static inline void __dc_gzva(u64 p)
 static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
                                          bool init)
 {
-    u64 curr, mask, dczid_bs, end1, end2, end3;
+    u64 curr, mask, dczid, dczid_bs, dczid_dzp, end1, end2, end3;
 
     /* Read DC G(Z)VA block size from the system register. */
-    dczid_bs = 4ul << (read_cpuid(DCZID_EL0) & 0xf);
+    dczid = read_cpuid(DCZID_EL0);
+    dczid_bs = 4ul << (dczid & 0xf);
+    dczid_dzp = (dczid >> 4) & 1;
 
     curr = (u64)__tag_set(addr, tag);
     mask = dczid_bs - 1;
@@ -106,7 +108,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
      */
 #define SET_MEMTAG_RANGE(stg_post, dc_gva) \
     do { \
-        if (size >= 2 * dczid_bs) { \
+        if (!dczid_dzp && size >= 2 * dczid_bs) { \
             do { \
                 curr = stg_post(curr); \
             } while (curr < end1); \
@@ -342,12 +342,22 @@ do { \
     (x) = (__force __typeof__(*(ptr)))__gu_val; \
 } while (0)
 
+/*
+ * We must not call into the scheduler between uaccess_enable_not_uao() and
+ * uaccess_disable_not_uao(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
 #define __raw_get_user(x, ptr, err) \
 do { \
+    __typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \
+    __typeof__(x) __rgu_val; \
     __chk_user_ptr(ptr); \
+    \
     uaccess_enable_not_uao(); \
-    __raw_get_mem("ldtr", x, ptr, err); \
+    __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \
     uaccess_disable_not_uao(); \
+    \
+    (x) = __rgu_val; \
 } while (0)
 
 #define __get_user_error(x, ptr, err) \
@@ -371,14 +381,22 @@ do { \
 
 #define get_user __get_user
 
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
 #define __get_kernel_nofault(dst, src, type, err_label) \
 do { \
+    __typeof__(dst) __gkn_dst = (dst); \
+    __typeof__(src) __gkn_src = (src); \
     int __gkn_err = 0; \
     \
     __uaccess_enable_tco_async(); \
-    __raw_get_mem("ldr", *((type *)(dst)), \
-        (__force type *)(src), __gkn_err); \
+    __raw_get_mem("ldr", *((type *)(__gkn_dst)), \
+        (__force type *)(__gkn_src), __gkn_err); \
     __uaccess_disable_tco_async(); \
+    \
     if (unlikely(__gkn_err)) \
         goto err_label; \
 } while (0)
@@ -417,11 +435,19 @@ do { \
     } \
 } while (0)
 
+/*
+ * We must not call into the scheduler between uaccess_enable_not_uao() and
+ * uaccess_disable_not_uao(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
 #define __raw_put_user(x, ptr, err) \
 do { \
-    __chk_user_ptr(ptr); \
+    __typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \
+    __typeof__(*(ptr)) __rpu_val = (x); \
+    __chk_user_ptr(__rpu_ptr); \
+    \
     uaccess_enable_not_uao(); \
-    __raw_put_mem("sttr", x, ptr, err); \
+    __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \
     uaccess_disable_not_uao(); \
 } while (0)
 
@@ -446,14 +472,22 @@ do { \
 
 #define put_user __put_user
 
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
 #define __put_kernel_nofault(dst, src, type, err_label) \
 do { \
+    __typeof__(dst) __pkn_dst = (dst); \
+    __typeof__(src) __pkn_src = (src); \
     int __pkn_err = 0; \
     \
     __uaccess_enable_tco_async(); \
-    __raw_put_mem("str", *((type *)(src)), \
-        (__force type *)(dst), __pkn_err); \
+    __raw_put_mem("str", *((type *)(__pkn_src)), \
+        (__force type *)(__pkn_dst), __pkn_err); \
     __uaccess_disable_tco_async(); \
+    \
     if (unlikely(__pkn_err)) \
         goto err_label; \
 } while(0)
@@ -569,15 +569,19 @@ static const struct arm64_ftr_bits ftr_raz[] = {
     ARM64_FTR_END,
 };
 
-#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) { \
+#define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) { \
         .sys_id = id, \
         .reg = &(struct arm64_ftr_reg){ \
-            .name = #id, \
+            .name = id_str, \
             .override = (ovr), \
             .ftr_bits = &((table)[0]), \
     }}
 
-#define ARM64_FTR_REG(id, table) ARM64_FTR_REG_OVERRIDE(id, table, &no_override)
+#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) \
+    __ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr)
+
+#define ARM64_FTR_REG(id, table) \
+    __ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)
 
 struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
 struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
@@ -59,6 +59,7 @@ static inline void hyp_set_page_refcounted(struct hyp_page *p)
 
 /* Allocation */
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order);
+void hyp_split_page(struct hyp_page *page);
 void hyp_get_page(void *addr);
 void hyp_put_page(void *addr);
 
@@ -36,7 +36,18 @@ static const u8 pkvm_hyp_id = 1;
 
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
-    return hyp_alloc_pages(&host_s2_mem, get_order(size));
+    void *addr = hyp_alloc_pages(&host_s2_mem, get_order(size));
+
+    hyp_split_page(hyp_virt_to_page(addr));
+
+    /*
+     * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
+     * so there should be no need to free any of the tail pages to make the
+     * allocation exact.
+     */
+    WARN_ON(size != (PAGE_SIZE << get_order(size)));
+
+    return addr;
 }
 
 static void *host_s2_zalloc_page(void *pool)
@@ -140,6 +140,20 @@ void hyp_get_page(void *addr)
     hyp_page_ref_inc(p);
 }
 
+void hyp_split_page(struct hyp_page *p)
+{
+    unsigned short order = p->order;
+    unsigned int i;
+
+    p->order = 0;
+    for (i = 1; i < (1 << order); i++) {
+        struct hyp_page *tail = p + i;
+
+        tail->order = 0;
+        hyp_set_page_refcounted(tail);
+    }
+}
+
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order)
 {
     unsigned int i = order;
|
|||||||
* x0 - address to the beginning of the page
|
* x0 - address to the beginning of the page
|
||||||
*/
|
*/
|
||||||
SYM_FUNC_START(mte_zero_clear_page_tags)
|
SYM_FUNC_START(mte_zero_clear_page_tags)
|
||||||
|
and x0, x0, #(1 << MTE_TAG_SHIFT) - 1 // clear the tag
|
||||||
mrs x1, dczid_el0
|
mrs x1, dczid_el0
|
||||||
|
tbnz x1, #4, 2f // Branch if DC GZVA is prohibited
|
||||||
and w1, w1, #0xf
|
and w1, w1, #0xf
|
||||||
mov x2, #4
|
mov x2, #4
|
||||||
lsl x1, x2, x1
|
lsl x1, x2, x1
|
||||||
and x0, x0, #(1 << MTE_TAG_SHIFT) - 1 // clear the tag
|
|
||||||
|
|
||||||
1: dc gzva, x0
|
1: dc gzva, x0
|
||||||
add x0, x0, x1
|
add x0, x0, x1
|
||||||
tst x0, #(PAGE_SIZE - 1)
|
tst x0, #(PAGE_SIZE - 1)
|
||||||
b.ne 1b
|
b.ne 1b
|
||||||
ret
|
ret
|
||||||
|
|
||||||
|
2: stz2g x0, [x0], #(MTE_GRANULE_SIZE * 2)
|
||||||
|
tst x0, #(PAGE_SIZE - 1)
|
||||||
|
b.ne 2b
|
||||||
|
ret
|
||||||
SYM_FUNC_END(mte_zero_clear_page_tags)
|
SYM_FUNC_END(mte_zero_clear_page_tags)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -235,6 +235,7 @@ CONFIG_NET_ACT_POLICE=y
 CONFIG_NET_ACT_GACT=y
 CONFIG_NET_ACT_MIRRED=y
 CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_NET_ACT_BPF=y
 CONFIG_VSOCKETS=y
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
@@ -339,6 +340,7 @@ CONFIG_INPUT_UINPUT=y
 CONFIG_SERIAL_8250=y
 # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_RUNTIME_UARTS=0
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
@@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q)
     if (preloaded)
         radix_tree_preload_end();
 
-    ret = blk_iolatency_init(q);
-    if (ret)
-        goto err_destroy_all;
-
     ret = blk_ioprio_init(q);
     if (ret)
         goto err_destroy_all;
@@ -1194,6 +1190,12 @@ int blkcg_init_queue(struct request_queue *q)
     if (ret)
         goto err_destroy_all;
 
+    ret = blk_iolatency_init(q);
+    if (ret) {
+        blk_throtl_exit(q);
+        goto err_destroy_all;
+    }
+
     return 0;
 
 err_destroy_all:
@@ -2535,7 +2535,8 @@ static int binder_proc_transaction(struct binder_transaction *t,
 
     trace_android_vh_binder_proc_transaction_end(current, proc->tsk,
         thread ? thread->task : NULL, t->code, pending_async, !oneway);
+    trace_android_vh_binder_proc_transaction_finish(proc, t,
+        thread ? thread->task : NULL, pending_async, !oneway);
     if (!pending_async)
         binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
 
@@ -279,6 +279,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_binder_transaction);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_preset);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_finish);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_new_ref);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_del_ref);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_post_init_entity_util_avg);
@@ -1688,14 +1688,21 @@ static int fw_devlink_create_devlink(struct device *con,
      * be broken by applying logic. Check for these types of cycles and
      * break them so that devices in the cycle probe properly.
      *
-     * If the supplier's parent is dependent on the consumer, then
-     * the consumer-supplier dependency is a false dependency. So,
-     * treat it as an invalid link.
+     * If the supplier's parent is dependent on the consumer, then the
+     * consumer and supplier have a cyclic dependency. Since fw_devlink
+     * can't tell which of the inferred dependencies are incorrect, don't
+     * enforce probe ordering between any of the devices in this cyclic
+     * dependency. Do this by relaxing all the fw_devlink device links in
+     * this cycle and by treating the fwnode link between the consumer and
+     * the supplier as an invalid dependency.
      */
     sup_dev = fwnode_get_next_parent_dev(sup_handle);
     if (sup_dev && device_is_dependent(con, sup_dev)) {
-        dev_dbg(con, "Not linking to %pfwP - False link\n",
-            sup_handle);
+        dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
+             sup_handle, dev_name(sup_dev));
+        device_links_write_lock();
+        fw_devlink_relax_cycle(con, sup_dev);
+        device_links_write_unlock();
         ret = -EINVAL;
     } else {
         /*
@@ -170,11 +170,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
             ret = regmap_write(map, reg, d->mask_buf[i]);
             if (d->chip->clear_ack) {
                 if (d->chip->ack_invert && !ret)
-                    ret = regmap_write(map, reg,
-                               d->mask_buf[i]);
+                    ret = regmap_write(map, reg, UINT_MAX);
                 else if (!ret)
-                    ret = regmap_write(map, reg,
-                               ~d->mask_buf[i]);
+                    ret = regmap_write(map, reg, 0);
             }
             if (ret != 0)
                 dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
@@ -509,11 +507,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
                        data->status_buf[i]);
             if (chip->clear_ack) {
                 if (chip->ack_invert && !ret)
-                    ret = regmap_write(map, reg,
-                            data->status_buf[i]);
+                    ret = regmap_write(map, reg, UINT_MAX);
                 else if (!ret)
-                    ret = regmap_write(map, reg,
-                            ~data->status_buf[i]);
+                    ret = regmap_write(map, reg, 0);
             }
             if (ret != 0)
                 dev_err(map->dev, "Failed to ack 0x%x: %d\n",
@@ -745,13 +741,9 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
                        d->status_buf[i] & d->mask_buf[i]);
             if (chip->clear_ack) {
                 if (chip->ack_invert && !ret)
-                    ret = regmap_write(map, reg,
-                            (d->status_buf[i] &
-                             d->mask_buf[i]));
+                    ret = regmap_write(map, reg, UINT_MAX);
                 else if (!ret)
-                    ret = regmap_write(map, reg,
-                            ~(d->status_buf[i] &
-                              d->mask_buf[i]));
+                    ret = regmap_write(map, reg, 0);
             }
             if (ret != 0) {
                 dev_err(map->dev, "Failed to ack 0x%x: %d\n",
@@ -338,7 +338,7 @@ static void system_heap_buf_free(struct deferred_freelist_item *item,
         reason = DF_UNDER_PRESSURE; // On failure, just free
 
     table = &buffer->sg_table;
-    for_each_sg(table->sgl, sg, table->nents, i) {
+    for_each_sgtable_sg(table, sg, i) {
         struct page *page = sg_page(sg);
 
         if (reason == DF_UNDER_PRESSURE) {
@@ -637,7 +637,7 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
     if (ret)
         return ret;
 
-    put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf);
+    put_unaligned_le32(sensor_id, t->tx.buf);
     ret = ph->xops->do_xfer(ph, t);
     if (!ret) {
         struct sensors_info *si = ph->get_priv(ph);
@@ -155,7 +155,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
     int cnt;
 
     cmd->domain_id = cpu_to_le32(v->id);
-    cmd->level_index = desc_index;
+    cmd->level_index = cpu_to_le32(desc_index);
     ret = ph->xops->do_xfer(ph, tl);
     if (ret)
         break;
@@ -366,7 +366,7 @@ static unsigned long __trbe_normal_offset(struct perf_output_handle *handle)
 
 static unsigned long trbe_normal_offset(struct perf_output_handle *handle)
 {
-    struct trbe_buf *buf = perf_get_aux(handle);
+    struct trbe_buf *buf = etm_perf_sink_config(handle);
     u64 limit = __trbe_normal_offset(handle);
     u64 head = PERF_IDX2OFF(handle->head, buf);
 
@@ -153,8 +153,8 @@ static void rproc_copy_segment(struct rproc *rproc, void *dest,
                    struct rproc_dump_segment *segment,
                    size_t offset, size_t size)
 {
+    bool is_iomem = false;
     void *ptr;
-    bool is_iomem;
 
     if (segment->dump) {
         segment->dump(rproc, segment, dest, offset, size);
@@ -174,8 +174,8 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
         u64 filesz = elf_phdr_get_p_filesz(class, phdr);
         u64 offset = elf_phdr_get_p_offset(class, phdr);
         u32 type = elf_phdr_get_p_type(class, phdr);
+        bool is_iomem = false;
         void *ptr;
-        bool is_iomem;
 
         if (type != PT_LOAD)
             continue;
@@ -216,7 +216,7 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
         /* put the segment where the remote processor expects it */
         if (filesz) {
             if (is_iomem)
-                memcpy_fromio(ptr, (void __iomem *)(elf_data + offset), filesz);
+                memcpy_toio((void __iomem *)ptr, elf_data + offset, filesz);
             else
                 memcpy(ptr, elf_data + offset, filesz);
         }
@@ -11,6 +11,7 @@
  */
 struct ufs_hba_add_info {
     struct ufs_hba hba;
+    u32 reserved_slot;
     struct request **tmf_rqs;
 #ifdef CONFIG_SCSI_UFS_HPB
     struct ufshpb_dev_info hpb_dev;
@@ -126,8 +126,9 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
 enum {
     UFSHCD_MAX_CHANNEL = 0,
     UFSHCD_MAX_ID = 1,
-    UFSHCD_CMD_PER_LUN = 32,
-    UFSHCD_CAN_QUEUE = 32,
+    UFSHCD_NUM_RESERVED = 1,
+    UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
+    UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
 };
 
 /* UFSHCD states */
@@ -2184,6 +2185,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
     hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
     hba->nutmrs =
         ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+    ufs_hba_add_info(hba)->reserved_slot = hba->nutrs - 1;
 
     /* Read crypto capabilities */
     err = ufshcd_hba_init_crypto_capabilities(hba);
@@ -2922,27 +2924,16 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
         enum dev_cmd_type cmd_type, int timeout)
 {
-    struct request_queue *q = hba->cmd_queue;
-    struct request *req;
+    DECLARE_COMPLETION_ONSTACK(wait);
+    const u32 tag = ufs_hba_add_info(hba)->reserved_slot;
     struct ufshcd_lrb *lrbp;
     int err;
-    int tag;
-    struct completion wait;
+
+    /* Protects use of ufs_hba_add_info(hba)->reserved_slot. */
+    lockdep_assert_held(&hba->dev_cmd.lock);
 
     down_read(&hba->clk_scaling_lock);
 
-    req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
-    if (IS_ERR(req)) {
-        err = PTR_ERR(req);
-        goto out_unlock;
-    }
-    tag = req->tag;
-    WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
-    /* Set the timeout such that the SCSI error handler is not activated. */
-    req->timeout = msecs_to_jiffies(2 * timeout);
-    blk_mq_start_request(req);
-
-    init_completion(&wait);
     lrbp = &hba->lrb[tag];
     WARN_ON(lrbp->cmd);
     err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
@@ -2961,8 +2952,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
             err ? "query_complete_err" : "query_complete");
 
 out:
-    blk_put_request(req);
-out_unlock:
     up_read(&hba->clk_scaling_lock);
     return err;
 }
@@ -3939,14 +3928,13 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
  */
 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 {
-    struct completion uic_async_done;
+    DECLARE_COMPLETION_ONSTACK(uic_async_done);
     unsigned long flags;
     u8 status;
     int ret;
     bool reenable_intr = false;
 
     mutex_lock(&hba->uic_cmd_mutex);
-    init_completion(&uic_async_done);
     ufshcd_add_delay_before_dme_cmd(hba);
 
     spin_lock_irqsave(hba->host->host_lock, flags);
@@ -6626,25 +6614,17 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
                     enum dev_cmd_type cmd_type,
                     enum query_opcode desc_op)
 {
-    struct request_queue *q = hba->cmd_queue;
-    struct request *req;
+    DECLARE_COMPLETION_ONSTACK(wait);
+    const u32 tag = ufs_hba_add_info(hba)->reserved_slot;
     struct ufshcd_lrb *lrbp;
     int err = 0;
-    int tag;
-    struct completion wait;
     u8 upiu_flags;
 
+    /* Protects use of ufs_hba_add_info(hba)->reserved_slot. */
+    lockdep_assert_held(&hba->dev_cmd.lock);
+
     down_read(&hba->clk_scaling_lock);
 
-    req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
-    if (IS_ERR(req)) {
-        err = PTR_ERR(req);
-        goto out_unlock;
-    }
-    tag = req->tag;
-    WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
-
-    init_completion(&wait);
     lrbp = &hba->lrb[tag];
     WARN_ON(lrbp->cmd);
     lrbp->cmd = NULL;
@@ -6711,8 +6691,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
         }
     }
 
-    blk_put_request(req);
-out_unlock:
     up_read(&hba->clk_scaling_lock);
     return err;
 }
@@ -6820,7 +6798,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
             err = ufshcd_clear_cmd(hba, pos);
             if (err)
                 break;
-            __ufshcd_transfer_req_compl(hba, pos);
+            __ufshcd_transfer_req_compl(hba, 1U << pos);
         }
     }
 
@@ -9323,8 +9301,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
     /* Configure LRB */
     ufshcd_host_memory_configure(hba);
 
-    host->can_queue = hba->nutrs;
-    host->cmd_per_lun = hba->nutrs;
+    host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
+    host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
     host->max_id = UFSHCD_MAX_ID;
     host->max_lun = UFS_MAX_LUNS;
     host->max_channel = UFSHCD_MAX_CHANNEL;
@@ -739,6 +739,7 @@ struct ufs_hba_monitor {
  * @capabilities: UFS Controller Capabilities
  * @nutrs: Transfer Request Queue depth supported by controller
  * @nutmrs: Task Management Queue depth supported by controller
+ * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
  * @ufs_version: UFS Version to which controller complies
  * @vops: pointer to variant specific operations
  * @priv: pointer to variant specific private data
@@ -827,6 +828,12 @@ struct ufs_hba {
     u32 capabilities;
     int nutrs;
     int nutmrs;
+#if 0
+    /*
+     * This has been moved into struct ufs_hba_add_info because of the GKI.
+     */
+    u32 reserved_slot;
+#endif
     u32 ufs_version;
     const struct ufs_hba_variant_ops *vops;
     struct ufs_hba_variant_params *vps;
@@ -144,7 +144,7 @@
 #define DWC3_GHWPARAMS8     0xc600
 #define DWC3_GUCTL3         0xc60c
 #define DWC3_GFLADJ         0xc630
-#define DWC3_GHWPARAMS9     0xc680
+#define DWC3_GHWPARAMS9     0xc6e0
 
 /* Device Registers */
 #define DWC3_DCFG           0xc700
@@ -644,7 +644,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
     struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
     struct device_node *np = pdev->dev.of_node, *dwc3_np;
     struct device *dev = &pdev->dev;
-    struct property *prop;
     int ret;
 
     dwc3_np = of_get_child_by_name(np, "dwc3");
@@ -653,20 +652,6 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
         return -ENODEV;
     }
 
-    prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
-    if (!prop) {
-        ret = -ENOMEM;
-        dev_err(dev, "unable to allocate memory for property\n");
-        goto node_put;
-    }
-
-    prop->name = "tx-fifo-resize";
-    ret = of_add_property(dwc3_np, prop);
-    if (ret) {
-        dev_err(dev, "unable to add property\n");
-        goto node_put;
-    }
-
     ret = of_platform_populate(np, NULL, NULL, dev);
     if (ret) {
         dev_err(dev, "failed to register dwc3 core - %d\n", ret);
@@ -345,6 +345,10 @@ static void gser_free(struct usb_function *f)
 
 static void gser_unbind(struct usb_configuration *c, struct usb_function *f)
 {
+    struct f_gser *gser = func_to_gser(f);
+
+    /* Ensure port is disconnected before unbinding */
+    gserial_disconnect(&gser->port);
     usb_free_all_descriptors(f);
 }
 
@@ -590,10 +590,12 @@ static u32 get_esit_boundary(struct mu3h_sch_ep_info *sch_ep)
     u32 boundary = sch_ep->esit;
 
     if (sch_ep->sch_tt) { /* LS/FS with TT */
-        /* tune for CS */
-        if (sch_ep->ep_type != ISOC_OUT_EP)
-            boundary++;
-        else if (boundary > 1) /* normally esit >= 8 for FS/LS */
+        /*
+         * tune for CS, normally esit >= 8 for FS/LS,
+         * not add one for other types to avoid access array
+         * out of boundary
+         */
+        if (sch_ep->ep_type == ISOC_OUT_EP && boundary > 1)
             boundary--;
     }
 
@@ -722,7 +722,7 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
         tcpm_pd_receive(tcpci->port, &msg);
     }
 
-    if (status & TCPC_ALERT_EXTENDED_STATUS) {
+    if (tcpci->data->vbus_vsafe0v && (status & TCPC_ALERT_EXTENDED_STATUS)) {
         ret = regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, &raw);
         if (!ret && (raw & TCPC_EXTENDED_STATUS_VSAFE0V))
             tcpm_vbus_change(tcpci->port);
@@ -4244,6 +4244,8 @@ free_node_inode:
 free_stats:
     f2fs_destroy_stats(sbi);
 free_nm:
+    /* stop discard thread before destroying node manager */
+    f2fs_stop_discard_thread(sbi);
     f2fs_destroy_node_manager(sbi);
 free_sm:
     f2fs_destroy_segment_manager(sbi);
@@ -175,7 +175,6 @@ void incfs_free_mount_info(struct mount_info *mi)
         kfree(mi->pseudo_file_xattr[i].data);
     kfree(mi->mi_per_uid_read_timeouts);
     incfs_free_sysfs_node(mi->mi_sysfs_node);
-    kfree(mi->mi_options.sysfs_name);
     kfree(mi);
 }
 
@@ -1855,10 +1855,11 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
         goto err;
     }
 
-    path_put(&backing_dir_path);
+    mi->mi_backing_dir_path = backing_dir_path;
     sb->s_flags |= SB_ACTIVE;
 
     pr_debug("incfs: mount\n");
+    free_options(&options);
     return dget(sb->s_root);
 err:
     sb->s_fs_info = NULL;
@@ -1903,9 +1904,13 @@ out:
 void incfs_kill_sb(struct super_block *sb)
 {
     struct mount_info *mi = sb->s_fs_info;
+    struct inode *dinode = d_inode(mi->mi_backing_dir_path.dentry);
 
     pr_debug("incfs: unmount\n");
-    generic_shutdown_super(sb);
+    vfs_rmdir(dinode, mi->mi_index_dir);
+    vfs_rmdir(dinode, mi->mi_incomplete_dir);
+
+    kill_anon_super(sb);
     incfs_free_mount_info(mi);
     sb->s_fs_info = NULL;
 }
@@ -11,6 +11,7 @@
 
 #include <linux/types.h>
 #include <linux/list.h>
+#include <linux/bits.h>
 #include <linux/err.h>
 #include <linux/android_kabi.h>
 
@@ -1646,6 +1646,22 @@ static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
     return 0;
 }
 
+/* This variant of skb_unclone() makes sure skb->truesize is not changed */
+static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
+{
+    might_sleep_if(gfpflags_allow_blocking(pri));
+
+    if (skb_cloned(skb)) {
+        unsigned int save = skb->truesize;
+        int res;
+
+        res = pskb_expand_head(skb, 0, 0, pri);
+        skb->truesize = save;
+        return res;
+    }
+    return 0;
+}
+
 /**
  * skb_header_cloned - is the header a clone
  * @skb: buffer to check
@@ -69,6 +69,10 @@ DECLARE_HOOK(android_vh_binder_proc_transaction_end,
         struct task_struct *binder_th_task, unsigned int code,
         bool pending_async, bool sync),
     TP_ARGS(caller_task, binder_proc_task, binder_th_task, code, pending_async, sync));
+DECLARE_HOOK(android_vh_binder_proc_transaction_finish,
+    TP_PROTO(struct binder_proc *proc, struct binder_transaction *t,
+        struct task_struct *binder_th_task, bool pending_async, bool sync),
+    TP_ARGS(proc, t, binder_th_task, pending_async, sync));
 DECLARE_HOOK(android_vh_binder_new_ref,
     TP_PROTO(struct task_struct *proc, uint32_t ref_desc, int node_debug_id),
     TP_ARGS(proc, ref_desc, node_debug_id));
@@ -98,6 +102,7 @@ DECLARE_HOOK(android_vh_binder_read_done,
 DECLARE_HOOK(android_vh_binder_has_work_ilocked,
     TP_PROTO(struct binder_thread *thread, bool do_proc_work, int *ret),
     TP_ARGS(thread, do_proc_work, ret));
 
 /* macro versions of hooks are no longer required */
+
 #endif /* _TRACE_HOOK_BINDER_H */
@@ -880,8 +880,9 @@ int tg_nop(struct task_group *tg, void *data)
 }
 #endif
 
-static void set_load_weight(struct task_struct *p, bool update_load)
+static void set_load_weight(struct task_struct *p)
 {
+    bool update_load = !(READ_ONCE(p->state) & TASK_NEW);
     int prio = p->static_prio - MAX_RT_PRIO;
     struct load_weight *load = &p->se.load;
 
@@ -3484,7 +3485,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
         p->static_prio = NICE_TO_PRIO(0);
 
         p->prio = p->normal_prio = p->static_prio;
-        set_load_weight(p, false);
+        set_load_weight(p);
 
         /*
          * We don't need the reset flag anymore after the fork. It has
@@ -5252,7 +5253,7 @@ void set_user_nice(struct task_struct *p, long nice)
         put_prev_task(rq, p);
 
     p->static_prio = NICE_TO_PRIO(nice);
-    set_load_weight(p, true);
+    set_load_weight(p);
     old_prio = p->prio;
     p->prio = effective_prio(p);
 
@@ -5426,7 +5427,7 @@ static void __setscheduler_params(struct task_struct *p,
      */
     p->rt_priority = attr->sched_priority;
     p->normal_prio = normal_prio(p);
-    set_load_weight(p, true);
+    set_load_weight(p);
 }
 
 /*
@@ -7569,7 +7570,7 @@ void __init sched_init(void)
         atomic_set(&rq->nr_iowait, 0);
     }
 
-    set_load_weight(&init_task, false);
+    set_load_weight(&init_task);
 
     /*
      * The boot idle thread does lazy MMU switching as well:
@@ -233,8 +233,8 @@ trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
     if (!node)
         return;
     /* per-event key starts with "event.GROUP.EVENT" */
-    xbc_node_for_each_child(node, gnode)
-        xbc_node_for_each_child(gnode, enode)
+    xbc_node_for_each_subkey(node, gnode)
+        xbc_node_for_each_subkey(gnode, enode)
             trace_boot_init_one_event(tr, gnode, enode);
 }
 #else
@@ -315,7 +315,7 @@ trace_boot_init_instances(struct xbc_node *node)
     if (!node)
         return;
 
-    xbc_node_for_each_child(node, inode) {
+    xbc_node_for_each_subkey(node, inode) {
         p = xbc_node_get_data(inode);
         if (!p || *p == '\0')
             continue;
@@ -574,6 +574,7 @@ static const struct file_operations objects_fops = {
     .open = open_objects,
     .read = seq_read,
     .llseek = seq_lseek,
+    .release = seq_release,
 };
 
 static int __init kfence_debugfs_init(void)
@@ -1541,7 +1541,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
                  MEMORY_OFFLINE | REPORT_FAILURE, NULL);
     if (ret) {
         reason = "failure to isolate range";
-        goto failed_removal;
+        goto failed_removal_lru_cache_disabled;
     }
 
     drain_all_pages(zone);
@@ -1656,6 +1656,8 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages)
 failed_removal_isolated:
     undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
     memory_notify(MEM_CANCEL_OFFLINE, &arg);
+failed_removal_lru_cache_disabled:
+    lru_cache_enable();
 failed_removal:
     pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
          (unsigned long long) start_pfn << PAGE_SHIFT,
mm/slub.c
@@ -4687,6 +4687,7 @@ struct loc_track {
     unsigned long max;
     unsigned long count;
     struct location *loc;
+    loff_t idx;
 };
 
 static struct dentry *slab_debugfs_root;
@@ -5718,11 +5719,11 @@ __initcall(slab_sysfs_init);
 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
 static int slab_debugfs_show(struct seq_file *seq, void *v)
 {
-
-    struct location *l;
-    unsigned int idx = *(unsigned int *)v;
     struct loc_track *t = seq->private;
+    struct location *l;
+    unsigned long idx;
 
+    idx = (unsigned long) t->idx;
     if (idx < t->count) {
         l = &t->loc[idx];
 
@@ -5771,16 +5772,18 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
 {
     struct loc_track *t = seq->private;
 
-    v = ppos;
-    ++*ppos;
+    t->idx = ++(*ppos);
     if (*ppos <= t->count)
-        return v;
+        return ppos;
 
     return NULL;
 }
 
 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
 {
+    struct loc_track *t = seq->private;
+
+    t->idx = *ppos;
     return ppos;
 }
 
@@ -3296,19 +3296,7 @@ EXPORT_SYMBOL(skb_split);
  */
 static int skb_prepare_for_shift(struct sk_buff *skb)
 {
-    int ret = 0;
-
-    if (skb_cloned(skb)) {
-        /* Save and restore truesize: pskb_expand_head() may reallocate
-         * memory where ksize(kmalloc(S)) != ksize(kmalloc(S)), but we
-         * cannot change truesize at this point.
-         */
-        unsigned int save_truesize = skb->truesize;
-
-        ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-        skb->truesize = save_truesize;
-    }
-    return ret;
+    return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
 }
 
 /**
@@ -1561,7 +1561,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
         return -ENOMEM;
     }
 
-    if (skb_unclone(skb, gfp))
+    if (skb_unclone_keeptruesize(skb, gfp))
         return -ENOMEM;
 
     /* Get a new skb... force flag on. */
@@ -1670,7 +1670,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 {
     u32 delta_truesize;
 
-    if (skb_unclone(skb, GFP_ATOMIC))
+    if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
         return -ENOMEM;
 
     delta_truesize = __pskb_trim_head(skb, len);
@@ -3184,7 +3184,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
                  cur_mss, GFP_ATOMIC))
             return -ENOMEM; /* We'll try again later. */
     } else {
-        if (skb_unclone(skb, GFP_ATOMIC))
+        if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
             return -ENOMEM;
 
         diff = tcp_skb_pcount(skb);
@@ -413,9 +413,6 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
     uffd_test_ops->allocate_area((void **)&area_src);
     uffd_test_ops->allocate_area((void **)&area_dst);
 
-    uffd_test_ops->release_pages(area_src);
-    uffd_test_ops->release_pages(area_dst);
-
     userfaultfd_open(features);
 
     count_verify = malloc(nr_pages * sizeof(unsigned long long));
@@ -436,6 +433,26 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
         *(area_count(area_src, nr) + 1) = 1;
     }
 
+    /*
+     * After initialization of area_src, we must explicitly release pages
+     * for area_dst to make sure it's fully empty. Otherwise we could have
+     * some area_dst pages be errornously initialized with zero pages,
+     * hence we could hit memory corruption later in the test.
+     *
+     * One example is when THP is globally enabled, above allocate_area()
+     * calls could have the two areas merged into a single VMA (as they
+     * will have the same VMA flags so they're mergeable). When we
+     * initialize the area_src above, it's possible that some part of
+     * area_dst could have been faulted in via one huge THP that will be
+     * shared between area_src and area_dst. It could cause some of the
+     * area_dst won't be trapped by missing userfaults.
+     *
+     * This release_pages() will guarantee even if that happened, we'll
+     * proactively split the thp and drop any accidentally initialized
+     * pages within area_dst.
+     */
+    uffd_test_ops->release_pages(area_dst);
+
     pipefd = malloc(sizeof(int) * nr_cpus * 2);
     if (!pipefd)
         err("pipefd");