Snap for 8186911 from 33c07be45c to android12-5.10-keystone-qcom-release

Change-Id: Iae0fc42a6cba8558fee1cd53ec73b8e66eff6fed
Android Build Coastguard Worker
2022-02-16 01:00:22 +00:00
28 changed files with 174 additions and 56 deletions


@@ -4949,6 +4949,7 @@
<elf-symbol name='submit_bio_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x32a9deed'/>
<elf-symbol name='subsys_system_register' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0xecd1be91'/>
<elf-symbol name='suspend_set_ops' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x1ab0c7e0'/>
+<elf-symbol name='swiotlb_max_segment' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x5b6b0329'/>
<elf-symbol name='swiotlb_nr_tbl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x5e51cd74'/>
<elf-symbol name='sync_blockdev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x219fe0be'/>
<elf-symbol name='sync_dirty_buffer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x87e19c21'/>
@@ -139990,6 +139991,9 @@
<parameter type-id='9d109fcf' name='ops' filepath='kernel/power/suspend.c' line='205' column='1'/>
<return type-id='48b5725f'/>
</function-decl>
+<function-decl name='swiotlb_max_segment' mangled-name='swiotlb_max_segment' filepath='kernel/dma/swiotlb.c' line='138' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='swiotlb_max_segment'>
+<return type-id='f0981eeb'/>
+</function-decl>
<function-decl name='swiotlb_nr_tbl' mangled-name='swiotlb_nr_tbl' filepath='kernel/dma/swiotlb.c' line='132' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='swiotlb_nr_tbl'>
<return type-id='7359adad'/>
</function-decl>


@@ -1952,6 +1952,7 @@
dma_heap_get_dev
__sg_page_iter_next
__sg_page_iter_start
+swiotlb_max_segment
# required by tcpci_husb311.ko
i2c_smbus_read_word_data


@@ -433,7 +433,6 @@
	interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
	phys = <&qusb_phy_0>, <&usb0_ssphy>;
	phy-names = "usb2-phy", "usb3-phy";
-	tx-fifo-resize;
	snps,is-utmi-l1-suspend;
	snps,hird-threshold = /bits/ 8 <0x0>;
	snps,dis_u2_susphy_quirk;
@@ -474,7 +473,6 @@
	interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
	phys = <&qusb_phy_1>, <&usb1_ssphy>;
	phy-names = "usb2-phy", "usb3-phy";
-	tx-fifo-resize;
	snps,is-utmi-l1-suspend;
	snps,hird-threshold = /bits/ 8 <0x0>;
	snps,dis_u2_susphy_quirk;


@@ -84,10 +84,12 @@ static inline void __dc_gzva(u64 p)
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
					 bool init)
{
-	u64 curr, mask, dczid_bs, end1, end2, end3;
+	u64 curr, mask, dczid, dczid_bs, dczid_dzp, end1, end2, end3;

	/* Read DC G(Z)VA block size from the system register. */
-	dczid_bs = 4ul << (read_cpuid(DCZID_EL0) & 0xf);
+	dczid = read_cpuid(DCZID_EL0);
+	dczid_bs = 4ul << (dczid & 0xf);
+	dczid_dzp = (dczid >> 4) & 1;

	curr = (u64)__tag_set(addr, tag);
	mask = dczid_bs - 1;
@@ -106,7 +108,7 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
 */
#define SET_MEMTAG_RANGE(stg_post, dc_gva) \
do { \
-	if (size >= 2 * dczid_bs) { \
+	if (!dczid_dzp && size >= 2 * dczid_bs) { \
		do { \
			curr = stg_post(curr); \
		} while (curr < end1); \
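
For reference, the DCZID_EL0 decode this hunk depends on can be modeled in plain C. The field layout (BS in bits [3:0] as log2 of the block size in 4-byte words, DZP in bit 4 meaning DC GZVA/DC ZVA must not be used) is architectural; the helper and values below are a hypothetical sketch, not kernel code:

#include <stdio.h>

/* Hypothetical decode of a DCZID_EL0 value, mirroring the hunk above. */
static void dczid_decode(unsigned long dczid,
			 unsigned long *block_bytes, int *dzp)
{
	*block_bytes = 4ul << (dczid & 0xf);	/* BS: log2 size in 4-byte words */
	*dzp = (int)((dczid >> 4) & 1);		/* DZP: DC (G)ZVA prohibited */
}

int main(void)
{
	unsigned long bs;
	int dzp;

	dczid_decode(0x4, &bs, &dzp);	/* BS = 4, DZP = 0 */
	printf("block = %lu bytes, dzp = %d\n", bs, dzp);	/* 64, 0 */
	return 0;
}

The new dczid_dzp check simply skips the DC GZVA fast path whenever DZP says the instruction is prohibited.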


@@ -342,12 +342,22 @@ do { \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)

+/*
+ * We must not call into the scheduler between uaccess_enable_not_uao() and
+ * uaccess_disable_not_uao(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
#define __raw_get_user(x, ptr, err) \
do { \
+	__typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \
+	__typeof__(x) __rgu_val; \
	__chk_user_ptr(ptr); \
+	\
	uaccess_enable_not_uao(); \
-	__raw_get_mem("ldtr", x, ptr, err); \
+	__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \
	uaccess_disable_not_uao(); \
+	\
+	(x) = __rgu_val; \
} while (0)

#define __get_user_error(x, ptr, err) \
@@ -371,14 +381,22 @@ do { \
#define get_user	__get_user

+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
+	__typeof__(dst) __gkn_dst = (dst); \
+	__typeof__(src) __gkn_src = (src); \
	int __gkn_err = 0; \
	\
	__uaccess_enable_tco_async(); \
-	__raw_get_mem("ldr", *((type *)(dst)), \
-		      (__force type *)(src), __gkn_err); \
+	__raw_get_mem("ldr", *((type *)(__gkn_dst)), \
+		      (__force type *)(__gkn_src), __gkn_err); \
	__uaccess_disable_tco_async(); \
+	\
	if (unlikely(__gkn_err)) \
		goto err_label; \
} while (0)
@@ -417,11 +435,19 @@ do { \
	} \
} while (0)

+/*
+ * We must not call into the scheduler between uaccess_enable_not_uao() and
+ * uaccess_disable_not_uao(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
#define __raw_put_user(x, ptr, err) \
do { \
-	__chk_user_ptr(ptr); \
+	__typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \
+	__typeof__(*(ptr)) __rpu_val = (x); \
+	__chk_user_ptr(__rpu_ptr); \
+	\
	uaccess_enable_not_uao(); \
-	__raw_put_mem("sttr", x, ptr, err); \
+	__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \
	uaccess_disable_not_uao(); \
} while (0)
@@ -446,14 +472,22 @@ do { \
#define put_user	__put_user

+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
+	__typeof__(dst) __pkn_dst = (dst); \
+	__typeof__(src) __pkn_src = (src); \
	int __pkn_err = 0; \
	\
	__uaccess_enable_tco_async(); \
-	__raw_put_mem("str", *((type *)(src)), \
-		      (__force type *)(dst), __pkn_err); \
+	__raw_put_mem("str", *((type *)(__pkn_src)), \
+		      (__force type *)(__pkn_dst), __pkn_err); \
	__uaccess_disable_tco_async(); \
+	\
	if (unlikely(__pkn_err)) \
		goto err_label; \
} while(0)
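
The single-evaluation rewrite above matters because a macro argument with side effects would otherwise be evaluated inside the uaccess critical section, and possibly more than once. A generic illustration of the hazard, with hypothetical names (not kernel API):

#include <stdio.h>

#define SQUARE_TWICE(x)	((x) * (x))
#define SQUARE_ONCE(x)	({ __typeof__(x) _v = (x); _v * _v; })	/* GNU C statement expression, kernel style */

static int counter;
static int next_val(void) { return ++counter; }

int main(void)
{
	counter = 0;
	printf("%d\n", SQUARE_TWICE(next_val()));	/* 1 * 2 == 2: argument ran twice */
	counter = 0;
	printf("%d\n", SQUARE_ONCE(next_val()));	/* 1 * 1 == 1: argument ran once */
	return 0;
}

Binding `x`/`ptr` (or `dst`/`src`) to locals up front gives the uaccess macros the SQUARE_ONCE behaviour, so any blocking call hidden in the caller's expression completes before uaccess_enable_not_uao() or __uaccess_enable_tco_async().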


@@ -569,15 +569,19 @@ static const struct arm64_ftr_bits ftr_raz[] = {
	ARM64_FTR_END,
};

-#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) { \
+#define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) { \
		.sys_id = id, \
		.reg = &(struct arm64_ftr_reg){ \
-			.name = #id, \
+			.name = id_str, \
			.override = (ovr), \
			.ftr_bits = &((table)[0]), \
	}}

-#define ARM64_FTR_REG(id, table) ARM64_FTR_REG_OVERRIDE(id, table, &no_override)
+#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) \
+	__ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr)
+
+#define ARM64_FTR_REG(id, table) \
+	__ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)

struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
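
The extra __ARM64_FTR_REG_OVERRIDE() level exists because `#arg` stringizes a macro argument exactly as written at the call site, without expanding it; splitting the macro moves the stringification one expansion level up, presumably so `id` can pass through further macro layers without losing its original spelling. A stand-alone demonstration of that preprocessor behaviour (all names hypothetical):

#include <stdio.h>

#define STR(x)	#x	/* stringizes the argument literally */
#define XSTR(x)	STR(x)	/* expands the argument first, then stringizes */
#define SYS_REG	0x1234

int main(void)
{
	printf("%s\n", STR(SYS_REG));	/* prints "SYS_REG" */
	printf("%s\n", XSTR(SYS_REG));	/* prints "0x1234" */
	return 0;
}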


@@ -59,6 +59,7 @@ static inline void hyp_set_page_refcounted(struct hyp_page *p)
/* Allocation */
void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order);
+void hyp_split_page(struct hyp_page *page);
void hyp_get_page(void *addr);
void hyp_put_page(void *addr);


@@ -36,7 +36,18 @@ static const u8 pkvm_hyp_id = 1;
static void *host_s2_zalloc_pages_exact(size_t size)
{
-	return hyp_alloc_pages(&host_s2_mem, get_order(size));
+	void *addr = hyp_alloc_pages(&host_s2_mem, get_order(size));
+
+	hyp_split_page(hyp_virt_to_page(addr));
+
+	/*
+	 * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
+	 * so there should be no need to free any of the tail pages to make the
+	 * allocation exact.
+	 */
+	WARN_ON(size != (PAGE_SIZE << get_order(size)));
+
+	return addr;
}

static void *host_s2_zalloc_page(void *pool)


@@ -140,6 +140,20 @@ void hyp_get_page(void *addr)
	hyp_page_ref_inc(p);
}

+void hyp_split_page(struct hyp_page *p)
+{
+	unsigned short order = p->order;
+	unsigned int i;
+
+	p->order = 0;
+	for (i = 1; i < (1 << order); i++) {
+		struct hyp_page *tail = p + i;
+
+		tail->order = 0;
+		hyp_set_page_refcounted(tail);
+	}
+}
+
void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order)
{
	unsigned int i = order;
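
hyp_split_page() demotes one order-N buddy block into 2^N independent order-0 pages, giving every tail page its own reference so each page can later be get/put individually. A hedged user-space model of that bookkeeping (the struct and helpers are stand-ins, not the hyp allocator's types):

#include <stdio.h>

struct page_model { unsigned short order; unsigned short refcount; };

/* Model of hyp_split_page(): order-N block -> 2^N order-0 pages. */
static void split_model(struct page_model *p)
{
	unsigned short order = p->order;
	unsigned int i;

	p->order = 0;				/* head becomes order-0 */
	for (i = 1; i < (1u << order); i++) {
		p[i].order = 0;			/* each tail becomes order-0 */
		p[i].refcount = 1;		/* stands in for hyp_set_page_refcounted() */
	}
}

int main(void)
{
	struct page_model pages[4] = { [0] = { .order = 2, .refcount = 1 } };

	split_model(pages);	/* one order-2 block -> 4 order-0 pages */
	printf("tail refcount = %u\n", pages[3].refcount);	/* 1 */
	return 0;
}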


@@ -43,17 +43,23 @@ SYM_FUNC_END(mte_clear_page_tags)
 * x0 - address to the beginning of the page
 */
SYM_FUNC_START(mte_zero_clear_page_tags)
+	and	x0, x0, #(1 << MTE_TAG_SHIFT) - 1 // clear the tag
	mrs	x1, dczid_el0
+	tbnz	x1, #4, 2f // Branch if DC GZVA is prohibited
	and	w1, w1, #0xf
	mov	x2, #4
	lsl	x1, x2, x1
-	and	x0, x0, #(1 << MTE_TAG_SHIFT) - 1 // clear the tag

1:	dc	gzva, x0
	add	x0, x0, x1
	tst	x0, #(PAGE_SIZE - 1)
	b.ne	1b
	ret

+2:	stz2g	x0, [x0], #(MTE_GRANULE_SIZE * 2)
+	tst	x0, #(PAGE_SIZE - 1)
+	b.ne	2b
+	ret
SYM_FUNC_END(mte_zero_clear_page_tags)

/*


@@ -1182,10 +1182,6 @@ int blkcg_init_queue(struct request_queue *q)
	if (preloaded)
		radix_tree_preload_end();

-	ret = blk_iolatency_init(q);
-	if (ret)
-		goto err_destroy_all;
-
	ret = blk_ioprio_init(q);
	if (ret)
		goto err_destroy_all;
@@ -1194,6 +1190,12 @@ int blkcg_init_queue(struct request_queue *q)
	if (ret)
		goto err_destroy_all;

+	ret = blk_iolatency_init(q);
+	if (ret) {
+		blk_throtl_exit(q);
+		goto err_destroy_all;
+	}
+
	return 0;

err_destroy_all:
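
Ordering blk_iolatency_init() last follows the usual goto-unwind convention: each step's error path must tear down everything initialized before it, so the step whose failure needs an explicit blk_throtl_exit() goes after blk_throtl_init(). A generic sketch of that pattern, with hypothetical step names:

/* Hypothetical init steps standing in for blk_throtl_init()/blk_iolatency_init(). */
int step_a_init(void);
void step_a_exit(void);
int step_b_init(void);

int subsys_init(void)
{
	int ret;

	ret = step_a_init();
	if (ret)
		return ret;

	ret = step_b_init();	/* ordered last: its failure must undo step_a */
	if (ret) {
		step_a_exit();
		return ret;
	}
	return 0;
}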


@@ -2535,7 +2535,8 @@ static int binder_proc_transaction(struct binder_transaction *t,
	trace_android_vh_binder_proc_transaction_end(current, proc->tsk,
		thread ? thread->task : NULL, t->code, pending_async, !oneway);
+	trace_android_vh_binder_proc_transaction_finish(proc, t,
+		thread ? thread->task : NULL, pending_async, !oneway);
	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);


@@ -279,6 +279,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_binder_transaction);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_preset);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_proc_transaction_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_new_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_del_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_post_init_entity_util_avg);


@@ -338,7 +338,7 @@ static void system_heap_buf_free(struct deferred_freelist_item *item,
		reason = DF_UNDER_PRESSURE; // On failure, just free

	table = &buffer->sg_table;
-	for_each_sg(table->sgl, sg, table->nents, i) {
+	for_each_sgtable_sg(table, sg, i) {
		struct page *page = sg_page(sg);

		if (reason == DF_UNDER_PRESSURE) {
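
The distinction here is that `table->nents` is the DMA-mapped entry count, which can be smaller than the allocated count after mapping coalesces entries; when freeing the backing pages, every allocated entry must be visited. For reference, the helper in include/linux/scatterlist.h is essentially:

#define for_each_sgtable_sg(sgt, sg, i) \
	for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)

i.e. it iterates over orig_nents, the number of entries the table was built with.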


@@ -153,8 +153,8 @@ static void rproc_copy_segment(struct rproc *rproc, void *dest,
			       struct rproc_dump_segment *segment,
			       size_t offset, size_t size)
{
+	bool is_iomem = false;
	void *ptr;
-	bool is_iomem;

	if (segment->dump) {
		segment->dump(rproc, segment, dest, offset, size);


@@ -174,8 +174,8 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
		u64 filesz = elf_phdr_get_p_filesz(class, phdr);
		u64 offset = elf_phdr_get_p_offset(class, phdr);
		u32 type = elf_phdr_get_p_type(class, phdr);
+		bool is_iomem = false;
		void *ptr;
-		bool is_iomem;

		if (type != PT_LOAD)
			continue;
@@ -216,7 +216,7 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
		/* put the segment where the remote processor expects it */
		if (filesz) {
			if (is_iomem)
-				memcpy_fromio(ptr, (void __iomem *)(elf_data + offset), filesz);
+				memcpy_toio((void __iomem *)ptr, elf_data + offset, filesz);
			else
				memcpy(ptr, elf_data + offset, filesz);
		}


@@ -6797,7 +6797,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
			err = ufshcd_clear_cmd(hba, pos);
			if (err)
				break;
-			__ufshcd_transfer_req_compl(hba, pos);
+			__ufshcd_transfer_req_compl(hba, 1U << pos);
		}
	}
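
The second argument of __ufshcd_transfer_req_compl() is a bitmask of completed doorbell slots, not a slot index, so passing the raw `pos` would complete whichever slots happen to match the set bits of the index. A trivial illustration:

#include <stdio.h>

int main(void)
{
	unsigned int pos = 5;

	printf("mask for slot %u: 0x%x\n", pos, 1U << pos);	/* 0x20 */
	/* Passing pos itself (0x5 == 0b101) would instead name slots 0 and 2. */
	return 0;
}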


@@ -1975,6 +1975,9 @@ unknown:
			if (w_index != 0x5 || (w_value >> 8))
				break;
			interface = w_value & 0xFF;
+			if (interface >= MAX_CONFIG_INTERFACES ||
+			    !os_desc_cfg->interface[interface])
+				break;
			buf[6] = w_index;
			count = count_ext_prop(os_desc_cfg,
				interface);


@@ -637,14 +637,17 @@ static int rndis_set_response(struct rndis_params *params,
	rndis_set_cmplt_type *resp;
	rndis_resp_t *r;

+	BufLength = le32_to_cpu(buf->InformationBufferLength);
+	BufOffset = le32_to_cpu(buf->InformationBufferOffset);
+	if ((BufLength > RNDIS_MAX_TOTAL_SIZE) ||
+	    (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE))
+		return -EINVAL;
+
	r = rndis_add_response(params, sizeof(rndis_set_cmplt_type));
	if (!r)
		return -ENOMEM;
	resp = (rndis_set_cmplt_type *)r->buf;

-	BufLength = le32_to_cpu(buf->InformationBufferLength);
-	BufOffset = le32_to_cpu(buf->InformationBufferOffset);

#ifdef VERBOSE_DEBUG
	pr_debug("%s: Length: %d\n", __func__, BufLength);
	pr_debug("%s: Offset: %d\n", __func__, BufOffset);


@@ -590,10 +590,12 @@ static u32 get_esit_boundary(struct mu3h_sch_ep_info *sch_ep)
	u32 boundary = sch_ep->esit;

	if (sch_ep->sch_tt) { /* LS/FS with TT */
-		/* tune for CS */
-		if (sch_ep->ep_type != ISOC_OUT_EP)
-			boundary++;
-		else if (boundary > 1) /* normally esit >= 8 for FS/LS */
+		/*
+		 * tune for CS, normally esit >= 8 for FS/LS,
+		 * not add one for other types to avoid access array
+		 * out of boundary
+		 */
+		if (sch_ep->ep_type == ISOC_OUT_EP && boundary > 1)
			boundary--;
	}


@@ -175,7 +175,6 @@ void incfs_free_mount_info(struct mount_info *mi)
		kfree(mi->pseudo_file_xattr[i].data);
	kfree(mi->mi_per_uid_read_timeouts);
	incfs_free_sysfs_node(mi->mi_sysfs_node);
-	kfree(mi->mi_options.sysfs_name);
	kfree(mi);
}


@@ -1855,10 +1855,11 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
		goto err;
	}

-	path_put(&backing_dir_path);
+	mi->mi_backing_dir_path = backing_dir_path;
	sb->s_flags |= SB_ACTIVE;

	pr_debug("incfs: mount\n");
+	free_options(&options);
	return dget(sb->s_root);
err:
@@ -1903,9 +1904,13 @@ out:
void incfs_kill_sb(struct super_block *sb)
{
	struct mount_info *mi = sb->s_fs_info;
+	struct inode *dinode = d_inode(mi->mi_backing_dir_path.dentry);

	pr_debug("incfs: unmount\n");
-	generic_shutdown_super(sb);
+	vfs_rmdir(dinode, mi->mi_index_dir);
+	vfs_rmdir(dinode, mi->mi_incomplete_dir);
+	kill_anon_super(sb);
	incfs_free_mount_info(mi);
	sb->s_fs_info = NULL;
}


@@ -1646,6 +1646,22 @@ static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
	return 0;
}

+/* This variant of skb_unclone() makes sure skb->truesize is not changed */
+static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
+{
+	might_sleep_if(gfpflags_allow_blocking(pri));
+
+	if (skb_cloned(skb)) {
+		unsigned int save = skb->truesize;
+		int res;
+
+		res = pskb_expand_head(skb, 0, 0, pri);
+		skb->truesize = save;
+		return res;
+	}
+	return 0;
+}
+
/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check


@@ -69,6 +69,10 @@ DECLARE_HOOK(android_vh_binder_proc_transaction_end,
		struct task_struct *binder_th_task, unsigned int code,
		bool pending_async, bool sync),
	TP_ARGS(caller_task, binder_proc_task, binder_th_task, code, pending_async, sync));
+DECLARE_HOOK(android_vh_binder_proc_transaction_finish,
+	TP_PROTO(struct binder_proc *proc, struct binder_transaction *t,
+		struct task_struct *binder_th_task, bool pending_async, bool sync),
+	TP_ARGS(proc, t, binder_th_task, pending_async, sync));
DECLARE_HOOK(android_vh_binder_new_ref,
	TP_PROTO(struct task_struct *proc, uint32_t ref_desc, int node_debug_id),
	TP_ARGS(proc, ref_desc, node_debug_id));
@@ -98,6 +102,7 @@ DECLARE_HOOK(android_vh_binder_read_done,
DECLARE_HOOK(android_vh_binder_has_work_ilocked,
	TP_PROTO(struct binder_thread *thread, bool do_proc_work, int *ret),
	TP_ARGS(thread, do_proc_work, ret));
+
/* macro versions of hooks are no longer required */

#endif /* _TRACE_HOOK_BINDER_H */


@@ -574,6 +574,7 @@ static const struct file_operations objects_fops = {
	.open = open_objects,
	.read = seq_read,
	.llseek = seq_lseek,
+	.release = seq_release,
};

static int __init kfence_debugfs_init(void)


@@ -3296,19 +3296,7 @@ EXPORT_SYMBOL(skb_split);
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
-	int ret = 0;
-
-	if (skb_cloned(skb)) {
-		/* Save and restore truesize: pskb_expand_head() may reallocate
-		 * memory where ksize(kmalloc(S)) != ksize(kmalloc(S)), but we
-		 * cannot change truesize at this point.
-		 */
-		unsigned int save_truesize = skb->truesize;
-
-		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-		skb->truesize = save_truesize;
-	}
-	return ret;
+	return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
}

/**


@@ -1561,7 +1561,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		return -ENOMEM;
	}

-	if (skb_unclone(skb, gfp))
+	if (skb_unclone_keeptruesize(skb, gfp))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
@@ -1670,7 +1670,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	u32 delta_truesize;

-	if (skb_unclone(skb, GFP_ATOMIC))
+	if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
		return -ENOMEM;

	delta_truesize = __pskb_trim_head(skb, len);
@@ -3184,7 +3184,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
				 cur_mss, GFP_ATOMIC))
			return -ENOMEM; /* We'll try again later. */
	} else {
-		if (skb_unclone(skb, GFP_ATOMIC))
+		if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
			return -ENOMEM;

		diff = tcp_skb_pcount(skb);


@@ -413,9 +413,6 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
	uffd_test_ops->allocate_area((void **)&area_src);
	uffd_test_ops->allocate_area((void **)&area_dst);

-	uffd_test_ops->release_pages(area_src);
-	uffd_test_ops->release_pages(area_dst);
-
	userfaultfd_open(features);

	count_verify = malloc(nr_pages * sizeof(unsigned long long));
@@ -436,6 +433,26 @@ static void uffd_test_ctx_init_ext(uint64_t *features)
		*(area_count(area_src, nr) + 1) = 1;
	}

+	/*
+	 * After initialization of area_src, we must explicitly release pages
+	 * for area_dst to make sure it's fully empty. Otherwise we could have
+	 * some area_dst pages be erroneously initialized with zero pages,
+	 * hence we could hit memory corruption later in the test.
+	 *
+	 * One example is when THP is globally enabled, above allocate_area()
+	 * calls could have the two areas merged into a single VMA (as they
+	 * will have the same VMA flags so they're mergeable). When we
+	 * initialize the area_src above, it's possible that some part of
+	 * area_dst could have been faulted in via one huge THP that will be
+	 * shared between area_src and area_dst. It could cause some of the
+	 * area_dst pages not to be trapped by missing userfaults.
+	 *
+	 * This release_pages() will guarantee even if that happened, we'll
+	 * proactively split the thp and drop any accidentally initialized
+	 * pages within area_dst.
+	 */
+	uffd_test_ops->release_pages(area_dst);
+
	pipefd = malloc(sizeof(int) * nr_cpus * 2);
	if (!pipefd)
		err("pipefd");