Merge branch 'android12-5.10' into branch 'android12-5.10-lts'

Catch up with the latest android12-5.10 changes into android12-5.10-lts.
Included in here are the following commits:

* e265882155 ANDROID: Add __nocfi return for swsusp_arch_resume
* 028de5c48b BACKPORT: arm64: mm: Make hibernation aware of KFENCE
* d615d2d800 Merge tag 'android12-5.10.210_r00' into branch android12-5.10
* 178bf27b97 UPSTREAM: selftests: timers: Fix valid-adjtimex signed left-shift undefined behavior
* 9f5f2481c9 ANDROID: kbuild: Search external devicetree path when running clean target
* 50b4a2a7e1 ANDROID: kbuild: add support for compiling external device trees
* fe033e0b34 ANDROID: usb: gadget: ncm: Introduce vendor opts to deal with ABI breakage
* 19eb358ded UPSTREAM: usb: gadget: ncm: Fix endianness of wMaxSegmentSize variable in ecm_desc
* 38958820bd UPSTREAM: usb: gadget: ncm: Add support to update wMaxSegmentSize via configfs
* 43bb9f846d ANDROID: usb: Optimize the problem of slow transfer rate in USB accessory mode
* b2c2d74cae ANDROID: ABI: Update honor symbol list
* 33c78af45a ANDROID: add vendor hook in do_read_fault to tune fault_around_bytes
* 7fc588d60f FROMGIT: usb: dwc3: Wait unconditionally after issuing EndXfer command
* 923b677c93 ANDROID: irq: put irq_resolve_mapping under protection of __irq_enter_raw
* 602a22e77a ANDROID: abi_gki_aarch64_qcom: Add clk_restore_context and clk_save_context
* b493b35d3a UPSTREAM: usb: gadget: ncm: Fix handling of zero block length packets
* c344c3ebe3 UPSTREAM: usb: gadget: ncm: Avoid dropping datagrams of properly parsed NTBs
* 626e5dce00 ANDROID: 16K: Fix show maps CFI failure
* 09da1d141d ANDROID: 16K: Handle pad VMA splits and merges
* 162de86e24 ANDROID: 16K: madvise_vma_pad_pages: Remove filemap_fault check
* 000bbad86c ANDROID: 16K: Only madvise padding from dynamic linker context
* ebf0750ad2 ANDROID: 16K: Separate padding from ELF LOAD segment mappings
* abbc0d53ee ANDROID: 16K: Exclude ELF padding for fault around range
* 778abad3ac ANDROID: 16K: Use MADV_DONTNEED to save VMA padding pages.
* 37d6ffe5ca ANDROID: 16K: Introduce ELF padding representation for VMAs
* 38c464b4a4 ANDROID: 16K: Introduce /sys/kernel/mm/pgsize_miration/enabled
* 280193753c ANDROID: GKI: Update symbols to symbol list
* 1016230309 UPSTREAM: netfilter: nf_tables: release mutex after nft_gc_seq_end from abort path
* 582e001b25 UPSTREAM: netfilter: nf_tables: release batch on table validation from abort path
* cd2fc5a605 UPSTREAM: netfilter: nf_tables: mark set as dead when unbinding anonymous set with timeout
* 5fa7520118 UPSTREAM: netfilter: nft_chain_filter: handle NETDEV_UNREGISTER for inet/ingress basechain
* ecd8068fb4 BACKPORT: mm: update mark_victim tracepoints fields
* 4571e647cc Revert "FROMGIT: BACKPORT: mm: update mark_victim tracepoints fields"
* beecd97e3a UPSTREAM: usb: gadget: uvc: decrease the interrupt load to a quarter
* ad31e24641 UPSTREAM: netfilter: nft_set_pipapo: release elements in clone only from destroy path

Change-Id: I0f7cad212c9425224ade80ed88ef8f0b8046827a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Committed by Greg Kroah-Hartman on 2024-06-01 10:59:15 +00:00.
29 changed files with 1354 additions and 437 deletions

=== file diff ===

@@ -441,15 +441,17 @@ Function-specific configfs interface
 The function name to use when creating the function directory is "ncm".
 The NCM function provides these attributes in its function directory:
 
 ===============   ==================================================
 ifname            network device interface name associated with this
                   function instance
 qmult             queue length multiplier for high and super speed
 host_addr         MAC address of host's end of this
                   Ethernet over USB link
 dev_addr          MAC address of device's end of this
                   Ethernet over USB link
+max_segment_size  Segment size required for P2P connections. This
+                  will set MTU to (max_segment_size - 14 bytes)
 ===============   ==================================================
 
 and after creating the functions/ncm.<instance name> they contain default
 values: qmult is 5, dev_addr and host_addr are randomly selected.
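The new attribute is written through the function's configfs directory before the gadget is bound. A minimal userspace sketch; the configfs mount point /config and the gadget/instance names g1 and ncm.0 are assumptions, not part of this change:

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; depends on gadget name and configfs mount. */
	const char *attr =
		"/config/usb_gadget/g1/functions/ncm.0/max_segment_size";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%u\n", 8000);	/* MTU becomes 8000 - 14 = 7986 */
	fclose(f);
	return 0;
}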

=== file diff ===

@@ -1460,7 +1460,9 @@ kselftest-merge:
 # Devicetree files
 
 ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/boot/dts/),)
-dtstree := arch/$(SRCARCH)/boot/dts
+# ANDROID: allow this to be overridden by the build environment. This allows
+# one to compile a device tree that is located out-of-tree.
+dtstree ?= arch/$(SRCARCH)/boot/dts
 endif
 
 ifneq ($(dtstree),)

@@ -1929,7 +1931,9 @@ $(clean-dirs):
 clean: $(clean-dirs)
	$(call cmd,rmfiles)
-	@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
+	@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) \
+		$(if $(filter-out arch/$(SRCARCH)/boot/dts, $(dtstree)), $(dtstree)) \
+		$(RCS_FIND_IGNORE) \
		\( -name '*.[aios]' -o -name '*.ko' -o -name '.*.cmd' \
		-o -name '*.ko.*' \
		-o -name '*.dtb' -o -name '*.dtb.S' -o -name '*.dt.yaml' \

=== file diff (suppressed because it is too large) ===

=== file diff ===

@@ -1,3 +1,5 @@
 [abi_symbol_list]
   __traceiter_android_rvh_dma_buf_stats_teardown
+  __traceiter_android_vh_tune_fault_around_bytes
   __tracepoint_android_rvh_dma_buf_stats_teardown
+  __tracepoint_android_vh_tune_fault_around_bytes

=== file diff ===

@@ -178,7 +178,9 @@
   clk_prepare
   clk_put
   clk_register
+  clk_restore_context
   clk_round_rate
+  clk_save_context
   clk_set_parent
   clk_set_rate
   clk_sync_state

=== file diff ===

@@ -10,6 +10,17 @@
   swapcache_free_entries
   swap_type_to_swap_info
   blkcg_schedule_throttle
+  _atomic_dec_and_lock_irqsave
+  bio_uninit
+  blk_mq_kick_requeue_list
+  blk_rq_init
+  errno_to_blk_status
+  ioc_lookup_icq
+  sbitmap_finish_wait
+  sbitmap_prepare_to_wait
+  sbitmap_queue_wake_all
+  scsi_command_normalize_sense
+  ufshcd_release_scsi_cmd
   __traceiter_android_rvh_alloc_si
   __traceiter_android_rvh_alloc_swap_slot_cache
   __traceiter_android_rvh_drain_slots_cache_cpu

=== file diff ===

@@ -39,6 +39,7 @@
 #include <asm/suspend.h>
 #include <asm/sysreg.h>
 #include <asm/virt.h>
+#include <linux/kfence.h>
 
 /*
  * Hibernate core relies on this value being 0 on resume, and marks it

@@ -473,7 +474,8 @@ static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
		 * the temporary mappings we use during restore.
		 */
		set_pte(dst_ptep, pte_mkwrite(pte));
-	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
+	} else if ((debug_pagealloc_enabled() ||
+		    is_kfence_address((void *)addr)) && !pte_none(pte)) {
		/*
		 * debug_pagealloc will removed the PTE_VALID bit if
		 * the page isn't in use by the resume kernel. It may have

@@ -644,7 +646,7 @@ static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
  * Memory allocated by get_safe_page() will be dealt with by the hibernate code,
  * we don't need to free it here.
  */
-int swsusp_arch_resume(void)
+int __nocfi swsusp_arch_resume(void)
 {
	int rc;
	void *zero_page;

=== file diff ===

@@ -368,6 +368,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_dirty_limits);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_oom_check_panic);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_uninterruptible_tasks);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_uninterruptible_tasks_dn);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_fault_around_bytes);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmc_blk_reset);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmc_blk_mq_rw_recovery);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sd_update_bus_speed_mode);

=== file diff ===

@@ -1701,7 +1701,6 @@ static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
  */
 static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt)
 {
-	struct dwc3 *dwc = dep->dwc;
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;
	int ret;

@@ -1726,8 +1725,7 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
	dep->resource_index = 0;
 
	if (!interrupt) {
-		if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A))
-			mdelay(1);
+		mdelay(1);
		dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
	} else if (!ret) {
		dep->flags |= DWC3_EP_END_TRANSFER_PENDING;

=== file diff ===

@@ -170,7 +170,7 @@ static struct usb_ss_ep_comp_descriptor acc_superspeedplus_comp_desc = {
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
 
	/* the following 2 values can be tweaked if necessary */
-	/* .bMaxBurst =		0, */
+	.bMaxBurst =		6,
	/* .bmAttributes =	0, */
 };

@@ -195,7 +195,7 @@ static struct usb_ss_ep_comp_descriptor acc_superspeed_comp_desc = {
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
 
	/* the following 2 values can be tweaked if necessary */
-	/* .bMaxBurst =		0, */
+	.bMaxBurst =		6,
	/* .bmAttributes =	0, */
 };
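For context on the .bMaxBurst = 6 change above: the SuperSpeed endpoint companion descriptor encodes bMaxBurst as packets-per-burst minus one, so 6 permits bursts of seven 1024-byte bulk packets instead of one. A standalone arithmetic sketch, not kernel code:

#include <stdio.h>

int main(void)
{
	const int max_packet = 1024;	/* SuperSpeed bulk wMaxPacketSize */
	const int bMaxBurst = 6;	/* encoded as burst size minus one */

	printf("payload per burst: %d bytes\n", (bMaxBurst + 1) * max_packet);
	return 0;
}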

=== file diff ===

@@ -120,6 +120,16 @@ static inline unsigned ncm_bitrate(struct usb_gadget *g)
 /* Delay for the transmit to wait before sending an unfilled NTB frame. */
 #define TX_TIMEOUT_NSECS	300000
 
+/*
+ * Although max mtu as dictated by u_ether is 15412 bytes, setting
+ * max_segment_size to 15426 would not be efficient. If user chooses segment
+ * size to be (>= 8192), then we can't aggregate more than one buffer in each
+ * NTB (assuming each packet coming from network layer is >= 8192 bytes) as ep
+ * maxpacket limit is 16384. So let max_segment_size be limited to 8000 to allow
+ * at least 2 packets to be aggregated reducing wastage of NTB buffer space
+ */
+#define MAX_DATAGRAM_SIZE	8000
+
 #define FORMATS_SUPPORTED	(USB_CDC_NCM_NTB16_SUPPORTED |	\
				 USB_CDC_NCM_NTB32_SUPPORTED)

@@ -196,7 +206,6 @@ static struct usb_cdc_ether_desc ecm_desc = {
	/* this descriptor actually adds value, surprise! */
	/* .iMACAddress = DYNAMIC */
	.bmEthernetStatistics = cpu_to_le32(0), /* no statistics */
-	.wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN),
	.wNumberMCFilters = cpu_to_le16(0),
	.bNumberPowerFilters = 0,
 };

@@ -1190,11 +1199,17 @@ static int ncm_unwrap_ntb(struct gether *port,
	struct sk_buff	*skb2;
	int		ret = -EINVAL;
	unsigned	ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
-	unsigned	frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize);
+	unsigned	frame_max;
	const struct ndp_parser_opts *opts = ncm->parser_opts;
	unsigned	crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
	int		dgram_counter;
	int		to_process = skb->len;
+	struct f_ncm_opts *ncm_opts;
+	struct ncm_vendor_opts *ncm_vopts;
+
+	ncm_opts = container_of(port->func.fi, struct f_ncm_opts, func_inst);
+	ncm_vopts = container_of(ncm_opts, struct ncm_vendor_opts, opts);
+	frame_max = ncm_vopts->max_segment_size;
 
 parse_ntb:
	tmp = (__le16 *)ntb_ptr;

@@ -1357,7 +1372,7 @@ parse_ntb:
	if (to_process == 1 &&
	    (*(unsigned char *)(ntb_ptr + block_len) == 0x00)) {
		to_process--;
-	} else if (to_process > 0) {
+	} else if ((to_process > 0) && (block_len != 0)) {
		ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
		goto parse_ntb;
	}

@@ -1446,11 +1461,13 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
	int		status = 0;
	struct usb_ep	*ep;
	struct f_ncm_opts	*ncm_opts;
+	struct ncm_vendor_opts	*ncm_vopts;
 
	if (!can_support_ecm(cdev->gadget))
		return -EINVAL;
 
	ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+	ncm_vopts = container_of(ncm_opts, struct ncm_vendor_opts, opts);
 
	if (cdev->use_os_string) {
		f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),

@@ -1463,8 +1480,10 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
 
	mutex_lock(&ncm_opts->lock);
	gether_set_gadget(ncm_opts->net, cdev->gadget);
-	if (!ncm_opts->bound)
+	if (!ncm_opts->bound) {
+		ncm_opts->net->mtu = (ncm_vopts->max_segment_size - ETH_HLEN);
		status = gether_register_netdev(ncm_opts->net);
+	}
	mutex_unlock(&ncm_opts->lock);
 
	if (status)

@@ -1507,6 +1526,8 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
	ncm_data_intf.bInterfaceNumber = status;
	ncm_union_desc.bSlaveInterface0 = status;
 
+	ecm_desc.wMaxSegmentSize = cpu_to_le16(ncm_vopts->max_segment_size);
+
	status = -ENODEV;
 
	/* allocate instance-specific endpoints */

@@ -1611,11 +1632,62 @@ USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ncm);
 /* f_ncm_opts_ifname */
 USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ncm);
 
+static ssize_t ncm_opts_max_segment_size_show(struct config_item *item,
+					      char *page)
+{
+	struct f_ncm_opts *opts = to_f_ncm_opts(item);
+	struct ncm_vendor_opts *vopts;
+	u16 segment_size;
+
+	vopts = container_of(opts, struct ncm_vendor_opts, opts);
+	mutex_lock(&opts->lock);
+	segment_size = vopts->max_segment_size;
+	mutex_unlock(&opts->lock);
+
+	return sysfs_emit(page, "%u\n", segment_size);
+}
+
+static ssize_t ncm_opts_max_segment_size_store(struct config_item *item,
+					       const char *page, size_t len)
+{
+	struct f_ncm_opts *opts = to_f_ncm_opts(item);
+	struct ncm_vendor_opts *vopts;
+	u16 segment_size;
+	int ret;
+
+	vopts = container_of(opts, struct ncm_vendor_opts, opts);
+	mutex_lock(&opts->lock);
+	if (opts->refcnt) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = kstrtou16(page, 0, &segment_size);
+	if (ret)
+		goto out;
+	if (segment_size > MAX_DATAGRAM_SIZE) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	vopts->max_segment_size = segment_size;
+	ret = len;
+out:
+	mutex_unlock(&opts->lock);
+	return ret;
+}
+
+CONFIGFS_ATTR(ncm_opts_, max_segment_size);
+
 static struct configfs_attribute *ncm_attrs[] = {
	&ncm_opts_attr_dev_addr,
	&ncm_opts_attr_host_addr,
	&ncm_opts_attr_qmult,
	&ncm_opts_attr_ifname,
+	&ncm_opts_attr_max_segment_size,
	NULL,
 };

@@ -1640,14 +1712,16 @@ static void ncm_free_inst(struct usb_function_instance *f)
 
 static struct usb_function_instance *ncm_alloc_inst(void)
 {
+	struct ncm_vendor_opts	*vopts;
	struct f_ncm_opts	*opts;
	struct usb_os_desc	*descs[1];
	char			*names[1];
	struct config_group	*ncm_interf_group;
 
-	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
-	if (!opts)
+	vopts = kzalloc(sizeof(*vopts), GFP_KERNEL);
+	if (!vopts)
		return ERR_PTR(-ENOMEM);
+	opts = &vopts->opts;
	opts->ncm_os_desc.ext_compat_id = opts->ncm_ext_compat_id;
 
	mutex_init(&opts->lock);

@@ -1658,6 +1732,7 @@ static struct usb_function_instance *ncm_alloc_inst(void)
		kfree(opts);
		return ERR_CAST(net);
	}
+	vopts->max_segment_size = ETH_FRAME_LEN;
	INIT_LIST_HEAD(&opts->ncm_os_desc.ext_prop);
 
	descs[0] = &opts->ncm_os_desc;
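The MAX_DATAGRAM_SIZE reasoning above is plain arithmetic: two datagrams (plus framing) must fit under the 16384-byte NTB output ceiling for aggregation to happen at all. A standalone check, not kernel code, ignoring header overhead:

#include <stdio.h>

int main(void)
{
	const int ntb_out_max = 16384;	/* dwNtbOutMaxSize ceiling */
	const int sizes[] = { 8000, 8192 };

	for (int i = 0; i < 2; i++)
		printf("max_segment_size=%d: two datagrams need %d bytes, %s\n",
		       sizes[i], 2 * sizes[i],
		       2 * sizes[i] <= ntb_out_max ?
		       "so aggregation is possible" : "so only one fits");
	return 0;
}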

=== file diff ===

@@ -33,4 +33,9 @@ struct f_ncm_opts {
	int			refcnt;
 };
 
+struct ncm_vendor_opts {
+	struct f_ncm_opts	opts;
+	u16			max_segment_size;
+};
+
 #endif /* U_NCM_H */
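This wrapper is the whole ABI-compatibility trick: struct f_ncm_opts keeps its frozen layout, vendor state is appended by embedding it, and code holding an opts pointer recovers the wrapper with container_of(). A standalone userspace sketch of the pattern:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct f_ncm_opts { int refcnt; };	/* stand-in for the frozen struct */

struct ncm_vendor_opts {
	struct f_ncm_opts opts;		/* embedded frozen struct */
	unsigned short max_segment_size;
};

int main(void)
{
	struct ncm_vendor_opts vopts = { .max_segment_size = 1514 };
	struct f_ncm_opts *opts = &vopts.opts;	/* what legacy code sees */
	struct ncm_vendor_opts *back =
		container_of(opts, struct ncm_vendor_opts, opts);

	printf("max_segment_size=%u\n", (unsigned)back->max_segment_size);
	return 0;
}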

=== file diff ===

@@ -100,6 +100,8 @@ struct uvc_video {
	struct list_head req_free;
	spinlock_t req_lock;
 
+	unsigned int req_int_count;
+
	void (*encode) (struct usb_request *req, struct uvc_video *video,
			struct uvc_buffer *buf);

=== file diff ===

@@ -294,6 +294,19 @@ static void uvcg_video_pump(struct work_struct *work)
 
		video->encode(req, video, buf);
 
+		/* With usb3 we have more requests. This will decrease the
+		 * interrupt load to a quarter but also catches the corner
+		 * cases, which needs to be handled */
+		if (list_empty(&video->req_free) ||
+		    buf->state == UVC_BUF_STATE_DONE ||
+		    !(video->req_int_count %
+		      DIV_ROUND_UP(video->uvc_num_requests, 4))) {
+			video->req_int_count = 0;
+			req->no_interrupt = 0;
+		} else {
+			req->no_interrupt = 1;
+		}
+
		/* Queue the USB request */
		ret = uvcg_video_ep_queue(video, req);
		spin_unlock_irqrestore(&queue->irqlock, flags);

@@ -305,6 +318,7 @@ static void uvcg_video_pump(struct work_struct *work)
 
		/* Endpoint now owns the request */
		req = NULL;
+		video->req_int_count++;
	}
 
	if (!req)

@@ -355,6 +369,8 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
	} else
		video->encode = uvc_video_encode_isoc;
 
+	video->req_int_count = 0;
+
	schedule_work(&video->pump);
 
	return ret;
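A standalone model, not kernel code, of the moderation logic above: with 16 requests in flight, a completion interrupt is requested only on every DIV_ROUND_UP(16, 4) = 4th request (the req_free-empty and buffer-done corner cases are omitted here):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const unsigned int uvc_num_requests = 16;	/* example usb3 depth */
	unsigned int req_int_count = 0;

	for (int i = 0; i < 10; i++) {
		int fire = !(req_int_count % DIV_ROUND_UP(uvc_num_requests, 4));

		if (fire)
			req_int_count = 0;
		printf("req %d: no_interrupt=%d\n", i, !fire);
		req_int_count++;
	}
	return 0;
}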

=== file diff ===

@@ -9,6 +9,7 @@
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/pagemap.h>
+#include <linux/pgsize_migration.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
 #include <linux/swap.h>

@@ -390,7 +391,14 @@ done:
 
 static int show_map(struct seq_file *m, void *v)
 {
-	show_map_vma(m, v);
+	struct vm_area_struct *pad_vma = get_pad_vma(v);
+	struct vm_area_struct *vma = get_data_vma(v);
+
+	if (vma_pages(vma))
+		show_map_vma(m, vma);
+
+	show_map_pad_vma(vma, pad_vma, m, show_map_vma, false);
+
	return 0;
 }

@@ -887,11 +895,15 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
 
 static int show_smap(struct seq_file *m, void *v)
 {
-	struct vm_area_struct *vma = v;
+	struct vm_area_struct *pad_vma = get_pad_vma(v);
+	struct vm_area_struct *vma = get_data_vma(v);
	struct mem_size_stats mss;
 
	memset(&mss, 0, sizeof(mss));
 
+	if (!vma_pages(vma))
+		goto show_pad;
+
	smap_gather_stats(vma, &mss, 0);
 
	show_map_vma(m, vma);

@@ -915,6 +927,9 @@ static int show_smap(struct seq_file *m, void *v)
		seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
	show_smap_vma_flags(m, vma);
 
+show_pad:
+	show_map_pad_vma(vma, pad_vma, m, show_smap, true);
+
	return 0;
 }

=== file diff ===

@@ -0,0 +1,133 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_SIZE_MIGRATION_H
#define _LINUX_PAGE_SIZE_MIGRATION_H
/*
* Page Size Migration
*
* Copyright (c) 2024, Google LLC.
* Author: Kalesh Singh <kaleshsingh@google.com>
*
* This file contains the APIs for mitigations to ensure
* app compatibility during the transition from 4kB to 16kB
* page size in Android.
*/
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>
/*
* vm_flags representation of VMA padding pages.
*
* This allows the kernel to identify the portion of an ELF LOAD segment VMA
* that is padding.
*
* 4 high bits of vm_flags [63,60] are used to represent ELF segment padding
* up to 60kB, which is sufficient for ELFs of both 16kB and 64kB segment
* alignment (p_align).
*
* The representation is illustrated below.
*
* 63 62 61 60
* _________ _________ _________ _________
* | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
* | of 4kB | of 4kB | of 4kB | of 4kB |
* | chunks | chunks | chunks | chunks |
* |_________|_________|_________|_________|
*/
#define VM_PAD_WIDTH 4
#define VM_PAD_SHIFT (BITS_PER_LONG - VM_PAD_WIDTH)
#define VM_TOTAL_PAD_PAGES ((1ULL << VM_PAD_WIDTH) - 1)
#define VM_PAD_MASK (VM_TOTAL_PAD_PAGES << VM_PAD_SHIFT)
#define VMA_PAD_START(vma) (vma->vm_end - (vma_pad_pages(vma) << PAGE_SHIFT))
#if PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT)
extern void vma_set_pad_pages(struct vm_area_struct *vma,
unsigned long nr_pages);
extern unsigned long vma_pad_pages(struct vm_area_struct *vma);
extern void madvise_vma_pad_pages(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma);
extern struct vm_area_struct *get_data_vma(struct vm_area_struct *vma);
extern void show_map_pad_vma(struct vm_area_struct *vma,
struct vm_area_struct *pad,
struct seq_file *m, void *func, bool smaps);
extern void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
unsigned long addr, int new_below);
#else /* PAGE_SIZE != SZ_4K || !defined(CONFIG_64BIT) */
static inline void vma_set_pad_pages(struct vm_area_struct *vma,
unsigned long nr_pages)
{
}
static inline unsigned long vma_pad_pages(struct vm_area_struct *vma)
{
return 0;
}
static inline void madvise_vma_pad_pages(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
}
static inline struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma)
{
return NULL;
}
static inline struct vm_area_struct *get_data_vma(struct vm_area_struct *vma)
{
return vma;
}
static inline void show_map_pad_vma(struct vm_area_struct *vma,
struct vm_area_struct *pad,
struct seq_file *m, void *func, bool smaps)
{
}
static inline void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
unsigned long addr, int new_below)
{
}
#endif /* PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT) */
static inline unsigned long vma_data_pages(struct vm_area_struct *vma)
{
return vma_pages(vma) - vma_pad_pages(vma);
}
/*
* Sets the correct padding bits / flags for a VMA split.
*/
static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
unsigned long newflags)
{
if (newflags & VM_PAD_MASK)
return (newflags & ~VM_PAD_MASK) | (vma->vm_flags & VM_PAD_MASK);
else
return newflags;
}
/*
* Merging of padding VMAs is uncommon, as padding is only allowed
* from the linker context.
*
* To simplify the semantics, adjacent VMAs with padding are not
* allowed to merge.
*/
static inline bool is_mergable_pad_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{
/* Padding VMAs cannot be merged with other padding or real VMAs */
return !((vma->vm_flags | vm_flags) & VM_PAD_MASK);
}
#endif /* _LINUX_PAGE_SIZE_MIGRATION_H */
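The encoding above is a simple shift-and-mask round trip. A standalone sketch, assuming 64-bit longs and 4kB pages, of what vma_set_pad_pages() and vma_pad_pages() do with the top four flag bits:

#include <stdio.h>

#define BITS_PER_LONG		64
#define VM_PAD_WIDTH		4
#define VM_PAD_SHIFT		(BITS_PER_LONG - VM_PAD_WIDTH)
#define VM_TOTAL_PAD_PAGES	((1UL << VM_PAD_WIDTH) - 1)
#define VM_PAD_MASK		(VM_TOTAL_PAD_PAGES << VM_PAD_SHIFT)

int main(void)
{
	unsigned long vm_flags = 0x875;	/* arbitrary example flag bits */
	unsigned long nr_pad_pages = 3;	/* 12kB of padding in 4kB pages */

	/* vma_set_pad_pages() equivalent */
	vm_flags &= ~VM_PAD_MASK;
	vm_flags |= nr_pad_pages << VM_PAD_SHIFT;

	/* vma_pad_pages() equivalent */
	printf("flags=%#lx, pad pages=%lu\n", vm_flags,
	       vm_flags >> VM_PAD_SHIFT);
	return 0;
}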

=== file diff ===

@@ -7,6 +7,8 @@
 #include <linux/tracepoint.h>
 #include <trace/events/mmflags.h>
 
+#define PG_COUNT_TO_KB(x) ((x) << (PAGE_SHIFT - 10))
+
 TRACE_EVENT(oom_score_adj_update,
 
	TP_PROTO(struct task_struct *task),

@@ -78,22 +80,37 @@ TRACE_EVENT(mark_victim,
 
	TP_STRUCT__entry(
		__field(int, pid)
-		__field(uid_t, uid)
		__string(comm, task->comm)
+		__field(unsigned long, total_vm)
+		__field(unsigned long, anon_rss)
+		__field(unsigned long, file_rss)
+		__field(unsigned long, shmem_rss)
+		__field(uid_t, uid)
+		__field(unsigned long, pgtables)
		__field(short, oom_score_adj)
	),
 
	TP_fast_assign(
		__entry->pid = task->pid;
-		__entry->uid = uid;
		__assign_str(comm, task->comm);
+		__entry->total_vm = PG_COUNT_TO_KB(task->mm->total_vm);
+		__entry->anon_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_ANONPAGES));
+		__entry->file_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_FILEPAGES));
+		__entry->shmem_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_SHMEMPAGES));
+		__entry->uid = uid;
+		__entry->pgtables = mm_pgtables_bytes(task->mm) >> 10;
		__entry->oom_score_adj = task->signal->oom_score_adj;
	),
 
-	TP_printk("pid=%d uid=%u comm=%s oom_score_adj=%hd",
+	TP_printk("pid=%d comm=%s total-vm=%lukB anon-rss=%lukB file-rss:%lukB shmem-rss:%lukB uid=%u pgtables=%lukB oom_score_adj=%hd",
		__entry->pid,
-		__entry->uid,
		__get_str(comm),
+		__entry->total_vm,
+		__entry->anon_rss,
+		__entry->file_rss,
+		__entry->shmem_rss,
+		__entry->uid,
+		__entry->pgtables,
		__entry->oom_score_adj
	)
 );
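PG_COUNT_TO_KB() works because a page of 2^PAGE_SHIFT bytes is 2^(PAGE_SHIFT - 10) kilobytes, so a page count shifts left by PAGE_SHIFT - 10. A standalone check, assuming 4kB pages (PAGE_SHIFT = 12):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4kB pages */
#define PG_COUNT_TO_KB(x) ((unsigned long)(x) << (PAGE_SHIFT - 10))

int main(void)
{
	unsigned long rss_pages = 25600;	/* 100 MiB in 4kB pages */

	printf("%lu pages = %lu kB\n", rss_pages, PG_COUNT_TO_KB(rss_pages));
	return 0;	/* prints: 25600 pages = 102400 kB */
}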

=== file diff ===

@@ -367,6 +367,9 @@ DECLARE_HOOK(android_vh_do_swap_page_spf,
	TP_ARGS(allow_swap_spf));
 
 /* macro versions of hooks are no longer required */
+DECLARE_HOOK(android_vh_tune_fault_around_bytes,
+	TP_PROTO(unsigned long *fault_around_bytes),
+	TP_ARGS(fault_around_bytes));
 
 #endif /* _TRACE_HOOK_MM_H */
 
 /* This part must be outside protection */

=== file diff ===

@@ -676,8 +676,11 @@ int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
	int ret = 0;
 
 #ifdef CONFIG_IRQ_DOMAIN
-	if (lookup)
+	if (lookup) {
+		__irq_enter_raw();
		irq = irq_find_mapping(domain, hwirq);
+		__irq_exit_raw();
+	}
 #endif
 
	/*

=== file diff ===

@@ -52,7 +52,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
			   mm_init.o percpu.o slab_common.o \
			   compaction.o vmacache.o \
			   interval_tree.o list_lru.o workingset.o \
-			   debug.o gup.o $(mmu-y)
+			   debug.o gup.o pgsize_migration.o $(mmu-y)
 
 # Give 'page_alloc' its own module-parameter namespace
 page-alloc-y := page_alloc.o

=== file diff ===

@@ -11,6 +11,7 @@
 #include <linux/syscalls.h>
 #include <linux/mempolicy.h>
 #include <linux/page-isolation.h>
+#include <linux/pgsize_migration.h>
 #include <linux/page_idle.h>
 #include <linux/userfaultfd_k.h>
 #include <linux/hugetlb.h>

@@ -792,6 +793,8 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
 static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
 {
+	madvise_vma_pad_pages(vma, start, end);
+
	zap_page_range(vma, start, end - start);
	return 0;
 }
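Userspace-side sketch of the hint this hooks into: the dynamic linker issues MADV_DONTNEED on the padding tail of a segment mapping. The anonymous mapping below only illustrates the call shape; the kernel records padding only for file-backed *.so mappings madvised from linker context:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	size_t len = 16 * psz;
	char *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Hint that the last 4 pages (the "padding") are not needed. */
	if (madvise(map + len - 4 * psz, 4 * psz, MADV_DONTNEED))
		perror("madvise");
	munmap(map, len);
	return 0;
}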

=== file diff ===

@@ -57,6 +57,7 @@
 #include <linux/delayacct.h>
 #include <linux/init.h>
 #include <linux/pfn_t.h>
+#include <linux/pgsize_migration.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>

@@ -4359,7 +4360,7 @@ static vm_fault_t do_fault_around(struct vm_fault *vmf)
	end_pgoff = start_pgoff -
		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
		PTRS_PER_PTE - 1;
-	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
+	end_pgoff = min3(end_pgoff, vma_data_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
			start_pgoff + nr_pages - 1);
 
	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) &&

@@ -4378,6 +4379,8 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
	struct vm_area_struct *vma = vmf->vma;
	vm_fault_t ret = 0;
 
+	trace_android_vh_tune_fault_around_bytes(&fault_around_bytes);
+
	/*
	 * Let's call ->map_pages() first and use ->fault() as fallback
	 * if page by the offset is not ready to be mapped (cold cache or

=== file diff ===

@@ -13,6 +13,7 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/pagemap.h>
+#include <linux/pgsize_migration.h>
 #include <linux/pagevec.h>
 #include <linux/mempolicy.h>
 #include <linux/syscalls.h>

@@ -586,7 +587,7 @@ success:
	 */
	if (lock) {
		vm_write_begin(vma);
-		WRITE_ONCE(vma->vm_flags, newflags);
+		WRITE_ONCE(vma->vm_flags, vma_pad_fixup_flags(vma, newflags));
		vm_write_end(vma);
	} else
		munlock_vma_pages_range(vma, start, end);

=== file diff ===

@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/pgsize_migration.h>
 #include <linux/personality.h>
 #include <linux/security.h>
 #include <linux/hugetlb.h>

@@ -1103,6 +1104,8 @@ static inline int is_mergeable_vma(struct vm_area_struct *vma,
		return 0;
	if (vma_get_anon_name(vma) != anon_name)
		return 0;
+	if (!is_mergable_pad_vma(vma, vm_flags))
+		return 0;
	return 1;
 }

@@ -2902,8 +2905,10 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
	/* Success. */
-	if (!err)
+	if (!err) {
+		split_pad_vma(vma, new, addr, new_below);
		return 0;
+	}
 
	/* Clean everything up if vma_adjust failed. */
	if (new->vm_ops && new->vm_ops->close)

=== file diff ===

@@ -17,6 +17,7 @@
 #include <linux/highmem.h>
 #include <linux/security.h>
 #include <linux/mempolicy.h>
+#include <linux/pgsize_migration.h>
 #include <linux/personality.h>
 #include <linux/syscalls.h>
 #include <linux/swap.h>

@@ -481,7 +482,7 @@ success:
	 * held in write mode.
	 */
	vm_write_begin(vma);
-	WRITE_ONCE(vma->vm_flags, newflags);
+	WRITE_ONCE(vma->vm_flags, vma_pad_fixup_flags(vma, newflags));
	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
	vma_set_page_prot(vma);

=== mm/pgsize_migration.c (new file, 399 lines) ===

@@ -0,0 +1,399 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Page Size Migration
*
* This file contains the core logic of mitigations to ensure
* app compatibility during the transition from 4kB to 16kB
* page size in Android.
*
* Copyright (c) 2024, Google LLC.
* Author: Kalesh Singh <kaleshsingh@google.com>
*/
#include <linux/pgsize_migration.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
#include <linux/kstrtox.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
typedef void (*show_pad_maps_fn) (struct seq_file *m, struct vm_area_struct *vma);
typedef int (*show_pad_smaps_fn) (struct seq_file *m, void *v);
#ifdef CONFIG_64BIT
#if PAGE_SIZE == SZ_4K
DEFINE_STATIC_KEY_TRUE(pgsize_migration_enabled);
#define is_pgsize_migration_enabled() (static_branch_likely(&pgsize_migration_enabled))
#else /* PAGE_SIZE != SZ_4K */
DEFINE_STATIC_KEY_FALSE(pgsize_migration_enabled);
#define is_pgsize_migration_enabled() (static_branch_unlikely(&pgsize_migration_enabled))
#endif /* PAGE_SIZE == SZ_4K */
static ssize_t show_pgsize_migration_enabled(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
if (is_pgsize_migration_enabled())
return sprintf(buf, "%d\n", 1);
else
return sprintf(buf, "%d\n", 0);
}
static ssize_t store_pgsize_migration_enabled(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
unsigned long val;
/* Migration is only applicable to 4kB kernels */
if (PAGE_SIZE != SZ_4K)
return n;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
if (val > 1)
return -EINVAL;
if (val == 1)
static_branch_enable(&pgsize_migration_enabled);
else if (val == 0)
static_branch_disable(&pgsize_migration_enabled);
return n;
}
static struct kobj_attribute pgsize_migration_enabled_attr = __ATTR(
enabled,
0644,
show_pgsize_migration_enabled,
store_pgsize_migration_enabled
);
static struct attribute *pgsize_migration_attrs[] = {
&pgsize_migration_enabled_attr.attr,
NULL
};
static struct attribute_group pgsize_migration_attr_group = {
.name = "pgsize_migration",
.attrs = pgsize_migration_attrs,
};
/**
* What: /sys/kernel/mm/pgsize_migration/enabled
* Date: April 2024
* KernelVersion: v5.4+ (GKI kernels)
* Contact: Kalesh Singh <kaleshsingh@google.com>
* Description: /sys/kernel/mm/pgsize_migration/enabled
* allows for userspace to turn on or off page size
* migration mitigations necessary for app compatibility
* during Android's transition from 4kB to 16kB page size.
* Such mitigations include preserving /proc/<pid>/[s]maps
* output as if there was no segment extension by the
* dynamic loader; and preventing fault around in the padding
* sections of ELF LOAD segment mappings.
* Users: Bionic's dynamic linker
*/
static int __init init_pgsize_migration(void)
{
if (sysfs_create_group(mm_kobj, &pgsize_migration_attr_group))
pr_err("pgsize_migration: failed to create sysfs group\n");
return 0;
};
late_initcall(init_pgsize_migration);
#if PAGE_SIZE == SZ_4K
void vma_set_pad_pages(struct vm_area_struct *vma,
unsigned long nr_pages)
{
if (!is_pgsize_migration_enabled())
return;
vma->vm_flags &= ~VM_PAD_MASK;
vma->vm_flags |= (nr_pages << VM_PAD_SHIFT);
}
unsigned long vma_pad_pages(struct vm_area_struct *vma)
{
if (!is_pgsize_migration_enabled())
return 0;
return vma->vm_flags >> VM_PAD_SHIFT;
}
static __always_inline bool str_has_suffix(const char *str, const char *suffix)
{
size_t str_len = strlen(str);
size_t suffix_len = strlen(suffix);
if (str_len < suffix_len)
return false;
return !strncmp(str + str_len - suffix_len, suffix, suffix_len);
}
/*
* The dynamic linker, or interpreter, operates within the process context
* of the binary that necessitated dynamic linking.
*
* Consequently, process context identifiers; like PID, comm, ...; cannot
* be used to differentiate whether the execution context belongs to the
* dynamic linker or not.
*
* linker_ctx() deduces whether execution is currently in the dynamic linker's
* context by correlating the current userspace instruction pointer with the
* VMAs of the current task.
*
* Returns true if in linker context, otherwise false.
*
* Caller must hold mmap lock in read mode.
*/
static inline bool linker_ctx(void)
{
struct pt_regs *regs = task_pt_regs(current);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct file *file;
if (!regs)
return false;
vma = find_vma(mm, instruction_pointer(regs));
/* Current execution context, the VMA must be present */
BUG_ON(!vma);
file = vma->vm_file;
if (!file)
return false;
if ((vma->vm_flags & VM_EXEC)) {
char buf[64];
const int bufsize = sizeof(buf);
char *path;
memset(buf, 0, bufsize);
path = d_path(&file->f_path, buf, bufsize);
if (!strcmp(path, "/system/bin/linker64"))
return true;
}
return false;
}
/*
* Saves the number of padding pages for an ELF segment mapping
* in vm_flags.
*
* The number of padding pages is deduced from the madvise DONTNEED range [start, end)
* if the following conditions are met:
* 1) The range is enclosed by a single VMA
* 2) The range ends at the end address of the VMA
* 3) The range starts at an address greater than the start address of the VMA
* 4) The number of the pages in the range does not exceed VM_TOTAL_PAD_PAGES.
* 5) The VMA is a file backed VMA.
* 6) The file backing the VMA is a shared library (*.so)
* 7) The madvise was requested by bionic's dynamic linker.
*/
void madvise_vma_pad_pages(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
unsigned long nr_pad_pages;
if (!is_pgsize_migration_enabled())
return;
/*
* If the madvise range is at the end of the file, save the number of
* pages in vm_flags (only 4 bits are needed for up to 64kB aligned ELFs).
*/
if (start <= vma->vm_start || end != vma->vm_end)
return;
nr_pad_pages = (end - start) >> PAGE_SHIFT;
if (!nr_pad_pages || nr_pad_pages > VM_TOTAL_PAD_PAGES)
return;
/* Only handle this for file backed VMAs */
if (!vma->vm_file)
return;
/* Limit this to only shared libraries (*.so) */
if (!str_has_suffix(vma->vm_file->f_path.dentry->d_name.name, ".so"))
return;
/* Only bionic's dynamic linker needs to hint padding pages. */
if (!linker_ctx())
return;
vma_set_pad_pages(vma, nr_pad_pages);
}
static const char *pad_vma_name(struct vm_area_struct *vma)
{
return "[page size compat]";
}
static const struct vm_operations_struct pad_vma_ops = {
.name = pad_vma_name,
};
/*
* Returns a new VMA representing the padding in @vma, if no padding
* in @vma returns NULL.
*/
struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma)
{
struct vm_area_struct *pad;
if (!is_pgsize_migration_enabled() || !(vma->vm_flags & VM_PAD_MASK))
return NULL;
pad = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
*pad = *vma;
/* Remove file */
pad->vm_file = NULL;
/* Add vm_ops->name */
pad->vm_ops = &pad_vma_ops;
/* Adjust the start to begin at the start of the padding section */
pad->vm_start = VMA_PAD_START(pad);
/* Make the pad vma PROT_NONE */
pad->vm_flags &= ~(VM_READ|VM_WRITE|VM_EXEC);
/* Remove padding bits */
pad->vm_flags &= ~VM_PAD_MASK;
return pad;
}
/*
* Returns a new VMA excluding the padding from @vma; if no padding in
* @vma returns @vma.
*/
struct vm_area_struct *get_data_vma(struct vm_area_struct *vma)
{
struct vm_area_struct *data;
if (!is_pgsize_migration_enabled() || !(vma->vm_flags & VM_PAD_MASK))
return vma;
data = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
*data = *vma;
/* Adjust the end to the start of the padding section */
data->vm_end = VMA_PAD_START(data);
return data;
}
/*
* Calls the show_pad_vma_fn on the @pad VMA, and frees the copies of @vma
* and @pad.
*/
void show_map_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *pad,
struct seq_file *m, void *func, bool smaps)
{
if (!pad)
return;
/*
* This cannot happen. If @pad vma was allocated the corresponding
* @vma should have the VM_PAD_MASK bit(s) set.
*/
BUG_ON(!(vma->vm_flags & VM_PAD_MASK));
/*
* This cannot happen. @pad is a section of the original VMA.
* Therefore @vma cannot be null if @pad is not null.
*/
BUG_ON(!vma);
if (smaps)
((show_pad_smaps_fn)func)(m, pad);
else
((show_pad_maps_fn)func)(m, pad);
kfree(pad);
kfree(vma);
}
/*
* When splitting a padding VMA there are a couple of cases to handle.
*
* Given:
*
* | DDDDPPPP |
*
* where:
* - D represents 1 page of data;
* - P represents 1 page of padding;
* - | represents the boundaries (start/end) of the VMA
*
*
* 1) Split exactly at the padding boundary
*
* | DDDDPPPP | --> | DDDD | PPPP |
*
* - Remove padding flags from the first VMA.
* - The second VMA is all padding
*
* 2) Split within the padding area
*
* | DDDDPPPP | --> | DDDDPP | PP |
*
* - Subtract the length of the second VMA from the first VMA's padding.
* - The second VMA is all padding, adjust its padding length (flags)
*
* 3) Split within the data area
*
* | DDDDPPPP | --> | DD | DDPPPP |
*
* - Remove padding flags from the first VMA.
* - The second VMA has the same padding as before the split.
*/
void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
unsigned long addr, int new_below)
{
unsigned long nr_pad_pages = vma_pad_pages(vma);
unsigned long nr_vma2_pages;
struct vm_area_struct *first;
struct vm_area_struct *second;
if (!nr_pad_pages)
return;
if (new_below) {
first = new;
second = vma;
} else {
first = vma;
second = new;
}
nr_vma2_pages = vma_pages(second);
if (nr_vma2_pages >= nr_pad_pages) { /* Case 1 & 3 */
first->vm_flags &= ~VM_PAD_MASK;
vma_set_pad_pages(second, nr_pad_pages);
} else { /* Case 2 */
vma_set_pad_pages(first, nr_pad_pages - nr_vma2_pages);
vma_set_pad_pages(second, nr_vma2_pages);
}
}
#endif /* PAGE_SIZE == SZ_4K */
#endif /* CONFIG_64BIT */
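The three split cases in the comment above reduce to arithmetic on page counts. A standalone model, not kernel code, of split_pad_vma() applied to an 8-page VMA carrying 4 pad pages:

#include <stdio.h>

static void split(unsigned long total_pages, unsigned long pad_pages,
		  unsigned long second_pages)
{
	unsigned long first_pad, second_pad;

	if (second_pages >= pad_pages) {	/* cases 1 and 3 */
		first_pad = 0;
		second_pad = pad_pages;
	} else {				/* case 2 */
		first_pad = pad_pages - second_pages;
		second_pad = second_pages;	/* second VMA is all padding */
	}
	printf("first %lu pages (pad %lu), second %lu pages (pad %lu)\n",
	       total_pages - second_pages, first_pad, second_pages, second_pad);
}

int main(void)
{
	split(8, 4, 4);	/* case 1: split exactly at the padding boundary */
	split(8, 4, 2);	/* case 2: split within the padding area */
	split(8, 4, 6);	/* case 3: split within the data area */
	return 0;
}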

=== file diff ===

@@ -4754,6 +4754,7 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
	if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
		list_del_rcu(&set->list);
+		set->dead = 1;
		if (event)
			nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
					     GFP_KERNEL);

@@ -8778,10 +8779,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
	struct nft_trans *trans, *next;
	LIST_HEAD(set_update_list);
	struct nft_trans_elem *te;
+	int err = 0;
 
	if (action == NFNL_ABORT_VALIDATE &&
	    nf_tables_validate(net) < 0)
-		return -EAGAIN;
+		err = -EAGAIN;
 
	list_for_each_entry_safe_reverse(trans, next, &nft_net->commit_list,
					 list) {

@@ -8941,12 +8943,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
		nf_tables_abort_release(trans);
	}
 
-	if (action == NFNL_ABORT_AUTOLOAD)
-		nf_tables_module_autoload(net);
-	else
-		nf_tables_module_autoload_cleanup(net);
-
-	return 0;
+	return err;
 }

@@ -8960,6 +8957,16 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb,
	ret = __nf_tables_abort(net, action);
	nft_gc_seq_end(nft_net, gc_seq);
 
+	WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
+
+	/* module autoload needs to happen after GC sequence update because it
+	 * temporarily releases and grabs mutex again.
+	 */
+	if (action == NFNL_ABORT_AUTOLOAD)
+		nf_tables_module_autoload(net);
+	else
+		nf_tables_module_autoload_cleanup(net);
+
	mutex_unlock(&nft_net->commit_mutex);
 
	return ret;

@@ -9694,8 +9701,11 @@ static void __net_exit nf_tables_exit_net(struct net *net)
	gc_seq = nft_gc_seq_begin(nft_net);
 
-	if (!list_empty(&nft_net->commit_list))
-		__nf_tables_abort(net, NFNL_ABORT_NONE);
+	WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
+
+	if (!list_empty(&nft_net->module_list))
+		nf_tables_module_autoload_cleanup(net);
+
	__nft_release_tables(net);
 
	nft_gc_seq_end(nft_net, gc_seq);

=== file diff ===

@@ -228,7 +228,9 @@ cpp_flags = -Wp,-MMD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
 
 ld_flags = $(KBUILD_LDFLAGS) $(ldflags-y) $(LDFLAGS_$(@F))
 
-DTC_INCLUDE := $(srctree)/scripts/dtc/include-prefixes
+# ANDROID: Allow DTC_INCLUDE to be set by the BUILD_CONFIG. This allows one to
+# compile an out-of-tree device tree.
+DTC_INCLUDE += $(srctree)/scripts/dtc/include-prefixes
 
 dtc_cpp_flags = -Wp,-MMD,$(depfile).pre.tmp -nostdinc \
		 $(addprefix -I,$(DTC_INCLUDE)) \

=== file diff ===

@@ -21,9 +21,6 @@
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *   GNU General Public License for more details.
  */
-
-
-
 #include <stdio.h>
 #include <stdlib.h>
 #include <time.h>

@@ -62,45 +59,47 @@ int clear_time_state(void)
 #define NUM_FREQ_OUTOFRANGE 4
 #define NUM_FREQ_INVALID 2
 
+#define SHIFTED_PPM (1 << 16)
+
 long valid_freq[NUM_FREQ_VALID] = {
-	-499<<16,
-	-450<<16,
-	-400<<16,
-	-350<<16,
-	-300<<16,
-	-250<<16,
-	-200<<16,
-	-150<<16,
-	-100<<16,
-	-75<<16,
-	-50<<16,
-	-25<<16,
-	-10<<16,
-	-5<<16,
-	-1<<16,
+	-499 * SHIFTED_PPM,
+	-450 * SHIFTED_PPM,
+	-400 * SHIFTED_PPM,
+	-350 * SHIFTED_PPM,
+	-300 * SHIFTED_PPM,
+	-250 * SHIFTED_PPM,
+	-200 * SHIFTED_PPM,
+	-150 * SHIFTED_PPM,
+	-100 * SHIFTED_PPM,
+	-75 * SHIFTED_PPM,
+	-50 * SHIFTED_PPM,
+	-25 * SHIFTED_PPM,
+	-10 * SHIFTED_PPM,
+	-5 * SHIFTED_PPM,
+	-1 * SHIFTED_PPM,
	-1000,
-	1<<16,
-	5<<16,
-	10<<16,
-	25<<16,
-	50<<16,
-	75<<16,
-	100<<16,
-	150<<16,
-	200<<16,
-	250<<16,
-	300<<16,
-	350<<16,
-	400<<16,
-	450<<16,
-	499<<16,
+	1 * SHIFTED_PPM,
+	5 * SHIFTED_PPM,
+	10 * SHIFTED_PPM,
+	25 * SHIFTED_PPM,
+	50 * SHIFTED_PPM,
+	75 * SHIFTED_PPM,
+	100 * SHIFTED_PPM,
+	150 * SHIFTED_PPM,
+	200 * SHIFTED_PPM,
+	250 * SHIFTED_PPM,
+	300 * SHIFTED_PPM,
+	350 * SHIFTED_PPM,
+	400 * SHIFTED_PPM,
+	450 * SHIFTED_PPM,
+	499 * SHIFTED_PPM,
 };
 
 long outofrange_freq[NUM_FREQ_OUTOFRANGE] = {
-	-1000<<16,
-	-550<<16,
-	550<<16,
-	1000<<16,
+	-1000 * SHIFTED_PPM,
+	-550 * SHIFTED_PPM,
+	550 * SHIFTED_PPM,
+	1000 * SHIFTED_PPM,
 };
 
 #define LONG_MAX (~0UL>>1)
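The reason for SHIFTED_PPM: an expression like -499<<16 left-shifts a negative signed value, which is undefined behavior in C, while -499 * (1 << 16) yields the same scaled-ppm value with defined semantics. A standalone illustration:

#include <stdio.h>

#define SHIFTED_PPM (1 << 16)

int main(void)
{
	long ppm = -499;
	/* long bad = ppm << 16;  -- undefined behavior for negative ppm */
	long good = ppm * SHIFTED_PPM;	/* defined: ppm in 2^-16 ppm units */

	printf("%ld ppm -> %ld shifted ppm\n", ppm, good);
	return 0;
}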