
Changes in 5.10.67 rtc: tps65910: Correct driver module alias io_uring: limit fixed table size by RLIMIT_NOFILE io_uring: place fixed tables under memcg limits io_uring: add ->splice_fd_in checks io_uring: fail links of cancelled timeouts io-wq: fix wakeup race when adding new work btrfs: wake up async_delalloc_pages waiters after submit btrfs: reset replace target device to allocation state on close blk-zoned: allow zone management send operations without CAP_SYS_ADMIN blk-zoned: allow BLKREPORTZONE without CAP_SYS_ADMIN PCI/MSI: Skip masking MSI-X on Xen PV powerpc/perf/hv-gpci: Fix counter value parsing xen: fix setting of max_pfn in shared_info 9p/xen: Fix end of loop tests for list_for_each_entry ceph: fix dereference of null pointer cf selftests/ftrace: Fix requirement check of README file tools/thermal/tmon: Add cross compiling support clk: socfpga: agilex: fix the parents of the psi_ref_clk clk: socfpga: agilex: fix up s2f_user0_clk representation clk: socfpga: agilex: add the bypass register for s2f_usr0 clock pinctrl: stmfx: Fix hazardous u8[] to unsigned long cast pinctrl: ingenic: Fix incorrect pull up/down info soc: qcom: aoss: Fix the out of bound usage of cooling_devs soc: aspeed: lpc-ctrl: Fix boundary check for mmap soc: aspeed: p2a-ctrl: Fix boundary check for mmap arm64: mm: Fix TLBI vs ASID rollover arm64: head: avoid over-mapping in map_memory iio: ltc2983: fix device probe wcn36xx: Ensure finish scan is not requested before start scan crypto: public_key: fix overflow during implicit conversion block: bfq: fix bfq_set_next_ioprio_data() power: supply: max17042: handle fails of reading status register dm crypt: Avoid percpu_counter spinlock contention in crypt_page_alloc() crypto: ccp - shutdown SEV firmware on kexec VMCI: fix NULL pointer dereference when unmapping queue pair media: uvc: don't do DMA on stack media: rc-loopback: return number of emitters rather than error s390/qdio: fix roll-back after timeout on ESTABLISH ccw s390/qdio: cancel 
the ESTABLISH ccw after timeout Revert "dmaengine: imx-sdma: refine to load context only once" dmaengine: imx-sdma: remove duplicated sdma_load_context libata: add ATA_HORKAGE_NO_NCQ_TRIM for Samsung 860 and 870 SSDs ARM: 9105/1: atags_to_fdt: don't warn about stack size f2fs: fix to do sanity check for sb/cp fields correctly PCI/portdrv: Enable Bandwidth Notification only if port supports it PCI: Restrict ASMedia ASM1062 SATA Max Payload Size Supported PCI: Return ~0 data on pciconfig_read() CAP_SYS_ADMIN failure PCI: xilinx-nwl: Enable the clock through CCF PCI: aardvark: Configure PCIe resources from 'ranges' DT property PCI: Export pci_pio_to_address() for module use PCI: aardvark: Fix checking for PIO status PCI: aardvark: Fix masking and unmasking legacy INTx interrupts HID: input: do not report stylus battery state as "full" f2fs: quota: fix potential deadlock pinctrl: remove empty lines in pinctrl subsystem pinctrl: armada-37xx: Correct PWM pins definitions scsi: bsg: Remove support for SCSI_IOCTL_SEND_COMMAND clk: rockchip: drop GRF dependency for rk3328/rk3036 pll types IB/hfi1: Adjust pkey entry in index 0 RDMA/iwcm: Release resources if iw_cm module initialization fails docs: Fix infiniband uverbs minor number scsi: BusLogic: Use %X for u32 sized integer rather than %lX pinctrl: samsung: Fix pinctrl bank pin count vfio: Use config not menuconfig for VFIO_NOIOMMU scsi: ufs: Fix memory corruption by ufshcd_read_desc_param() cpuidle: pseries: Fixup CEDE0 latency only for POWER10 onwards powerpc/stacktrace: Include linux/delay.h RDMA/efa: Remove double QP type assignment RDMA/mlx5: Delete not-available udata check cpuidle: pseries: Mark pseries_idle_proble() as __init f2fs: reduce the scope of setting fsck tag when de->name_len is zero openrisc: don't printk() unconditionally dma-debug: fix debugfs initialization order NFSv4/pNFS: Fix a layoutget livelock loop NFSv4/pNFS: Always allow update of a zero valued layout barrier NFSv4/pnfs: The layout barrier 
indicate a minimal value for the seqid SUNRPC: Fix potential memory corruption SUNRPC/xprtrdma: Fix reconnection locking SUNRPC query transport's source port sunrpc: Fix return value of get_srcport() scsi: fdomain: Fix error return code in fdomain_probe() pinctrl: single: Fix error return code in pcs_parse_bits_in_pinctrl_entry() powerpc/numa: Consider the max NUMA node for migratable LPAR scsi: smartpqi: Fix an error code in pqi_get_raid_map() scsi: qedi: Fix error codes in qedi_alloc_global_queues() scsi: qedf: Fix error codes in qedf_alloc_global_queues() powerpc/config: Renable MTD_PHYSMAP_OF iommu/vt-d: Update the virtual command related registers HID: i2c-hid: Fix Elan touchpad regression clk: imx8m: fix clock tree update of TF-A managed clocks KVM: PPC: Book3S HV: Fix copy_tofrom_guest routines scsi: ufs: ufs-exynos: Fix static checker warning KVM: PPC: Book3S HV Nested: Reflect guest PMU in-use to L0 when guest SPRs are live platform/x86: dell-smbios-wmi: Add missing kfree in error-exit from run_smbios_call powerpc/smp: Update cpu_core_map on all PowerPc systems RDMA/hns: Fix QP's resp incomplete assignment fscache: Fix cookie key hashing clk: at91: clk-generated: Limit the requested rate to our range KVM: PPC: Fix clearing never mapped TCEs in realmode soc: mediatek: cmdq: add address shift in jump f2fs: fix to account missing .skipped_gc_rwsem f2fs: fix unexpected ENOENT comes from f2fs_map_blocks() f2fs: fix to unmap pages from userspace process in punch_hole() f2fs: deallocate compressed pages when error happens f2fs: should put a page beyond EOF when preparing a write MIPS: Malta: fix alignment of the devicetree buffer kbuild: Fix 'no symbols' warning when CONFIG_TRIM_UNUSD_KSYMS=y userfaultfd: prevent concurrent API initialization drm/vc4: hdmi: Set HD_CTL_WHOLSMP and HD_CTL_CHALIGN_SET drm/amdgpu: Fix amdgpu_ras_eeprom_init() ASoC: atmel: ATMEL drivers don't need HAS_DMA media: dib8000: rewrite the init prbs logic libbpf: Fix reuse of pinned map on 
older kernel x86/hyperv: fix for unwanted manipulation of sched_clock when TSC marked unstable crypto: mxs-dcp - Use sg_mapping_iter to copy data PCI: Use pci_update_current_state() in pci_enable_device_flags() tipc: keep the skb in rcv queue until the whole data is read net: phy: Fix data type in DP83822 dp8382x_disable_wol() iio: dac: ad5624r: Fix incorrect handling of an optional regulator. iavf: do not override the adapter state in the watchdog task iavf: fix locking of critical sections ARM: dts: qcom: apq8064: correct clock names video: fbdev: kyro: fix a DoS bug by restricting user input netlink: Deal with ESRCH error in nlmsg_notify() Smack: Fix wrong semantics in smk_access_entry() drm: avoid blocking in drm_clients_info's rcu section drm: serialize drm_file.master with a new spinlock drm: protect drm_master pointers in drm_lease.c rcu: Fix macro name CONFIG_TASKS_RCU_TRACE igc: Check if num of q_vectors is smaller than max before array access usb: host: fotg210: fix the endpoint's transactional opportunities calculation usb: host: fotg210: fix the actual_length of an iso packet usb: gadget: u_ether: fix a potential null pointer dereference USB: EHCI: ehci-mv: improve error handling in mv_ehci_enable() usb: gadget: composite: Allow bMaxPower=0 if self-powered staging: board: Fix uninitialized spinlock when attaching genpd tty: serial: jsm: hold port lock when reporting modem line changes bus: fsl-mc: fix mmio base address for child DPRCs selftests: firmware: Fix ignored return val of asprintf() warn drm/amd/display: Fix timer_per_pixel unit error media: hantro: vp8: Move noisy WARN_ON to vpu_debug media: platform: stm32: unprepare clocks at handling errors in probe media: atomisp: Fix runtime PM imbalance in atomisp_pci_probe media: atomisp: pci: fix error return code in atomisp_pci_probe() nfp: fix return statement in nfp_net_parse_meta() ethtool: improve compat ioctl handling drm/amdgpu: Fix a printing message drm/amd/amdgpu: Update debugfs link_settings 
output link_rate field in hex bpf/tests: Fix copy-and-paste error in double word test bpf/tests: Do not PASS tests without actually testing the result drm/bridge: nwl-dsi: Avoid potential multiplication overflow on 32-bit arm64: dts: allwinner: h6: tanix-tx6: Fix regulator node names video: fbdev: asiliantfb: Error out if 'pixclock' equals zero video: fbdev: kyro: Error out if 'pixclock' equals zero video: fbdev: riva: Error out if 'pixclock' equals zero ipv4: ip_output.c: Fix out-of-bounds warning in ip_copy_addrs() flow_dissector: Fix out-of-bounds warnings s390/jump_label: print real address in a case of a jump label bug s390: make PCI mio support a machine flag serial: 8250: Define RX trigger levels for OxSemi 950 devices xtensa: ISS: don't panic in rs_init hvsi: don't panic on tty_register_driver failure serial: 8250_pci: make setup_port() parameters explicitly unsigned staging: ks7010: Fix the initialization of the 'sleep_status' structure samples: bpf: Fix tracex7 error raised on the missing argument libbpf: Fix race when pinning maps in parallel ata: sata_dwc_460ex: No need to call phy_exit() befre phy_init() Bluetooth: skip invalid hci_sync_conn_complete_evt workqueue: Fix possible memory leaks in wq_numa_init() ARM: dts: stm32: Set {bitclock,frame}-master phandles on DHCOM SoM ARM: dts: stm32: Set {bitclock,frame}-master phandles on ST DKx ARM: dts: stm32: Update AV96 adv7513 node per dtbs_check bonding: 3ad: fix the concurrency between __bond_release_one() and bond_3ad_state_machine_handler() ARM: dts: at91: use the right property for shutdown controller arm64: tegra: Fix Tegra194 PCIe EP compatible string ASoC: Intel: bytcr_rt5640: Move "Platform Clock" routes to the maps for the matching in-/output ASoC: Intel: update sof_pcm512x quirks media: imx258: Rectify mismatch of VTS value media: imx258: Limit the max analogue gain to 480 media: v4l2-dv-timings.c: fix wrong condition in two for-loops media: TDA1997x: fix tda1997x_query_dv_timings() return value 
media: tegra-cec: Handle errors of clk_prepare_enable() gfs2: Fix glock recursion in freeze_go_xmote_bh arm64: dts: qcom: sdm630: Rewrite memory map arm64: dts: qcom: sdm630: Fix TLMM node and pinctrl configuration serial: 8250_omap: Handle optional overrun-throttle-ms property ARM: dts: imx53-ppd: Fix ACHC entry arm64: dts: qcom: ipq8074: fix pci node reg property arm64: dts: qcom: sdm660: use reg value for memory node arm64: dts: qcom: ipq6018: drop '0x' from unit address arm64: dts: qcom: sdm630: don't use underscore in node name arm64: dts: qcom: msm8994: don't use underscore in node name arm64: dts: qcom: msm8996: don't use underscore in node name arm64: dts: qcom: sm8250: Fix epss_l3 unit address nvmem: qfprom: Fix up qfprom_disable_fuse_blowing() ordering net: ethernet: stmmac: Do not use unreachable() in ipq806x_gmac_probe() drm/msm: mdp4: drop vblank get/put from prepare/complete_commit drm/msm/dsi: Fix DSI and DSI PHY regulator config from SDM660 drm: xlnx: zynqmp_dpsub: Call pm_runtime_get_sync before setting pixel clock drm: xlnx: zynqmp: release reset to DP controller before accessing DP registers thunderbolt: Fix port linking by checking all adapters drm/amd/display: fix missing writeback disablement if plane is removed drm/amd/display: fix incorrect CM/TF programming sequence in dwb selftests/bpf: Fix xdp_tx.c prog section name drm/vmwgfx: fix potential UAF in vmwgfx_surface.c Bluetooth: schedule SCO timeouts with delayed_work Bluetooth: avoid circular locks in sco_sock_connect drm/msm/dp: return correct edid checksum after corrupted edid checksum read net/mlx5: Fix variable type to match 64bit gpu: drm: amd: amdgpu: amdgpu_i2c: fix possible uninitialized-variable access in amdgpu_i2c_router_select_ddc_port() drm/display: fix possible null-pointer dereference in dcn10_set_clock() mac80211: Fix monitor MTU limit so that A-MSDUs get through ARM: tegra: acer-a500: Remove bogus USB VBUS regulators ARM: tegra: tamonten: Fix UART pad setting arm64: tegra: 
Fix compatible string for Tegra132 CPUs arm64: dts: ls1046a: fix eeprom entries nvme-tcp: don't check blk_mq_tag_to_rq when receiving pdu data nvme: code command_id with a genctr for use-after-free validation Bluetooth: Fix handling of LE Enhanced Connection Complete opp: Don't print an error if required-opps is missing serial: sh-sci: fix break handling for sysrq iomap: pass writeback errors to the mapping tcp: enable data-less, empty-cookie SYN with TFO_SERVER_COOKIE_NOT_REQD rpc: fix gss_svc_init cleanup on failure selftests/bpf: Fix flaky send_signal test hwmon: (pmbus/ibm-cffps) Fix write bits for LED control staging: rts5208: Fix get_ms_information() heap buffer size net: Fix offloading indirect devices dependency on qdisc order creation kselftest/arm64: mte: Fix misleading output when skipping tests kselftest/arm64: pac: Fix skipping of tests on systems without PAC gfs2: Don't call dlm after protocol is unmounted usb: chipidea: host: fix port index underflow and UBSAN complains lockd: lockd server-side shouldn't set fl_ops drm/exynos: Always initialize mapping in exynos_drm_register_dma() rtl8xxxu: Fix the handling of TX A-MPDU aggregation rtw88: use read_poll_timeout instead of fixed sleep rtw88: wow: build wow function only if CONFIG_PM is on rtw88: wow: fix size access error of probe request octeontx2-pf: Fix NIX1_RX interface backpressure m68knommu: only set CONFIG_ISA_DMA_API for ColdFire sub-arch btrfs: tree-log: check btrfs_lookup_data_extent return value soundwire: intel: fix potential race condition during power down ASoC: Intel: Skylake: Fix module configuration for KPB and MIXER ASoC: Intel: Skylake: Fix passing loadable flag for module of: Don't allow __of_attached_node_sysfs() without CONFIG_SYSFS mmc: sdhci-of-arasan: Modified SD default speed to 19MHz for ZynqMP mmc: sdhci-of-arasan: Check return value of non-void funtions mmc: rtsx_pci: Fix long reads when clock is prescaled selftests/bpf: Enlarge select() timeout for test_maps mmc: core: 
Return correct emmc response in case of ioctl error cifs: fix wrong release in sess_alloc_buffer() failed path Revert "USB: xhci: fix U1/U2 handling for hardware with XHCI_INTEL_HOST quirk set" usb: musb: musb_dsps: request_irq() after initializing musb usbip: give back URBs for unsent unlink requests during cleanup usbip:vhci_hcd USB port can get stuck in the disabled state ASoC: rockchip: i2s: Fix regmap_ops hang ASoC: rockchip: i2s: Fixup config for DAIFMT_DSP_A/B drm/amdkfd: Account for SH/SE count when setting up cu masks. nfsd: fix crash on LOCKT on reexported NFSv3 iwlwifi: pcie: free RBs during configure iwlwifi: mvm: fix a memory leak in iwl_mvm_mac_ctxt_beacon_changed iwlwifi: mvm: avoid static queue number aliasing iwlwifi: mvm: fix access to BSS elements iwlwifi: fw: correctly limit to monitor dump iwlwifi: mvm: Fix scan channel flags settings net/mlx5: DR, fix a potential use-after-free bug net/mlx5: DR, Enable QP retransmission parport: remove non-zero check on count selftests/bpf: Fix potential unreleased lock wcn36xx: Fix missing frame timestamp for beacon/probe-resp ath9k: fix OOB read ar9300_eeprom_restore_internal ath9k: fix sleeping in atomic context net: fix NULL pointer reference in cipso_v4_doi_free fix array-index-out-of-bounds in taprio_change net: w5100: check return value after calling platform_get_resource() net: hns3: clean up a type mismatch warning fs/io_uring Don't use the return value from import_iovec(). 
io_uring: remove duplicated io_size from rw parisc: fix crash with signals and alloca ovl: fix BUG_ON() in may_delete() when called from ovl_cleanup() scsi: BusLogic: Fix missing pr_cont() use scsi: qla2xxx: Changes to support kdump kernel scsi: qla2xxx: Sync queue idx with queue_pair_map idx cpufreq: powernv: Fix init_chip_info initialization in numa=off s390/pv: fix the forcing of the swiotlb hugetlb: fix hugetlb cgroup refcounting during vma split mm/hmm: bypass devmap pte when all pfn requested flags are fulfilled mm/hugetlb: initialize hugetlb_usage in mm_init mm,vmscan: fix divide by zero in get_scan_count memcg: enable accounting for pids in nested pid namespaces libnvdimm/pmem: Fix crash triggered when I/O in-flight during unbind platform/chrome: cros_ec_proto: Send command again when timeout occurs lib/test_stackinit: Fix static initializer test net: dsa: lantiq_gswip: fix maximum frame length drm/mgag200: Select clock in PLL update functions drm/msi/mdp4: populate priv->kms in mdp4_kms_init drm/dp_mst: Fix return code on sideband message failure drm/panfrost: Make sure MMU context lifetime is not bound to panfrost_priv drm/amdgpu: Fix BUG_ON assert drm/amd/display: Update number of DCN3 clock states drm/amd/display: Update bounding box states (v2) drm/panfrost: Simplify lock_region calculation drm/panfrost: Use u64 for size in lock_region drm/panfrost: Clamp lock region to Bifrost minimum fanotify: limit number of event merge attempts Linux 5.10.67 Signed-off-by: Greg Kroah-Hartman <gregkh@google.com> Change-Id: Ic8df59518265d0cdf724e93e8922cde48fc85ce9
967 lines
24 KiB
C
967 lines
24 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _LINUX_HUGETLB_H
|
|
#define _LINUX_HUGETLB_H
|
|
|
|
#include <linux/mm_types.h>
|
|
#include <linux/mmdebug.h>
|
|
#include <linux/fs.h>
|
|
#include <linux/hugetlb_inline.h>
|
|
#include <linux/cgroup.h>
|
|
#include <linux/list.h>
|
|
#include <linux/kref.h>
|
|
#include <linux/pgtable.h>
|
|
#include <linux/gfp.h>
|
|
#include <linux/userfaultfd_k.h>
|
|
|
|
/* Forward declarations: only pointers to these are used in this header. */
struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
/*
 * Fallback for architectures that do not provide hugepage-directory
 * support: define a dummy hugepd_t so generic code compiles, with
 * is_hugepd() constant-false so the hugepd paths are compiled away.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif
|
|
|
|
#ifdef CONFIG_HUGETLB_PAGE
|
|
|
|
#include <linux/mempolicy.h>
|
|
#include <linux/shm.h>
|
|
#include <asm/tlbflush.h>
|
|
|
|
/*
 * Per-mount pool of huge pages; page accounting against the pool's
 * maximum/minimum is done under @lock.
 */
struct hugepage_subpool {
	spinlock_t lock;
	long count;		/* NOTE(review): looks like a reference count
				 * (see hugepage_put_subpool) -- confirm */
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;	/* huge page size backing this subpool */
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};
|
|
|
|
/*
 * Tracks huge page reservations for one mapping.  @regions is a list of
 * file_region entries (documented below) describing reserved ranges and
 * is protected by @lock.
 */
struct resv_map {
	struct kref refs;		/* lifetime; freed via resv_map_release() */
	spinlock_t lock;		/* protects @regions and the cache fields */
	struct list_head regions;
	long adds_in_progress;		/* in-flight region additions */
	struct list_head region_cache;	/* spare entries; presumably preallocated
					 * so additions under @lock need not
					 * allocate -- verify in mm/hugetlb.c */
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};
|
|
|
|
/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;	/* membership in resv_map->regions */
	long from;		/* first huge page index of the region */
	long to;		/* first index past the end of the region */
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
|
|
|
|
/* resv_map lifetime: alloc returns a refcounted map, release drops it. */
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

/* Global hugetlb state; hugetlb_lock presumably guards the hstate pools --
 * confirm against mm/hugetlb.c. */
extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;

/* Iterate over every registered huge page size. */
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

/* Subpool setup/teardown (per hugetlbfs mount). */
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
					      long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);

/* sysctl handlers for /proc/sys/vm hugetlb knobs. */
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
			       loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
				  loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
				     loff_t *);
|
|
|
|
/* Core hugetlb entry points, implemented in mm/hugetlb.c. */
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);

/* /proc/meminfo style reporting. */
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);

vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     enum mcopy_atomic_mode mode,
			     struct page **pagep);
#endif /* CONFIG_USERFAULTFD */

/* Reservation bookkeeping for [from, to) huge page indices of an inode. */
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);

/* Migration/isolation helpers. */
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);

/* Fault serialization: mutex chosen by hashing (mapping, index). */
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
|
|
|
|
/* arch callbacks -- overridable per architecture. */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
		     unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
					  unsigned long *start,
					  unsigned long *end);

/* follow_huge_* walk a huge mapping at the named page table level. */
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
|
|
|
|
#else /* !CONFIG_HUGETLB_PAGE */
|
|
|
|
/*
 * !CONFIG_HUGETLB_PAGE stubs: hugetlb is compiled out, so these either do
 * nothing or report "no huge pages"; paths that must never be reached
 * without hugetlb support BUG() instead.
 */
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}
|
|
|
|
/* Must never be called when hugetlb is compiled out. */
static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

/* Must never be called when hugetlb is compiled out. */
static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}
|
|
|
|
/* More !CONFIG_HUGETLB_PAGE stubs: no huge mappings exist at any level. */
static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
				unsigned long addr, unsigned long len)
{
	return 0;
}

/* Must never be called when hugetlb is compiled out. */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}
|
|
|
|
#ifdef CONFIG_USERFAULTFD
|
|
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
|
|
pte_t *dst_pte,
|
|
struct vm_area_struct *dst_vma,
|
|
unsigned long dst_addr,
|
|
unsigned long src_addr,
|
|
enum mcopy_atomic_mode mode,
|
|
struct page **pagep)
|
|
{
|
|
BUG();
|
|
return 0;
|
|
}
|
|
#endif /* CONFIG_USERFAULTFD */
|
|
|
|
/* Remaining !CONFIG_HUGETLB_PAGE stubs. */
static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

/* Nothing to isolate: report failure. */
static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

/* No pages changed. */
static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

/* Must never be called when hugetlb is compiled out. */
static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

/* Must never be called when hugetlb is compiled out. */
static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

/* Must never be called when hugetlb is compiled out. */
static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
|
|
|
|
#endif /* !CONFIG_HUGETLB_PAGE */
|
|
/*
 * hugepages at page global directory. If arch support
 * hugepages at pgd level, they need to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
/* Fallback for arches without pgd-level write permission: never reached. */
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif
|
|
|
|
/* Name used for anonymous hugetlbfs-backed files. */
#define HUGETLB_ANON_FILE	"anon_hugepage"

/* Flags describing how a hugetlbfs inode was created (accounting rules). */
enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};
|
|
|
|
#ifdef CONFIG_HUGETLBFS
/* Per-superblock hugetlbfs state (mount options and inode accounting). */
struct hugetlbfs_sb_info {
	long max_inodes;	/* inodes allowed */
	long free_inodes;	/* inodes free */
	spinlock_t stat_lock;	/* protects the inode counters above */
	struct hstate *hstate;	/* huge page size for this mount */
	struct hugepage_subpool *spool;
	kuid_t uid;		/* mount-time ownership/permissions */
	kgid_t gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/* Per-inode hugetlbfs state; vfs_inode must stay embedded for container_of. */
struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;	/* memfd-style seals -- see F_SEAL_* users */
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;

/* Create an unlinked hugetlbfs file (e.g. for anonymous huge mappings). */
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);
|
|
|
|
static inline bool is_file_hugepages(struct file *file)
|
|
{
|
|
if (file->f_op == &hugetlbfs_file_operations)
|
|
return true;
|
|
|
|
return is_file_shm_hugepages(file);
|
|
}
|
|
|
|
static inline struct hstate *hstate_inode(struct inode *i)
|
|
{
|
|
return HUGETLBFS_SB(i->i_sb)->hstate;
|
|
}
|
|
#else /* !CONFIG_HUGETLBFS */

/* hugetlbfs compiled out: nothing is a hugetlb file. */
#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */
|
|
|
|
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
|
|
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
|
|
unsigned long len, unsigned long pgoff,
|
|
unsigned long flags);
|
|
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
|
|
|
|
#ifdef CONFIG_HUGETLB_PAGE
|
|
|
|
#define HSTATE_NAME_LEN 32
|
|
/* Defines one hugetlb page size */
|
|
struct hstate {
|
|
int next_nid_to_alloc;
|
|
int next_nid_to_free;
|
|
unsigned int order;
|
|
unsigned long mask;
|
|
unsigned long max_huge_pages;
|
|
unsigned long nr_huge_pages;
|
|
unsigned long free_huge_pages;
|
|
unsigned long resv_huge_pages;
|
|
unsigned long surplus_huge_pages;
|
|
unsigned long nr_overcommit_huge_pages;
|
|
struct list_head hugepage_activelist;
|
|
struct list_head hugepage_freelists[MAX_NUMNODES];
|
|
unsigned int nr_huge_pages_node[MAX_NUMNODES];
|
|
unsigned int free_huge_pages_node[MAX_NUMNODES];
|
|
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
|
|
#ifdef CONFIG_CGROUP_HUGETLB
|
|
/* cgroup control files */
|
|
struct cftype cgroup_files_dfl[7];
|
|
struct cftype cgroup_files_legacy[9];
|
|
#endif
|
|
char name[HSTATE_NAME_LEN];
|
|
};
|
|
|
|
struct huge_bootmem_page {
|
|
struct list_head list;
|
|
struct hstate *hstate;
|
|
};
|
|
|
|
struct page *alloc_huge_page(struct vm_area_struct *vma,
|
|
unsigned long addr, int avoid_reserve);
|
|
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
|
|
nodemask_t *nmask, gfp_t gfp_mask);
|
|
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
|
|
unsigned long address);
|
|
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
|
|
pgoff_t idx);
|
|
|
|
/* arch callback */
|
|
int __init __alloc_bootmem_huge_page(struct hstate *h);
|
|
int __init alloc_bootmem_huge_page(struct hstate *h);
|
|
|
|
void __init hugetlb_add_hstate(unsigned order);
|
|
bool __init arch_hugetlb_valid_size(unsigned long size);
|
|
struct hstate *size_to_hstate(unsigned long size);
|
|
|
|
#ifndef HUGE_MAX_HSTATE
|
|
#define HUGE_MAX_HSTATE 1
|
|
#endif
|
|
|
|
extern struct hstate hstates[HUGE_MAX_HSTATE];
|
|
extern unsigned int default_hstate_idx;
|
|
|
|
#define default_hstate (hstates[default_hstate_idx])
|
|
|
|
/* Huge page size of the hugetlbfs file @f, via its inode. */
static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}
|
|
|
|
static inline struct hstate *hstate_sizelog(int page_size_log)
|
|
{
|
|
if (!page_size_log)
|
|
return &default_hstate;
|
|
|
|
return size_to_hstate(1UL << page_size_log);
|
|
}
|
|
|
|
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
|
|
{
|
|
return hstate_file(vma->vm_file);
|
|
}
|
|
|
|
static inline unsigned long huge_page_size(struct hstate *h)
|
|
{
|
|
return (unsigned long)PAGE_SIZE << h->order;
|
|
}
|
|
|
|
extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
|
|
|
|
extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
|
|
|
|
static inline unsigned long huge_page_mask(struct hstate *h)
|
|
{
|
|
return h->mask;
|
|
}
|
|
|
|
static inline unsigned int huge_page_order(struct hstate *h)
|
|
{
|
|
return h->order;
|
|
}
|
|
|
|
static inline unsigned huge_page_shift(struct hstate *h)
|
|
{
|
|
return h->order + PAGE_SHIFT;
|
|
}
|
|
|
|
static inline bool hstate_is_gigantic(struct hstate *h)
|
|
{
|
|
return huge_page_order(h) >= MAX_ORDER;
|
|
}
|
|
|
|
static inline unsigned int pages_per_huge_page(struct hstate *h)
|
|
{
|
|
return 1 << h->order;
|
|
}
|
|
|
|
static inline unsigned int blocks_per_huge_page(struct hstate *h)
|
|
{
|
|
return huge_page_size(h) / 512;
|
|
}
|
|
|
|
#include <asm/hugetlb.h>
|
|
|
|
#ifndef is_hugepage_only_range
|
|
/* Generic fallback: no architecture-reserved hugepage-only ranges. */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
|
|
#define is_hugepage_only_range is_hugepage_only_range
|
|
#endif
|
|
|
|
#ifndef arch_clear_hugepage_flags
|
|
static inline void arch_clear_hugepage_flags(struct page *page) { }
|
|
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
|
|
#endif
|
|
|
|
#ifndef arch_make_huge_pte
|
|
/* Generic fallback: no architecture-specific PTE bits to apply. */
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
|
|
#endif
|
|
|
|
/* Map a huge page back to its hstate via its compound size. */
static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}
|
|
|
|
static inline unsigned hstate_index_to_shift(unsigned index)
|
|
{
|
|
return hstates[index].order + PAGE_SHIFT;
|
|
}
|
|
|
|
static inline int hstate_index(struct hstate *h)
|
|
{
|
|
return h - hstates;
|
|
}
|
|
|
|
extern int dissolve_free_huge_page(struct page *page);
|
|
extern int dissolve_free_huge_pages(unsigned long start_pfn,
|
|
unsigned long end_pfn);
|
|
|
|
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
|
|
#ifndef arch_hugetlb_migration_supported
|
|
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
|
|
{
|
|
if ((huge_page_shift(h) == PMD_SHIFT) ||
|
|
(huge_page_shift(h) == PUD_SHIFT) ||
|
|
(huge_page_shift(h) == PGDIR_SHIFT))
|
|
return true;
|
|
else
|
|
return false;
|
|
}
|
|
#endif
|
|
#else
|
|
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
|
|
{
|
|
return false;
|
|
}
|
|
#endif
|
|
|
|
/* Defer entirely to the architecture's notion of migration support. */
static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}
|
|
|
|
/*
 * The movability check differs from the migration check: it decides
 * whether a huge page should be placed in a movable zone.  Movability
 * matters only if the huge page size supports migration at all — there
 * is no reason for a huge page to be movable if it is not migratable to
 * start with.  The page must also be small enough that migrating it out
 * of a movable zone remains feasible; mere presence in a movable zone
 * does not make migration feasible.
 *
 * So even though large huge page sizes such as gigantic pages are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them out of a movable zone.
 */
|
|
static inline bool hugepage_movable_supported(struct hstate *h)
{
	/*
	 * Movable means migratable AND non-gigantic (see the comment
	 * above for why gigantic pages are excluded).
	 */
	return hugepage_migration_supported(h) && !hstate_is_gigantic(h);
}
|
|
|
|
/* Movability of hugepages depends on migration support. */
|
|
static inline gfp_t htlb_alloc_mask(struct hstate *h)
|
|
{
|
|
if (hugepage_movable_supported(h))
|
|
return GFP_HIGHUSER_MOVABLE;
|
|
else
|
|
return GFP_HIGHUSER;
|
|
}
|
|
|
|
/*
 * Start from the default hugetlb allocation mask and carry over the
 * caller flags that stay meaningful: __GFP_THISNODE (some callers want
 * to enforce a node) and __GFP_NOWARN.
 */
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return htlb_alloc_mask(h) |
	       (gfp_mask & (__GFP_THISNODE | __GFP_NOWARN));
}
|
|
|
|
/*
 * Pick the lock protecting a huge PTE: PMD-sized pages use the PMD's
 * split page-table lock; larger sizes fall back to the mm-wide lock.
 * A base-page-sized hstate here would be bogus, hence the VM_BUG_ON.
 */
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *)pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}
|
|
|
|
#ifndef hugepages_supported
|
|
/*
 * Some platforms decide whether they support huge pages at boot time.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is no
 * such support.
 */
|
|
#define hugepages_supported() (HPAGE_SHIFT != 0)
|
|
#endif
|
|
|
|
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
|
|
|
|
static inline void hugetlb_count_init(struct mm_struct *mm)
|
|
{
|
|
atomic_long_set(&mm->hugetlb_usage, 0);
|
|
}
|
|
|
|
static inline void hugetlb_count_add(long l, struct mm_struct *mm)
|
|
{
|
|
atomic_long_add(l, &mm->hugetlb_usage);
|
|
}
|
|
|
|
static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
|
|
{
|
|
atomic_long_sub(l, &mm->hugetlb_usage);
|
|
}
|
|
|
|
#ifndef set_huge_swap_pte_at
|
|
/* Generic fallback: the size hint @sz is unused; install the PTE plainly. */
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
|
|
#endif
|
|
|
|
#ifndef huge_ptep_modify_prot_start
|
|
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
|
|
/* Generic start of a protection update: clear and return the old PTE. */
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
|
|
#endif
|
|
|
|
#ifndef huge_ptep_modify_prot_commit
|
|
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
|
|
/* Generic commit of a protection update: install the caller's new PTE. */
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
|
|
#endif
|
|
|
|
void set_page_huge_active(struct page *page);
|
|
|
|
#else /* CONFIG_HUGETLB_PAGE */
|
|
struct hstate {};
|
|
|
|
static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
|
|
unsigned long addr,
|
|
int avoid_reserve)
|
|
{
|
|
return NULL;
|
|
}
|
|
|
|
static inline struct page *
|
|
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
|
|
nodemask_t *nmask, gfp_t gfp_mask)
|
|
{
|
|
return NULL;
|
|
}
|
|
|
|
static inline struct page *alloc_huge_page_vma(struct hstate *h,
|
|
struct vm_area_struct *vma,
|
|
unsigned long address)
|
|
{
|
|
return NULL;
|
|
}
|
|
|
|
static inline int __alloc_bootmem_huge_page(struct hstate *h)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline struct hstate *hstate_file(struct file *f)
|
|
{
|
|
return NULL;
|
|
}
|
|
|
|
static inline struct hstate *hstate_sizelog(int page_size_log)
|
|
{
|
|
return NULL;
|
|
}
|
|
|
|
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
|
|
{
|
|
return NULL;
|
|
}
|
|
|
|
static inline struct hstate *page_hstate(struct page *page)
|
|
{
|
|
return NULL;
|
|
}
|
|
|
|
static inline unsigned long huge_page_size(struct hstate *h)
|
|
{
|
|
return PAGE_SIZE;
|
|
}
|
|
|
|
static inline unsigned long huge_page_mask(struct hstate *h)
|
|
{
|
|
return PAGE_MASK;
|
|
}
|
|
|
|
static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
|
|
{
|
|
return PAGE_SIZE;
|
|
}
|
|
|
|
static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
|
|
{
|
|
return PAGE_SIZE;
|
|
}
|
|
|
|
static inline unsigned int huge_page_order(struct hstate *h)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline unsigned int huge_page_shift(struct hstate *h)
|
|
{
|
|
return PAGE_SHIFT;
|
|
}
|
|
|
|
static inline bool hstate_is_gigantic(struct hstate *h)
|
|
{
|
|
return false;
|
|
}
|
|
|
|
static inline unsigned int pages_per_huge_page(struct hstate *h)
|
|
{
|
|
return 1;
|
|
}
|
|
|
|
static inline unsigned hstate_index_to_shift(unsigned index)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline int hstate_index(struct hstate *h)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline int dissolve_free_huge_page(struct page *page)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline int dissolve_free_huge_pages(unsigned long start_pfn,
|
|
unsigned long end_pfn)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline bool hugepage_migration_supported(struct hstate *h)
|
|
{
|
|
return false;
|
|
}
|
|
|
|
static inline bool hugepage_movable_supported(struct hstate *h)
|
|
{
|
|
return false;
|
|
}
|
|
|
|
static inline gfp_t htlb_alloc_mask(struct hstate *h)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
|
|
struct mm_struct *mm, pte_t *pte)
|
|
{
|
|
return &mm->page_table_lock;
|
|
}
|
|
|
|
static inline void hugetlb_count_init(struct mm_struct *mm)
|
|
{
|
|
}
|
|
|
|
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
|
|
{
|
|
}
|
|
|
|
static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
|
|
{
|
|
}
|
|
|
|
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
|
|
pte_t *ptep, pte_t pte, unsigned long sz)
|
|
{
|
|
}
|
|
#endif /* CONFIG_HUGETLB_PAGE */
|
|
|
|
/* Look up the lock for a huge PTE and take it; caller must unlock. */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(h, mm, pte);

	spin_lock(ptl);
	return ptl;
}
|
|
|
|
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
|
|
extern void __init hugetlb_cma_reserve(int order);
|
|
extern void __init hugetlb_cma_check(void);
|
|
#else
|
|
/* No-op stubs: hugetlb CMA support is compiled out. */
static inline __init void hugetlb_cma_reserve(int order)
{
}

static inline __init void hugetlb_cma_check(void)
{
}
|
|
#endif
|
|
|
|
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
|
|
|
|
#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
|
|
/*
|
|
* ARCHes with special requirements for evicting HUGETLB backing TLB entries can
|
|
* implement this.
|
|
*/
|
|
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
|
|
#endif
|
|
|
|
#endif /* _LINUX_HUGETLB_H */
|