// SPDX-License-Identifier: GPL-2.0-only
/* Common code for 32 and 64-bit NUMA */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>
#include <linux/sort.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;

static int numa_distance_cnt;
static u8 *numa_distance;
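
/*
 * Parse the early "numa=" kernel parameter:
 *   numa=off     - disable NUMA (the dummy single-node init is used instead)
 *   numa=fake=.. - pass the rest of the option string to the NUMA emulation code
 *   numa=noacpi  - do not parse the ACPI SRAT table for NUMA information
 *   numa=nohmat  - do not parse the ACPI HMAT table
 */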
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
	if (!strncmp(opt, "fake=", 5))
		return numa_emu_cmdline(opt + 5);
	if (!strncmp(opt, "noacpi", 6))
		disable_srat();
	if (!strncmp(opt, "nohmat", 6))
		disable_hmat();

	return 0;
}
early_param("numa", numa_setup);

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};
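
/* Return the node of @cpu's APIC ID, or NUMA_NO_NODE if it is not known yet. */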
int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}
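
/*
 * Which logical CPUs are on which nodes
 */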
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}
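
/*
 * Validate a memory block and append it to @mi.  Zero-length blocks are
 * silently ignored; blocks with a bogus range or node ID are warned
 * about and dropped.
 */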
static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

/**
 * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another
 * @dst: numa_meminfo to append block to
 * @idx: Index of memblk to remove
 * @src: numa_meminfo to remove memblk from
 */
static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
					 struct numa_meminfo *src)
{
	dst->blk[dst->nr_blks++] = src->blk[idx];
	numa_remove_memblk_from(idx, src);
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data.  Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
		       nd_size, nid);
		return;
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* move / save reserved memory ranges */
		if (!memblock_overlaps_region(&memblock.memory,
					      bi->start, bi->end - bi->start)) {
			numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
			continue;
		}

		/* make sure all non-reserved blocks are inside the limits */
		bi->start = max(bi->start, low);

		/* preserve info for non-RAM areas above 'max_pfn': */
		if (bi->end > high) {
			numa_add_memblk_to(bi->nid, high, bi->end,
					   &numa_reserved_meminfo);
			bi->end = high;
		}

		/* and there's no empty block */
		if (bi->start >= bi->end)
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					bi->nid, bi->start, bi->end - 1,
					bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}

/*
 * Set nodes, which have memory in @mi, in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_free(numa_distance, size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}
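
/*
 * Allocate the distance table, sized by the highest node ID seen so far
 * (either parsed from firmware or carrying memory in numa_meminfo), and
 * fill it with the default LOCAL_DISTANCE / REMOTE_DISTANCE values.
 */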
static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_phys_alloc_range(size, PAGE_SIZE, 0,
					 PFN_PHYS(max_pfn_mapped));
	if (!phys) {
		pr_warn("Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}

/**
 * numa_set_distance - Set the NUMA distance from one node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one which is large enough to accommodate all the
 * currently known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation or @distance doesn't make sense, the call
 * is ignored.  This is to allow simplification of specific NUMA config
 * implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
			from < 0 || to < 0) {
		pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere.  Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}

/*
 * Mark all currently memblock-reserved physical memory (which covers the
 * kernel's own memory ranges) as not hotpluggable.
 */
static void __init numa_clear_kernel_node_hotplug(void)
{
	nodemask_t reserved_nodemask = NODE_MASK_NONE;
	struct memblock_region *mb_region;
	int i;

	/*
	 * We have to do some preprocessing of memblock regions, to
	 * make them suitable for reservation.
	 *
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel, but those regions are not split up
	 * along node boundaries yet, and don't necessarily have their
	 * node ID set yet either.
	 *
	 * So iterate over all memory known to the x86 architecture,
	 * and use those ranges to set the nid in memblock.reserved.
	 * This will split up the memblock regions along node
	 * boundaries and will set the node IDs as well.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;
		int ret;

		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
		WARN_ON_ONCE(ret);
	}

	/*
	 * Now go over all reserved memblock regions, to construct a
	 * node mask of all kernel reserved memory areas.
	 *
	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
	 *   numa_meminfo might not include all memblock.reserved
	 *   memory ranges, because quirks such as trim_snb_memory()
	 *   reserve specific pages for Sandy Bridge graphics. ]
	 */
	for_each_reserved_mem_region(mb_region) {
		int nid = memblock_get_region_node(mb_region);

		if (nid != MAX_NUMNODES)
			node_set(nid, reserved_nodemask);
	}

	/*
	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
	 * belonging to the reserved node mask.
	 *
	 * Note that this will include memory regions that reside
	 * on nodes that contain kernel memory - entire nodes
	 * become hot-unpluggable:
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;

		if (!node_isset(mb->nid, reserved_nodemask))
			continue;

		memblock_clear_hotplug(mb->start, mb->end - mb->start);
	}
}
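
/*
 * Push the parsed memory layout into the core mm: set node IDs on
 * memblock.memory, make kernel-occupied nodes non-hotpluggable, sanity
 * check coverage, and allocate NODE_DATA for every node with memory.
 */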
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];

		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * Very early on the kernel has to use some memory, e.g. for
	 * loading the kernel image.  We cannot prevent this anyway, so
	 * any node the kernel resides in must stay un-hotpluggable.
	 *
	 * By the time we get here, allocating the node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If sections array is gonna be used for pfn -> nid mapping, check
	 * whether its granularity is fine enough.
	 */
	if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
		unsigned long pfn_align = node_map_pfn_alignment();

		if (pfn_align && pfn_align < PAGES_PER_SECTION) {
			pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
				PFN_PHYS(pfn_align) >> 20,
				PFN_PHYS(PAGES_PER_SECTION) >> 20);
			return -EINVAL;
		}
	}

	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU.  This breaks the 1:1 cpu->node
 * mapping.  To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet.  We round robin the existing
 * nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node_in(rr, node_online_map);
	}
}
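
/*
 * Run one NUMA initialization method: reset all NUMA state, let
 * @init_func discover the topology, then sanitize the result, apply any
 * requested emulation, register the nodes with memblock, and finally
 * clear stale cpu->node mappings that point at offline nodes.
 */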
static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case that parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * We reset memblock back to the top-down direction here because
	 * if we configured ACPI_NUMA, we have parsed SRAT in init_func().
	 * It is ok to have the reset here even if we didn't configure
	 * ACPI_NUMA, or if the ACPI NUMA init fails and falls back to the
	 * dummy NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is dummy single node config encompassing whole memory and
 * never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

/*
 * A node may exist which has one or more Generic Initiators but no CPUs
 * and no memory.
 *
 * This function must be called after init_cpu_to_node(), to ensure that
 * any memoryless CPU nodes have already been brought online, and before
 * the node_data[nid] is needed for zone list setup in
 * build_all_zonelists().
 *
 * When this function is called, any nodes containing memory and/or CPUs
 * will already be online and there is no need to do anything extra, even
 * if they also contain one or more Generic Initiators.
 */
void __init init_gi_nodes(void)
{
	int nid;

	/*
	 * Exclude this node from
	 * bringup_nonboot_cpus
	 *  cpu_up
	 *   __try_online_node
	 *    register_one_node
	 * because node_subsys is not initialized yet.
	 * TODO remove dependency on node_online
	 */
	for_each_node_state(nid, N_GENERIC_INITIATOR)
		if (!node_online(nid))
			node_set_online(nid);
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.  This means we
 * skip cpu_to_node[] initialisation for NUMA emulation and the fake node
 * case (when running a kernel compiled for NUMA on a non-NUMA box), which
 * is OK as cpu_to_node[] is already initialized in a round-robin manner
 * in numa_init_array(), prior to this call, and that initialization is
 * good enough for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;

		/*
		 * Exclude this node from
		 * bringup_nonboot_cpus
		 *  cpu_up
		 *   __try_online_node
		 *    register_one_node
		 * because node_subsys is not initialized yet.
		 * TODO remove dependency on node_online
		 */
		if (!node_online(node))
			node_set_online(node);

		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
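/* Non-debug variants: just update the node's cpumask directly. */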
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
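
/*
 * Debug version of cpu_to_node(): warns (with a stack trace) when it is
 * used before the per-CPU areas are set up, and falls back to the early
 * per-CPU map in that case.
 */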
int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
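
/*
 * Set or clear @cpu in @node's cpumask with extra validation, logging
 * the resulting mask at KERN_DEBUG level.
 */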
void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!cpumask_available(mask)) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, cpumask_pr_args(mask));
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if ((unsigned)node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (!cpumask_available(node_to_cpumask_map[node])) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_NUMA_KEEP_MEMINFO
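/* Find the node ID of the memblk in @mi that contains @start, if any. */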
static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
{
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			return mi->blk[i].nid;

	return NUMA_NO_NODE;
}

int phys_to_target_node(phys_addr_t start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	/*
	 * Prefer online nodes, but if reserved memory might be
	 * hot-added continue the search with reserved ranges.
	 */
	if (nid != NUMA_NO_NODE)
		return nid;

	return meminfo_to_nid(&numa_reserved_meminfo, start);
}
EXPORT_SYMBOL_GPL(phys_to_target_node);

int memory_add_physaddr_to_nid(u64 start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	if (nid == NUMA_NO_NODE)
		nid = numa_meminfo.blk[0].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
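
/* sort() comparator ordering numa_memblk pointers by ascending start address */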
static int __init cmp_memblk(const void *a, const void *b)
{
	const struct numa_memblk *ma = *(const struct numa_memblk **)a;
	const struct numa_memblk *mb = *(const struct numa_memblk **)b;

	/*
	 * Compare rather than subtract: the u64 difference would be
	 * truncated to the int return value, which can produce the wrong
	 * sign for start addresses more than INT_MAX apart.
	 */
	return (ma->start > mb->start) - (ma->start < mb->start);
}

static struct numa_memblk *numa_memblk_list[NR_NODE_MEMBLKS] __initdata;

/**
 * numa_fill_memblks - Fill gaps in numa_meminfo memblks
 * @start: address to begin fill
 * @end: address to end fill
 *
 * Find and extend numa_meminfo memblks to cover the @start-@end
 * physical address range, such that the first memblk includes
 * @start, the last memblk includes @end, and any gaps in between
 * are filled.
 *
 * RETURNS:
 * 0		  : Success
 * NUMA_NO_MEMBLK : No memblk exists in @start-@end range
 */
int __init numa_fill_memblks(u64 start, u64 end)
{
	struct numa_memblk **blk = &numa_memblk_list[0];
	struct numa_meminfo *mi = &numa_meminfo;
	int count = 0;
	u64 prev_end;

	/*
	 * Create a list of pointers to numa_meminfo memblks that
	 * overlap start, end.  Exclude (start == bi->end) since
	 * end addresses in both a CFMWS range and a memblk range
	 * are exclusive.
	 *
	 * This list of pointers is used to make in-place changes
	 * that fill out the numa_meminfo memblks.
	 */
	for (int i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		if (start < bi->end && end >= bi->start) {
			blk[count] = &mi->blk[i];
			count++;
		}
	}
	if (!count)
		return NUMA_NO_MEMBLK;

	/* Sort the list of pointers in memblk->start order */
	sort(&blk[0], count, sizeof(blk[0]), cmp_memblk, NULL);

	/* Make sure the first/last memblks include start/end */
	blk[0]->start = min(blk[0]->start, start);
	blk[count - 1]->end = max(blk[count - 1]->end, end);

	/*
	 * Fill any gaps by tracking the previous memblks
	 * end address and backfilling to it if needed.
	 */
	prev_end = blk[0]->end;
	for (int i = 1; i < count; i++) {
		struct numa_memblk *curr = blk[i];

		if (prev_end >= curr->start) {
			if (prev_end < curr->end)
				prev_end = curr->end;
		} else {
			curr->start = prev_end;
			prev_end = curr->end;
		}
	}

	return 0;
}

#endif