resource.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * linux/kernel/resource.c
  4. *
  5. * Copyright (C) 1999 Linus Torvalds
  6. * Copyright (C) 1999 Martin Mares <[email protected]>
  7. *
  8. * Arbitrary resource management.
  9. */
  10. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11. #include <linux/export.h>
  12. #include <linux/errno.h>
  13. #include <linux/ioport.h>
  14. #include <linux/init.h>
  15. #include <linux/slab.h>
  16. #include <linux/spinlock.h>
  17. #include <linux/fs.h>
  18. #include <linux/proc_fs.h>
  19. #include <linux/pseudo_fs.h>
  20. #include <linux/sched.h>
  21. #include <linux/seq_file.h>
  22. #include <linux/device.h>
  23. #include <linux/pfn.h>
  24. #include <linux/mm.h>
  25. #include <linux/mount.h>
  26. #include <linux/resource_ext.h>
  27. #include <uapi/linux/magic.h>
  28. #include <asm/io.h>
  29. struct resource ioport_resource = {
  30. .name = "PCI IO",
  31. .start = 0,
  32. .end = IO_SPACE_LIMIT,
  33. .flags = IORESOURCE_IO,
  34. };
  35. EXPORT_SYMBOL(ioport_resource);
  36. struct resource iomem_resource = {
  37. .name = "PCI mem",
  38. .start = 0,
  39. .end = -1,
  40. .flags = IORESOURCE_MEM,
  41. };
  42. EXPORT_SYMBOL(iomem_resource);
  43. /* constraints to be met while allocating resources */
  44. struct resource_constraint {
  45. resource_size_t min, max, align;
  46. resource_size_t (*alignf)(void *, const struct resource *,
  47. resource_size_t, resource_size_t);
  48. void *alignf_data;
  49. };
  50. static DEFINE_RWLOCK(resource_lock);
  51. static struct resource *next_resource(struct resource *p)
  52. {
  53. if (p->child)
  54. return p->child;
  55. while (!p->sibling && p->parent)
  56. p = p->parent;
  57. return p->sibling;
  58. }
  59. static struct resource *next_resource_skip_children(struct resource *p)
  60. {
  61. while (!p->sibling && p->parent)
  62. p = p->parent;
  63. return p->sibling;
  64. }
  65. #define for_each_resource(_root, _p, _skip_children) \
  66. for ((_p) = (_root)->child; (_p); \
  67. (_p) = (_skip_children) ? next_resource_skip_children(_p) : \
  68. next_resource(_p))
  69. static void *r_next(struct seq_file *m, void *v, loff_t *pos)
  70. {
  71. struct resource *p = v;
  72. (*pos)++;
  73. return (void *)next_resource(p);
  74. }
  75. #ifdef CONFIG_PROC_FS
  76. enum { MAX_IORES_LEVEL = 5 };
  77. static void *r_start(struct seq_file *m, loff_t *pos)
  78. __acquires(resource_lock)
  79. {
  80. struct resource *p = pde_data(file_inode(m->file));
  81. loff_t l = 0;
  82. read_lock(&resource_lock);
  83. for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
  84. ;
  85. return p;
  86. }
  87. static void r_stop(struct seq_file *m, void *v)
  88. __releases(resource_lock)
  89. {
  90. read_unlock(&resource_lock);
  91. }
  92. static int r_show(struct seq_file *m, void *v)
  93. {
  94. struct resource *root = pde_data(file_inode(m->file));
  95. struct resource *r = v, *p;
  96. unsigned long long start, end;
  97. int width = root->end < 0x10000 ? 4 : 8;
  98. int depth;
  99. for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
  100. if (p->parent == root)
  101. break;
  102. if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
  103. start = r->start;
  104. end = r->end;
  105. } else {
  106. start = end = 0;
  107. }
  108. seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
  109. depth * 2, "",
  110. width, start,
  111. width, end,
  112. r->name ? r->name : "<BAD>");
  113. return 0;
  114. }
  115. static const struct seq_operations resource_op = {
  116. .start = r_start,
  117. .next = r_next,
  118. .stop = r_stop,
  119. .show = r_show,
  120. };
  121. static int __init ioresources_init(void)
  122. {
  123. proc_create_seq_data("ioports", 0, NULL, &resource_op,
  124. &ioport_resource);
  125. proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
  126. return 0;
  127. }
  128. __initcall(ioresources_init);
  129. #endif /* CONFIG_PROC_FS */
  130. static void free_resource(struct resource *res)
  131. {
  132. /*
  133. * If the resource was allocated using memblock early during boot
  134. * we'll leak it here: we can only return full pages back to the
  135. * buddy and trying to be smart and reusing them eventually in
  136. * alloc_resource() overcomplicates resource handling.
  137. */
  138. if (res && PageSlab(virt_to_head_page(res)))
  139. kfree(res);
  140. }
  141. static struct resource *alloc_resource(gfp_t flags)
  142. {
  143. return kzalloc(sizeof(struct resource), flags);
  144. }
  145. /* Return the conflict entry if you can't request it */
  146. static struct resource * __request_resource(struct resource *root, struct resource *new)
  147. {
  148. resource_size_t start = new->start;
  149. resource_size_t end = new->end;
  150. struct resource *tmp, **p;
  151. if (end < start)
  152. return root;
  153. if (start < root->start)
  154. return root;
  155. if (end > root->end)
  156. return root;
  157. p = &root->child;
  158. for (;;) {
  159. tmp = *p;
  160. if (!tmp || tmp->start > end) {
  161. new->sibling = tmp;
  162. *p = new;
  163. new->parent = root;
  164. return NULL;
  165. }
  166. p = &tmp->sibling;
  167. if (tmp->end < start)
  168. continue;
  169. return tmp;
  170. }
  171. }
  172. static int __release_resource(struct resource *old, bool release_child)
  173. {
  174. struct resource *tmp, **p, *chd;
  175. p = &old->parent->child;
  176. for (;;) {
  177. tmp = *p;
  178. if (!tmp)
  179. break;
  180. if (tmp == old) {
  181. if (release_child || !(tmp->child)) {
  182. *p = tmp->sibling;
  183. } else {
  184. for (chd = tmp->child;; chd = chd->sibling) {
  185. chd->parent = tmp->parent;
  186. if (!(chd->sibling))
  187. break;
  188. }
  189. *p = tmp->child;
  190. chd->sibling = tmp->sibling;
  191. }
  192. old->parent = NULL;
  193. return 0;
  194. }
  195. p = &tmp->sibling;
  196. }
  197. return -EINVAL;
  198. }
  199. static void __release_child_resources(struct resource *r)
  200. {
  201. struct resource *tmp, *p;
  202. resource_size_t size;
  203. p = r->child;
  204. r->child = NULL;
  205. while (p) {
  206. tmp = p;
  207. p = p->sibling;
  208. tmp->parent = NULL;
  209. tmp->sibling = NULL;
  210. __release_child_resources(tmp);
  211. printk(KERN_DEBUG "release child resource %pR\n", tmp);
  212. /* need to restore size, and keep flags */
  213. size = resource_size(tmp);
  214. tmp->start = 0;
  215. tmp->end = size - 1;
  216. }
  217. }
  218. void release_child_resources(struct resource *r)
  219. {
  220. write_lock(&resource_lock);
  221. __release_child_resources(r);
  222. write_unlock(&resource_lock);
  223. }
  224. /**
  225. * request_resource_conflict - request and reserve an I/O or memory resource
  226. * @root: root resource descriptor
  227. * @new: resource descriptor desired by caller
  228. *
  229. * Returns NULL on success, or a pointer to the conflicting resource on error.
  230. */
  231. struct resource *request_resource_conflict(struct resource *root, struct resource *new)
  232. {
  233. struct resource *conflict;
  234. write_lock(&resource_lock);
  235. conflict = __request_resource(root, new);
  236. write_unlock(&resource_lock);
  237. return conflict;
  238. }
  239. /**
  240. * request_resource - request and reserve an I/O or memory resource
  241. * @root: root resource descriptor
  242. * @new: resource descriptor desired by caller
  243. *
  244. * Returns 0 for success, negative error code on error.
  245. */
  246. int request_resource(struct resource *root, struct resource *new)
  247. {
  248. struct resource *conflict;
  249. conflict = request_resource_conflict(root, new);
  250. return conflict ? -EBUSY : 0;
  251. }
  252. EXPORT_SYMBOL(request_resource);
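/*
 * Illustrative sketch (not part of the original file): a boot-time caller
 * reserving a fixed MMIO window with request_resource().  The device name
 * and address range below are made-up examples.
 */
#if 0	/* usage sketch only, not compiled */
static struct resource example_mmio = {
	.name	= "example-device",
	.start	= 0xfed40000,
	.end	= 0xfed40fff,
	.flags	= IORESOURCE_MEM,
};

static int __init example_reserve(void)
{
	/* Returns -EBUSY if any part of the range is already claimed. */
	return request_resource(&iomem_resource, &example_mmio);
}
#endif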
  253. /**
  254. * release_resource - release a previously reserved resource
  255. * @old: resource pointer
  256. */
  257. int release_resource(struct resource *old)
  258. {
  259. int retval;
  260. write_lock(&resource_lock);
  261. retval = __release_resource(old, true);
  262. write_unlock(&resource_lock);
  263. return retval;
  264. }
  265. EXPORT_SYMBOL(release_resource);
  266. /**
  267. * find_next_iomem_res - Finds the lowest iomem resource that covers part of
  268. * [@start..@end].
  269. *
  270. * If a resource is found, returns 0 and @*res is overwritten with the part
  271. * of the resource that's within [@start..@end]; if none is found, returns
  272. * -ENODEV. Returns -EINVAL for invalid parameters.
  273. *
  274. * @start: start address of the resource searched for
  275. * @end: end address of same resource
  276. * @flags: flags which the resource must have
  277. * @desc: descriptor the resource must have
  278. * @res: return ptr, if resource found
  279. *
  280. * The caller must specify @start, @end, @flags, and @desc
  281. * (which may be IORES_DESC_NONE).
  282. */
  283. static int find_next_iomem_res(resource_size_t start, resource_size_t end,
  284. unsigned long flags, unsigned long desc,
  285. struct resource *res)
  286. {
  287. struct resource *p;
  288. if (!res)
  289. return -EINVAL;
  290. if (start >= end)
  291. return -EINVAL;
  292. read_lock(&resource_lock);
  293. for (p = iomem_resource.child; p; p = next_resource(p)) {
  294. /* If we passed the resource we are looking for, stop */
  295. if (p->start > end) {
  296. p = NULL;
  297. break;
  298. }
  299. /* Skip until we find a range that matches what we look for */
  300. if (p->end < start)
  301. continue;
  302. if ((p->flags & flags) != flags)
  303. continue;
  304. if ((desc != IORES_DESC_NONE) && (desc != p->desc))
  305. continue;
  306. /* Found a match, break */
  307. break;
  308. }
  309. if (p) {
  310. /* copy data */
  311. *res = (struct resource) {
  312. .start = max(start, p->start),
  313. .end = min(end, p->end),
  314. .flags = p->flags,
  315. .desc = p->desc,
  316. .parent = p->parent,
  317. };
  318. }
  319. read_unlock(&resource_lock);
  320. return p ? 0 : -ENODEV;
  321. }
  322. static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
  323. unsigned long flags, unsigned long desc,
  324. void *arg,
  325. int (*func)(struct resource *, void *))
  326. {
  327. struct resource res;
  328. int ret = -EINVAL;
  329. while (start < end &&
  330. !find_next_iomem_res(start, end, flags, desc, &res)) {
  331. ret = (*func)(&res, arg);
  332. if (ret)
  333. break;
  334. start = res.end + 1;
  335. }
  336. return ret;
  337. }
  338. /**
  339. * walk_iomem_res_desc - Walks through iomem resources and calls func()
  340. * with matching resource ranges.
  341. *
  342. * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
  343. * @flags: I/O resource flags
  344. * @start: start addr
  345. * @end: end addr
  346. * @arg: function argument for the callback @func
  347. * @func: callback function that is called for each qualifying resource area
  348. *
  349. * All the memory ranges which overlap start,end and also match flags and
  350. * desc are valid candidates.
  351. *
  352. * NOTE: For a new descriptor search, define a new IORES_DESC in
  353. * <linux/ioport.h> and set it in 'desc' of a target resource entry.
  354. */
  355. int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
  356. u64 end, void *arg, int (*func)(struct resource *, void *))
  357. {
  358. return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
  359. }
  360. EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
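/*
 * Illustrative sketch (not part of the original file): summing up busy
 * System RAM in an interval with walk_iomem_res_desc().  The callback and
 * helper names are hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static int example_count_ram(struct resource *res, void *arg)
{
	u64 *bytes = arg;

	*bytes += resource_size(res);
	return 0;		/* a non-zero return would stop the walk */
}

static u64 example_ram_bytes(u64 start, u64 end)
{
	u64 bytes = 0;

	walk_iomem_res_desc(IORES_DESC_NONE,
			    IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
			    start, end, &bytes, example_count_ram);
	return bytes;
}
#endif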
  361. /*
  362. * This function calls the @func callback against all memory ranges of type
  363. * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
  364. * This function is only for System RAM; it deals with full ranges and
  365. * not PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
  366. * ranges.
  367. */
  368. int walk_system_ram_res(u64 start, u64 end, void *arg,
  369. int (*func)(struct resource *, void *))
  370. {
  371. unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
  372. return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
  373. func);
  374. }
  375. /*
  376. * This function calls the @func callback against all memory ranges that
  377. * are marked as IORESOURCE_MEM and IORESOURCE_BUSY.
  378. */
  379. int walk_mem_res(u64 start, u64 end, void *arg,
  380. int (*func)(struct resource *, void *))
  381. {
  382. unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
  383. return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
  384. func);
  385. }
  386. /*
  387. * This function calls the @func callback against all memory ranges of type
  388. * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
  389. * It is to be used only for System RAM.
  390. */
  391. int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
  392. void *arg, int (*func)(unsigned long, unsigned long, void *))
  393. {
  394. resource_size_t start, end;
  395. unsigned long flags;
  396. struct resource res;
  397. unsigned long pfn, end_pfn;
  398. int ret = -EINVAL;
  399. start = (u64) start_pfn << PAGE_SHIFT;
  400. end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
  401. flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
  402. while (start < end &&
  403. !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
  404. pfn = PFN_UP(res.start);
  405. end_pfn = PFN_DOWN(res.end + 1);
  406. if (end_pfn > pfn)
  407. ret = (*func)(pfn, end_pfn - pfn, arg);
  408. if (ret)
  409. break;
  410. start = res.end + 1;
  411. }
  412. return ret;
  413. }
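/*
 * Illustrative sketch (not part of the original file): a PFN-based callback
 * for walk_system_ram_range().  The names are hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static int example_count_pages(unsigned long pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;
	return 0;
}

static unsigned long example_ram_pages(unsigned long start_pfn,
				       unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_system_ram_range(start_pfn, nr_pages, &total,
			      example_count_pages);
	return total;
}
#endif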
  414. static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
  415. {
  416. return 1;
  417. }
  418. /*
  419. * This generic page_is_ram() returns true if the specified address is
  420. * registered as System RAM in the iomem_resource list.
  421. */
  422. int __weak page_is_ram(unsigned long pfn)
  423. {
  424. return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
  425. }
  426. EXPORT_SYMBOL_GPL(page_is_ram);
  427. static int __region_intersects(struct resource *parent, resource_size_t start,
  428. size_t size, unsigned long flags,
  429. unsigned long desc)
  430. {
  431. struct resource res;
  432. int type = 0; int other = 0;
  433. struct resource *p;
  434. res.start = start;
  435. res.end = start + size - 1;
  436. for (p = parent->child; p ; p = p->sibling) {
  437. bool is_type = (((p->flags & flags) == flags) &&
  438. ((desc == IORES_DESC_NONE) ||
  439. (desc == p->desc)));
  440. if (resource_overlaps(p, &res))
  441. is_type ? type++ : other++;
  442. }
  443. if (type == 0)
  444. return REGION_DISJOINT;
  445. if (other == 0)
  446. return REGION_INTERSECTS;
  447. return REGION_MIXED;
  448. }
  449. /**
  450. * region_intersects() - determine intersection of region with known resources
  451. * @start: region start address
  452. * @size: size of region
  453. * @flags: flags of resource (in iomem_resource)
  454. * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
  455. *
  456. * Check if the specified region partially overlaps or fully eclipses a
  457. * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
  458. * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
  459. * return REGION_MIXED if the region overlaps @flags/@desc and another
  460. * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
  461. * and no other defined resource. Note that REGION_INTERSECTS is also
  462. * returned in the case when the specified region overlaps RAM and undefined
  463. * memory holes.
  464. *
  465. * region_intersects() is used by memory remapping functions to ensure
  466. * the user is not remapping RAM and is a vast speedup over walking
  467. * through the resource table page by page.
  468. */
  469. int region_intersects(resource_size_t start, size_t size, unsigned long flags,
  470. unsigned long desc)
  471. {
  472. int ret;
  473. read_lock(&resource_lock);
  474. ret = __region_intersects(&iomem_resource, start, size, flags, desc);
  475. read_unlock(&resource_lock);
  476. return ret;
  477. }
  478. EXPORT_SYMBOL_GPL(region_intersects);
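/*
 * Illustrative sketch (not part of the original file): how a mapping helper
 * might refuse to remap System RAM, in the spirit described above.  The
 * helper name is hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static bool example_range_is_free_of_ram(resource_size_t start, size_t size)
{
	return region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
				 IORES_DESC_NONE) == REGION_DISJOINT;
}
#endif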
  479. void __weak arch_remove_reservations(struct resource *avail)
  480. {
  481. }
  482. static resource_size_t simple_align_resource(void *data,
  483. const struct resource *avail,
  484. resource_size_t size,
  485. resource_size_t align)
  486. {
  487. return avail->start;
  488. }
  489. static void resource_clip(struct resource *res, resource_size_t min,
  490. resource_size_t max)
  491. {
  492. if (res->start < min)
  493. res->start = min;
  494. if (res->end > max)
  495. res->end = max;
  496. }
  497. /*
  498. * Find empty slot in the resource tree with the given range and
  499. * alignment constraints
  500. */
  501. static int __find_resource(struct resource *root, struct resource *old,
  502. struct resource *new,
  503. resource_size_t size,
  504. struct resource_constraint *constraint)
  505. {
  506. struct resource *this = root->child;
  507. struct resource tmp = *new, avail, alloc;
  508. tmp.start = root->start;
  509. /*
  510. * Skip past an allocated resource that starts at 0, since the assignment
  511. * of this->start - 1 to tmp->end below would cause an underflow.
  512. */
  513. if (this && this->start == root->start) {
  514. tmp.start = (this == old) ? old->start : this->end + 1;
  515. this = this->sibling;
  516. }
  517. for(;;) {
  518. if (this)
  519. tmp.end = (this == old) ? this->end : this->start - 1;
  520. else
  521. tmp.end = root->end;
  522. if (tmp.end < tmp.start)
  523. goto next;
  524. resource_clip(&tmp, constraint->min, constraint->max);
  525. arch_remove_reservations(&tmp);
  526. /* Check for overflow after ALIGN() */
  527. avail.start = ALIGN(tmp.start, constraint->align);
  528. avail.end = tmp.end;
  529. avail.flags = new->flags & ~IORESOURCE_UNSET;
  530. if (avail.start >= tmp.start) {
  531. alloc.flags = avail.flags;
  532. alloc.start = constraint->alignf(constraint->alignf_data, &avail,
  533. size, constraint->align);
  534. alloc.end = alloc.start + size - 1;
  535. if (alloc.start <= alloc.end &&
  536. resource_contains(&avail, &alloc)) {
  537. new->start = alloc.start;
  538. new->end = alloc.end;
  539. return 0;
  540. }
  541. }
  542. next: if (!this || this->end == root->end)
  543. break;
  544. if (this != old)
  545. tmp.start = this->end + 1;
  546. this = this->sibling;
  547. }
  548. return -EBUSY;
  549. }
  550. /*
  551. * Find empty slot in the resource tree given range and alignment.
  552. */
  553. static int find_resource(struct resource *root, struct resource *new,
  554. resource_size_t size,
  555. struct resource_constraint *constraint)
  556. {
  557. return __find_resource(root, NULL, new, size, constraint);
  558. }
  559. /**
  560. * reallocate_resource - allocate a slot in the resource tree given range & alignment.
  561. * The resource will be relocated if the new size cannot be reallocated in the
  562. * current location.
  563. *
  564. * @root: root resource descriptor
  565. * @old: resource descriptor desired by caller
  566. * @newsize: new size of the resource descriptor
  567. * @constraint: the size and alignment constraints to be met.
  568. */
  569. static int reallocate_resource(struct resource *root, struct resource *old,
  570. resource_size_t newsize,
  571. struct resource_constraint *constraint)
  572. {
  573. int err=0;
  574. struct resource new = *old;
  575. struct resource *conflict;
  576. write_lock(&resource_lock);
  577. if ((err = __find_resource(root, old, &new, newsize, constraint)))
  578. goto out;
  579. if (resource_contains(&new, old)) {
  580. old->start = new.start;
  581. old->end = new.end;
  582. goto out;
  583. }
  584. if (old->child) {
  585. err = -EBUSY;
  586. goto out;
  587. }
  588. if (resource_contains(old, &new)) {
  589. old->start = new.start;
  590. old->end = new.end;
  591. } else {
  592. __release_resource(old, true);
  593. *old = new;
  594. conflict = __request_resource(root, old);
  595. BUG_ON(conflict);
  596. }
  597. out:
  598. write_unlock(&resource_lock);
  599. return err;
  600. }
  601. /**
  602. * allocate_resource - allocate empty slot in the resource tree given range & alignment.
  603. * The resource will be reallocated with a new size if it was already allocated
  604. * @root: root resource descriptor
  605. * @new: resource descriptor desired by caller
  606. * @size: requested resource region size
  607. * @min: minimum boundary to allocate
  608. * @max: maximum boundary to allocate
  609. * @align: alignment requested, in bytes
  610. * @alignf: alignment function, optional, called if not NULL
  611. * @alignf_data: arbitrary data to pass to the @alignf function
  612. */
  613. int allocate_resource(struct resource *root, struct resource *new,
  614. resource_size_t size, resource_size_t min,
  615. resource_size_t max, resource_size_t align,
  616. resource_size_t (*alignf)(void *,
  617. const struct resource *,
  618. resource_size_t,
  619. resource_size_t),
  620. void *alignf_data)
  621. {
  622. int err;
  623. struct resource_constraint constraint;
  624. if (!alignf)
  625. alignf = simple_align_resource;
  626. constraint.min = min;
  627. constraint.max = max;
  628. constraint.align = align;
  629. constraint.alignf = alignf;
  630. constraint.alignf_data = alignf_data;
  631. if ( new->parent ) {
  632. /* resource is already allocated, try reallocating with
  633. the new constraints */
  634. return reallocate_resource(root, new, size, &constraint);
  635. }
  636. write_lock(&resource_lock);
  637. err = find_resource(root, new, size, &constraint);
  638. if (err >= 0 && __request_resource(root, new))
  639. err = -EBUSY;
  640. write_unlock(&resource_lock);
  641. return err;
  642. }
  643. EXPORT_SYMBOL(allocate_resource);
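/*
 * Illustrative sketch (not part of the original file): asking the allocator
 * for a free, naturally aligned 64 KiB window below 4 GiB.  The values and
 * names are made up; @alignf is left NULL so simple_align_resource() is used.
 */
#if 0	/* usage sketch only, not compiled */
static struct resource example_window = {
	.name	= "example-window",
	.flags	= IORESOURCE_MEM,
};

static int example_alloc_window(void)
{
	return allocate_resource(&iomem_resource, &example_window,
				 0x10000,	/* size */
				 0,		/* min */
				 0xffffffff,	/* max */
				 0x10000,	/* align */
				 NULL, NULL);
}
#endif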
  644. /**
  645. * lookup_resource - find an existing resource by a resource start address
  646. * @root: root resource descriptor
  647. * @start: resource start address
  648. *
  649. * Returns a pointer to the resource if found, NULL otherwise
  650. */
  651. struct resource *lookup_resource(struct resource *root, resource_size_t start)
  652. {
  653. struct resource *res;
  654. read_lock(&resource_lock);
  655. for (res = root->child; res; res = res->sibling) {
  656. if (res->start == start)
  657. break;
  658. }
  659. read_unlock(&resource_lock);
  660. return res;
  661. }
  662. /*
  663. * Insert a resource into the resource tree. If successful, return NULL,
  664. * otherwise return the conflicting resource (compare to __request_resource())
  665. */
  666. static struct resource * __insert_resource(struct resource *parent, struct resource *new)
  667. {
  668. struct resource *first, *next;
  669. for (;; parent = first) {
  670. first = __request_resource(parent, new);
  671. if (!first)
  672. return first;
  673. if (first == parent)
  674. return first;
  675. if (WARN_ON(first == new)) /* duplicated insertion */
  676. return first;
  677. if ((first->start > new->start) || (first->end < new->end))
  678. break;
  679. if ((first->start == new->start) && (first->end == new->end))
  680. break;
  681. }
  682. for (next = first; ; next = next->sibling) {
  683. /* Partial overlap? Bad, and unfixable */
  684. if (next->start < new->start || next->end > new->end)
  685. return next;
  686. if (!next->sibling)
  687. break;
  688. if (next->sibling->start > new->end)
  689. break;
  690. }
  691. new->parent = parent;
  692. new->sibling = next->sibling;
  693. new->child = first;
  694. next->sibling = NULL;
  695. for (next = first; next; next = next->sibling)
  696. next->parent = new;
  697. if (parent->child == first) {
  698. parent->child = new;
  699. } else {
  700. next = parent->child;
  701. while (next->sibling != first)
  702. next = next->sibling;
  703. next->sibling = new;
  704. }
  705. return NULL;
  706. }
  707. /**
  708. * insert_resource_conflict - Inserts resource in the resource tree
  709. * @parent: parent of the new resource
  710. * @new: new resource to insert
  711. *
  712. * Returns NULL on success, or the conflicting resource if the new resource can't be inserted.
  713. *
  714. * This function is equivalent to request_resource_conflict when no conflict
  715. * happens. If a conflict happens, and the conflicting resources
  716. * entirely fit within the range of the new resource, then the new
  717. * resource is inserted and the conflicting resources become children of
  718. * the new resource.
  719. *
  720. * This function is intended for producers of resources, such as FW modules
  721. * and bus drivers.
  722. */
  723. struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
  724. {
  725. struct resource *conflict;
  726. write_lock(&resource_lock);
  727. conflict = __insert_resource(parent, new);
  728. write_unlock(&resource_lock);
  729. return conflict;
  730. }
  731. /**
  732. * insert_resource - Inserts a resource in the resource tree
  733. * @parent: parent of the new resource
  734. * @new: new resource to insert
  735. *
  736. * Returns 0 on success, -EBUSY if the resource can't be inserted.
  737. *
  738. * This function is intended for producers of resources, such as FW modules
  739. * and bus drivers.
  740. */
  741. int insert_resource(struct resource *parent, struct resource *new)
  742. {
  743. struct resource *conflict;
  744. conflict = insert_resource_conflict(parent, new);
  745. return conflict ? -EBUSY : 0;
  746. }
  747. EXPORT_SYMBOL_GPL(insert_resource);
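/*
 * Illustrative sketch (not part of the original file): a firmware/bus layer
 * inserting a bridge window so that resources already claimed inside it
 * become its children.  Addresses and names are hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static struct resource example_bridge_window = {
	.name	= "example-bridge",
	.start	= 0xc0000000,
	.end	= 0xcfffffff,
	.flags	= IORESOURCE_MEM,
};

static int example_publish_window(void)
{
	/* Fails only if an existing resource partially overlaps the window. */
	return insert_resource(&iomem_resource, &example_bridge_window);
}
#endif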
  748. /**
  749. * insert_resource_expand_to_fit - Insert a resource into the resource tree
  750. * @root: root resource descriptor
  751. * @new: new resource to insert
  752. *
  753. * Insert a resource into the resource tree, possibly expanding it in order
  754. * to make it encompass any conflicting resources.
  755. */
  756. void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
  757. {
  758. if (new->parent)
  759. return;
  760. write_lock(&resource_lock);
  761. for (;;) {
  762. struct resource *conflict;
  763. conflict = __insert_resource(root, new);
  764. if (!conflict)
  765. break;
  766. if (conflict == root)
  767. break;
  768. /* Ok, expand resource to cover the conflict, then try again .. */
  769. if (conflict->start < new->start)
  770. new->start = conflict->start;
  771. if (conflict->end > new->end)
  772. new->end = conflict->end;
  773. pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
  774. }
  775. write_unlock(&resource_lock);
  776. }
  777. /*
  778. * Not for general consumption, only early boot memory map parsing, PCI
  779. * resource discovery, and late discovery of CXL resources are expected
  780. * to use this interface. The former are built-in and only the latter,
  781. * CXL, is a module.
  782. */
  783. EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL);
  784. /**
  785. * remove_resource - Remove a resource in the resource tree
  786. * @old: resource to remove
  787. *
  788. * Returns 0 on success, -EINVAL if the resource is not valid.
  789. *
  790. * This function removes a resource previously inserted by insert_resource()
  791. * or insert_resource_conflict(), and moves the children (if any) up to
  792. * where they were before. insert_resource() and insert_resource_conflict()
  793. * insert a new resource, and move any conflicting resources down to the
  794. * children of the new resource.
  795. *
  796. * insert_resource(), insert_resource_conflict() and remove_resource() are
  797. * intended for producers of resources, such as FW modules and bus drivers.
  798. */
  799. int remove_resource(struct resource *old)
  800. {
  801. int retval;
  802. write_lock(&resource_lock);
  803. retval = __release_resource(old, false);
  804. write_unlock(&resource_lock);
  805. return retval;
  806. }
  807. EXPORT_SYMBOL_GPL(remove_resource);
  808. static int __adjust_resource(struct resource *res, resource_size_t start,
  809. resource_size_t size)
  810. {
  811. struct resource *tmp, *parent = res->parent;
  812. resource_size_t end = start + size - 1;
  813. int result = -EBUSY;
  814. if (!parent)
  815. goto skip;
  816. if ((start < parent->start) || (end > parent->end))
  817. goto out;
  818. if (res->sibling && (res->sibling->start <= end))
  819. goto out;
  820. tmp = parent->child;
  821. if (tmp != res) {
  822. while (tmp->sibling != res)
  823. tmp = tmp->sibling;
  824. if (start <= tmp->end)
  825. goto out;
  826. }
  827. skip:
  828. for (tmp = res->child; tmp; tmp = tmp->sibling)
  829. if ((tmp->start < start) || (tmp->end > end))
  830. goto out;
  831. res->start = start;
  832. res->end = end;
  833. result = 0;
  834. out:
  835. return result;
  836. }
  837. /**
  838. * adjust_resource - modify a resource's start and size
  839. * @res: resource to modify
  840. * @start: new start value
  841. * @size: new size
  842. *
  843. * Given an existing resource, change its start and size to match the
  844. * arguments. Returns 0 on success, -EBUSY if it can't fit.
  845. * Existing children of the resource are assumed to be immutable.
  846. */
  847. int adjust_resource(struct resource *res, resource_size_t start,
  848. resource_size_t size)
  849. {
  850. int result;
  851. write_lock(&resource_lock);
  852. result = __adjust_resource(res, start, size);
  853. write_unlock(&resource_lock);
  854. return result;
  855. }
  856. EXPORT_SYMBOL(adjust_resource);
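/*
 * Illustrative sketch (not part of the original file): growing an already
 * requested resource in place with adjust_resource().  The new size must
 * still fit inside the parent and clear of the siblings.
 */
#if 0	/* usage sketch only, not compiled */
static int example_grow(struct resource *res, resource_size_t extra)
{
	return adjust_resource(res, res->start, resource_size(res) + extra);
}
#endif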
  857. static void __init
  858. __reserve_region_with_split(struct resource *root, resource_size_t start,
  859. resource_size_t end, const char *name)
  860. {
  861. struct resource *parent = root;
  862. struct resource *conflict;
  863. struct resource *res = alloc_resource(GFP_ATOMIC);
  864. struct resource *next_res = NULL;
  865. int type = resource_type(root);
  866. if (!res)
  867. return;
  868. res->name = name;
  869. res->start = start;
  870. res->end = end;
  871. res->flags = type | IORESOURCE_BUSY;
  872. res->desc = IORES_DESC_NONE;
  873. while (1) {
  874. conflict = __request_resource(parent, res);
  875. if (!conflict) {
  876. if (!next_res)
  877. break;
  878. res = next_res;
  879. next_res = NULL;
  880. continue;
  881. }
  882. /* conflict covered whole area */
  883. if (conflict->start <= res->start &&
  884. conflict->end >= res->end) {
  885. free_resource(res);
  886. WARN_ON(next_res);
  887. break;
  888. }
  889. /* failed, split and try again */
  890. if (conflict->start > res->start) {
  891. end = res->end;
  892. res->end = conflict->start - 1;
  893. if (conflict->end < end) {
  894. next_res = alloc_resource(GFP_ATOMIC);
  895. if (!next_res) {
  896. free_resource(res);
  897. break;
  898. }
  899. next_res->name = name;
  900. next_res->start = conflict->end + 1;
  901. next_res->end = end;
  902. next_res->flags = type | IORESOURCE_BUSY;
  903. next_res->desc = IORES_DESC_NONE;
  904. }
  905. } else {
  906. res->start = conflict->end + 1;
  907. }
  908. }
  909. }
  910. void __init
  911. reserve_region_with_split(struct resource *root, resource_size_t start,
  912. resource_size_t end, const char *name)
  913. {
  914. int abort = 0;
  915. write_lock(&resource_lock);
  916. if (root->start > start || root->end < end) {
  917. pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
  918. (unsigned long long)start, (unsigned long long)end,
  919. root);
  920. if (start > root->end || end < root->start)
  921. abort = 1;
  922. else {
  923. if (end > root->end)
  924. end = root->end;
  925. if (start < root->start)
  926. start = root->start;
  927. pr_err("fixing request to [0x%llx-0x%llx]\n",
  928. (unsigned long long)start,
  929. (unsigned long long)end);
  930. }
  931. dump_stack();
  932. }
  933. if (!abort)
  934. __reserve_region_with_split(root, start, end, name);
  935. write_unlock(&resource_lock);
  936. }
  937. /**
  938. * resource_alignment - calculate resource's alignment
  939. * @res: resource pointer
  940. *
  941. * Returns alignment on success, 0 (invalid alignment) on failure.
  942. */
  943. resource_size_t resource_alignment(struct resource *res)
  944. {
  945. switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
  946. case IORESOURCE_SIZEALIGN:
  947. return resource_size(res);
  948. case IORESOURCE_STARTALIGN:
  949. return res->start;
  950. default:
  951. return 0;
  952. }
  953. }
  954. /*
  955. * This is compatibility stuff for IO resources.
  956. *
  957. * Note how this, unlike the above, knows about
  958. * the IO flag meanings (busy etc).
  959. *
  960. * request_region creates a new busy region.
  961. *
  962. * release_region releases a matching busy region.
  963. */
  964. static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);
  965. static struct inode *iomem_inode;
  966. #ifdef CONFIG_IO_STRICT_DEVMEM
  967. static void revoke_iomem(struct resource *res)
  968. {
  969. /* pairs with smp_store_release() in iomem_init_inode() */
  970. struct inode *inode = smp_load_acquire(&iomem_inode);
  971. /*
  972. * Check that the initialization has completed. Losing the race
  973. * is ok because it means drivers are claiming resources before
  974. * the fs_initcall level of init, preventing iomem_get_mapping() users
  975. * from establishing mappings.
  976. */
  977. if (!inode)
  978. return;
  979. /*
  980. * The expectation is that the driver has successfully marked
  981. * the resource busy by this point, so devmem_is_allowed()
  982. * should start returning false, however for performance this
  983. * does not iterate the entire resource range.
  984. */
  985. if (devmem_is_allowed(PHYS_PFN(res->start)) &&
  986. devmem_is_allowed(PHYS_PFN(res->end))) {
  987. /*
  988. * *cringe* iomem=relaxed says "go ahead, what's the
  989. * worst that can happen?"
  990. */
  991. return;
  992. }
  993. unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
  994. }
  995. #else
  996. static void revoke_iomem(struct resource *res) {}
  997. #endif
  998. struct address_space *iomem_get_mapping(void)
  999. {
  1000. /*
  1001. * This function is only called from file open paths, hence guaranteed
  1002. * that fs_initcalls have completed and no need to check for NULL. But
  1003. * since revoke_iomem can be called before the initcall we still need
  1004. * the barrier to appease checkers.
  1005. */
  1006. return smp_load_acquire(&iomem_inode)->i_mapping;
  1007. }
  1008. static int __request_region_locked(struct resource *res, struct resource *parent,
  1009. resource_size_t start, resource_size_t n,
  1010. const char *name, int flags)
  1011. {
  1012. DECLARE_WAITQUEUE(wait, current);
  1013. res->name = name;
  1014. res->start = start;
  1015. res->end = start + n - 1;
  1016. for (;;) {
  1017. struct resource *conflict;
  1018. res->flags = resource_type(parent) | resource_ext_type(parent);
  1019. res->flags |= IORESOURCE_BUSY | flags;
  1020. res->desc = parent->desc;
  1021. conflict = __request_resource(parent, res);
  1022. if (!conflict)
  1023. break;
  1024. /*
  1025. * mm/hmm.c reserves physical addresses which then
  1026. * become unavailable to other users. Conflicts are
  1027. * not expected. Warn to aid debugging if encountered.
  1028. */
  1029. if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
  1030. pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
  1031. conflict->name, conflict, res);
  1032. }
  1033. if (conflict != parent) {
  1034. if (!(conflict->flags & IORESOURCE_BUSY)) {
  1035. parent = conflict;
  1036. continue;
  1037. }
  1038. }
  1039. if (conflict->flags & flags & IORESOURCE_MUXED) {
  1040. add_wait_queue(&muxed_resource_wait, &wait);
  1041. write_unlock(&resource_lock);
  1042. set_current_state(TASK_UNINTERRUPTIBLE);
  1043. schedule();
  1044. remove_wait_queue(&muxed_resource_wait, &wait);
  1045. write_lock(&resource_lock);
  1046. continue;
  1047. }
  1048. /* Uhhuh, that didn't work out.. */
  1049. return -EBUSY;
  1050. }
  1051. return 0;
  1052. }
  1053. /**
  1054. * __request_region - create a new busy resource region
  1055. * @parent: parent resource descriptor
  1056. * @start: resource start address
  1057. * @n: resource region size
  1058. * @name: reserving caller's ID string
  1059. * @flags: IO resource flags
  1060. */
  1061. struct resource *__request_region(struct resource *parent,
  1062. resource_size_t start, resource_size_t n,
  1063. const char *name, int flags)
  1064. {
  1065. struct resource *res = alloc_resource(GFP_KERNEL);
  1066. int ret;
  1067. if (!res)
  1068. return NULL;
  1069. write_lock(&resource_lock);
  1070. ret = __request_region_locked(res, parent, start, n, name, flags);
  1071. write_unlock(&resource_lock);
  1072. if (ret) {
  1073. free_resource(res);
  1074. return NULL;
  1075. }
  1076. if (parent == &iomem_resource)
  1077. revoke_iomem(res);
  1078. return res;
  1079. }
  1080. EXPORT_SYMBOL(__request_region);
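/*
 * Illustrative sketch (not part of the original file): the usual driver-side
 * wrappers from <linux/ioport.h> (request_mem_region()/release_mem_region())
 * that end up in __request_region()/__release_region().  The address range
 * and name are hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static int example_claim_regs(void)
{
	if (!request_mem_region(0xfed40000, 0x1000, "example-regs"))
		return -EBUSY;
	/* ... ioremap() and use the registers ... */
	release_mem_region(0xfed40000, 0x1000);
	return 0;
}
#endif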
  1081. /**
  1082. * __release_region - release a previously reserved resource region
  1083. * @parent: parent resource descriptor
  1084. * @start: resource start address
  1085. * @n: resource region size
  1086. *
  1087. * The described resource region must match a currently busy region.
  1088. */
  1089. void __release_region(struct resource *parent, resource_size_t start,
  1090. resource_size_t n)
  1091. {
  1092. struct resource **p;
  1093. resource_size_t end;
  1094. p = &parent->child;
  1095. end = start + n - 1;
  1096. write_lock(&resource_lock);
  1097. for (;;) {
  1098. struct resource *res = *p;
  1099. if (!res)
  1100. break;
  1101. if (res->start <= start && res->end >= end) {
  1102. if (!(res->flags & IORESOURCE_BUSY)) {
  1103. p = &res->child;
  1104. continue;
  1105. }
  1106. if (res->start != start || res->end != end)
  1107. break;
  1108. *p = res->sibling;
  1109. write_unlock(&resource_lock);
  1110. if (res->flags & IORESOURCE_MUXED)
  1111. wake_up(&muxed_resource_wait);
  1112. free_resource(res);
  1113. return;
  1114. }
  1115. p = &res->sibling;
  1116. }
  1117. write_unlock(&resource_lock);
  1118. pr_warn("Trying to free nonexistent resource <%016llx-%016llx>\n",
  1119. (unsigned long long)start,
  1120. (unsigned long long)end);
  1121. }
  1122. EXPORT_SYMBOL(__release_region);
  1123. #ifdef CONFIG_MEMORY_HOTREMOVE
  1124. /**
  1125. * release_mem_region_adjustable - release a previously reserved memory region
  1126. * @start: resource start address
  1127. * @size: resource region size
  1128. *
  1129. * This interface is intended for memory hot-delete. The requested region
  1130. * is released from a currently busy memory resource. The requested region
  1131. * must either match exactly or fit into a single busy resource entry. In
  1132. * the latter case, the remaining resource is adjusted accordingly.
  1133. * Existing children of the busy memory resource must be immutable in the
  1134. * request.
  1135. *
  1136. * Note:
  1137. * - Additional release conditions, such as overlapping region, can be
  1138. * supported after they are confirmed as valid cases.
  1139. * - When a busy memory resource gets split into two entries, the code
  1140. * assumes that all children remain in the lower address entry for
  1141. * simplicity. Enhance this logic when necessary.
  1142. */
  1143. void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
  1144. {
  1145. struct resource *parent = &iomem_resource;
  1146. struct resource *new_res = NULL;
  1147. bool alloc_nofail = false;
  1148. struct resource **p;
  1149. struct resource *res;
  1150. resource_size_t end;
  1151. end = start + size - 1;
  1152. if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
  1153. return;
  1154. /*
  1155. * We free up quite a lot of memory on memory hotunplug (esp., memmap),
  1156. * just before releasing the region. This is highly unlikely to
  1157. * fail - let's play safe and make it never fail as the caller cannot
  1158. * perform any error handling (e.g., trying to re-add memory will fail
  1159. * similarly).
  1160. */
  1161. retry:
  1162. new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));
  1163. p = &parent->child;
  1164. write_lock(&resource_lock);
  1165. while ((res = *p)) {
  1166. if (res->start >= end)
  1167. break;
  1168. /* look for the next resource if the requested range does not fit into this one */
  1169. if (res->start > start || res->end < end) {
  1170. p = &res->sibling;
  1171. continue;
  1172. }
  1173. if (!(res->flags & IORESOURCE_MEM))
  1174. break;
  1175. if (!(res->flags & IORESOURCE_BUSY)) {
  1176. p = &res->child;
  1177. continue;
  1178. }
  1179. /* found the target resource; let's adjust accordingly */
  1180. if (res->start == start && res->end == end) {
  1181. /* free the whole entry */
  1182. *p = res->sibling;
  1183. free_resource(res);
  1184. } else if (res->start == start && res->end != end) {
  1185. /* adjust the start */
  1186. WARN_ON_ONCE(__adjust_resource(res, end + 1,
  1187. res->end - end));
  1188. } else if (res->start != start && res->end == end) {
  1189. /* adjust the end */
  1190. WARN_ON_ONCE(__adjust_resource(res, res->start,
  1191. start - res->start));
  1192. } else {
  1193. /* split into two entries - we need a new resource */
  1194. if (!new_res) {
  1195. new_res = alloc_resource(GFP_ATOMIC);
  1196. if (!new_res) {
  1197. alloc_nofail = true;
  1198. write_unlock(&resource_lock);
  1199. goto retry;
  1200. }
  1201. }
  1202. new_res->name = res->name;
  1203. new_res->start = end + 1;
  1204. new_res->end = res->end;
  1205. new_res->flags = res->flags;
  1206. new_res->desc = res->desc;
  1207. new_res->parent = res->parent;
  1208. new_res->sibling = res->sibling;
  1209. new_res->child = NULL;
  1210. if (WARN_ON_ONCE(__adjust_resource(res, res->start,
  1211. start - res->start)))
  1212. break;
  1213. res->sibling = new_res;
  1214. new_res = NULL;
  1215. }
  1216. break;
  1217. }
  1218. write_unlock(&resource_lock);
  1219. free_resource(new_res);
  1220. }
  1221. #endif /* CONFIG_MEMORY_HOTREMOVE */
  1222. #ifdef CONFIG_MEMORY_HOTPLUG
  1223. static bool system_ram_resources_mergeable(struct resource *r1,
  1224. struct resource *r2)
  1225. {
  1226. /* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
  1227. return r1->flags == r2->flags && r1->end + 1 == r2->start &&
  1228. r1->name == r2->name && r1->desc == r2->desc &&
  1229. !r1->child && !r2->child;
  1230. }
  1231. /**
  1232. * merge_system_ram_resource - mark the System RAM resource mergeable and try to
  1233. * merge it with adjacent, mergeable resources
  1234. * @res: resource descriptor
  1235. *
  1236. * This interface is intended for memory hotplug, whereby lots of contiguous
  1237. * system ram resources are added (e.g., via add_memory*()) by a driver, and
  1238. * the actual resource boundaries are not of interest (e.g., it might be
  1239. * relevant for DIMMs). Only resources that are marked mergeable, that have the
  1240. * same parent, and that don't have any children are considered. All mergeable
  1241. * resources must be immutable during the request.
  1242. *
  1243. * Note:
  1244. * - The caller has to make sure that no pointers to resources that are
  1245. * marked mergeable are used anymore after this call - the resource might
  1246. * be freed and the pointer might be stale!
  1247. * - release_mem_region_adjustable() will split on demand on memory hotunplug
  1248. */
  1249. void merge_system_ram_resource(struct resource *res)
  1250. {
  1251. const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
  1252. struct resource *cur;
  1253. if (WARN_ON_ONCE((res->flags & flags) != flags))
  1254. return;
  1255. write_lock(&resource_lock);
  1256. res->flags |= IORESOURCE_SYSRAM_MERGEABLE;
  1257. /* Try to merge with next item in the list. */
  1258. cur = res->sibling;
  1259. if (cur && system_ram_resources_mergeable(res, cur)) {
  1260. res->end = cur->end;
  1261. res->sibling = cur->sibling;
  1262. free_resource(cur);
  1263. }
  1264. /* Try to merge with previous item in the list. */
  1265. cur = res->parent->child;
  1266. while (cur && cur->sibling != res)
  1267. cur = cur->sibling;
  1268. if (cur && system_ram_resources_mergeable(cur, res)) {
  1269. cur->end = res->end;
  1270. cur->sibling = res->sibling;
  1271. free_resource(res);
  1272. }
  1273. write_unlock(&resource_lock);
  1274. }
  1275. #endif /* CONFIG_MEMORY_HOTPLUG */
  1276. /*
  1277. * Managed region resource
  1278. */
  1279. static void devm_resource_release(struct device *dev, void *ptr)
  1280. {
  1281. struct resource **r = ptr;
  1282. release_resource(*r);
  1283. }
  1284. /**
  1285. * devm_request_resource() - request and reserve an I/O or memory resource
  1286. * @dev: device for which to request the resource
  1287. * @root: root of the resource tree from which to request the resource
  1288. * @new: descriptor of the resource to request
  1289. *
  1290. * This is a device-managed version of request_resource(). There is usually
  1291. * no need to release resources requested by this function explicitly since
  1292. * that will be taken care of when the device is unbound from its driver.
  1293. * If for some reason the resource needs to be released explicitly, because
  1294. * of ordering issues for example, drivers must call devm_release_resource()
  1295. * rather than the regular release_resource().
  1296. *
  1297. * When a conflict is detected between any existing resources and the newly
  1298. * requested resource, an error message will be printed.
  1299. *
  1300. * Returns 0 on success or a negative error code on failure.
  1301. */
  1302. int devm_request_resource(struct device *dev, struct resource *root,
  1303. struct resource *new)
  1304. {
  1305. struct resource *conflict, **ptr;
  1306. ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
  1307. if (!ptr)
  1308. return -ENOMEM;
  1309. *ptr = new;
  1310. conflict = request_resource_conflict(root, new);
  1311. if (conflict) {
  1312. dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
  1313. new, conflict->name, conflict);
  1314. devres_free(ptr);
  1315. return -EBUSY;
  1316. }
  1317. devres_add(dev, ptr);
  1318. return 0;
  1319. }
  1320. EXPORT_SYMBOL(devm_request_resource);
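/*
 * Illustrative sketch (not part of the original file): a driver probe path
 * handing resource lifetime to devres via devm_request_resource().  The
 * device, name and range are made up.
 */
#if 0	/* usage sketch only, not compiled */
static struct resource example_res = {
	.name	= "example-ctrl",
	.start	= 0xfed50000,
	.end	= 0xfed50fff,
	.flags	= IORESOURCE_MEM,
};

static int example_probe(struct device *dev)
{
	/* Released automatically when the device is unbound. */
	return devm_request_resource(dev, &iomem_resource, &example_res);
}
#endif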
  1321. static int devm_resource_match(struct device *dev, void *res, void *data)
  1322. {
  1323. struct resource **ptr = res;
  1324. return *ptr == data;
  1325. }
  1326. /**
  1327. * devm_release_resource() - release a previously requested resource
  1328. * @dev: device for which to release the resource
  1329. * @new: descriptor of the resource to release
  1330. *
  1331. * Releases a resource previously requested using devm_request_resource().
  1332. */
  1333. void devm_release_resource(struct device *dev, struct resource *new)
  1334. {
  1335. WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
  1336. new));
  1337. }
  1338. EXPORT_SYMBOL(devm_release_resource);
  1339. struct region_devres {
  1340. struct resource *parent;
  1341. resource_size_t start;
  1342. resource_size_t n;
  1343. };
  1344. static void devm_region_release(struct device *dev, void *res)
  1345. {
  1346. struct region_devres *this = res;
  1347. __release_region(this->parent, this->start, this->n);
  1348. }
  1349. static int devm_region_match(struct device *dev, void *res, void *match_data)
  1350. {
  1351. struct region_devres *this = res, *match = match_data;
  1352. return this->parent == match->parent &&
  1353. this->start == match->start && this->n == match->n;
  1354. }
  1355. struct resource *
  1356. __devm_request_region(struct device *dev, struct resource *parent,
  1357. resource_size_t start, resource_size_t n, const char *name)
  1358. {
  1359. struct region_devres *dr = NULL;
  1360. struct resource *res;
  1361. dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
  1362. GFP_KERNEL);
  1363. if (!dr)
  1364. return NULL;
  1365. dr->parent = parent;
  1366. dr->start = start;
  1367. dr->n = n;
  1368. res = __request_region(parent, start, n, name, 0);
  1369. if (res)
  1370. devres_add(dev, dr);
  1371. else
  1372. devres_free(dr);
  1373. return res;
  1374. }
  1375. EXPORT_SYMBOL(__devm_request_region);
  1376. void __devm_release_region(struct device *dev, struct resource *parent,
  1377. resource_size_t start, resource_size_t n)
  1378. {
  1379. struct region_devres match_data = { parent, start, n };
  1380. __release_region(parent, start, n);
  1381. WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
  1382. &match_data));
  1383. }
  1384. EXPORT_SYMBOL(__devm_release_region);

/*
 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;
		struct resource *parent;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;

			/*
			 * If the region starts below 0x10000, we assume it's
			 * I/O port space; otherwise assume it's memory.
			 */
			if (io_start < 0x10000) {
				res->flags = IORESOURCE_IO;
				parent = &ioport_resource;
			} else {
				res->flags = IORESOURCE_MEM;
				parent = &iomem_resource;
			}
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags |= IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(parent, res) == 0)
				reserved = x + 1;
		}
	}
	return 1;
}
__setup("reserve=", reserve_setup);
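
/*
 * Example with illustrative values: booting with
 *
 *	reserve=0x300,8,0xf0000000,4096
 *
 * inserts a busy "reserved" resource for I/O ports 0x300-0x307 (the start
 * is below 0x10000) and another for memory 0xf0000000-0xf0000fff, which
 * keeps later request_region()/request_mem_region() callers away from
 * those ranges.  At most MAXRESERVE start,size pairs are honoured.
 */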

/*
 * Check if the requested addr and size span more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * If a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource sanity check: requesting [mem %#010llx-%#010llx], which spans more than %s %pR\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}
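
/*
 * Illustrative case for the check above (addresses made up): with an iomem
 * child [mem 0xf0000000-0xf000ffff "foo"] that is not BUSY, a mapping
 * request for 0xf000f000 + 0x2000 ends at 0xf0010fff, is not fully covered
 * by "foo" (or any other single slot), and therefore triggers the warning
 * and returns -1.  A request that stays within "foo" passes.
 */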

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is exclusive to the kernel and must not be mapped to
 * user space, for example, via /dev/mem.
 *
 * Returns true if exclusive to the kernel, otherwise returns false.
 */
bool iomem_is_exclusive(u64 addr)
{
	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
						  IORESOURCE_EXCLUSIVE;
	bool skip_children = false, err = false;
	int size = PAGE_SIZE;
	struct resource *p;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for_each_resource(&iomem_resource, p, skip_children) {
		if (p->start >= addr + size)
			break;
		if (p->end < addr) {
			skip_children = true;
			continue;
		}
		skip_children = false;

		/*
		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
		 * IORESOURCE_EXCLUSIVE is set, even if they
		 * are not busy and even if "iomem=relaxed" is set. The
		 * responsible driver dynamically adds/removes system RAM within
		 * such an area and uncontrolled access is dangerous.
		 */
		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
			err = true;
			break;
		}

		/*
		 * A resource is exclusive if IORESOURCE_EXCLUSIVE is set
		 * or CONFIG_IO_STRICT_DEVMEM is enabled and the
		 * resource is busy.
		 */
		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
				|| p->flags & IORESOURCE_EXCLUSIVE) {
			err = true;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}
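
/*
 * Hedged usage sketch (the caller shown is hypothetical): a /dev/mem-style
 * mmap or read path would typically refuse pages the kernel considers
 * exclusive:
 *
 *	if (iomem_is_exclusive(phys_addr))
 *		return -EPERM;
 *
 * With the default "iomem=strict", a page inside a BUSY, driver-claimed
 * region is exclusive when CONFIG_IO_STRICT_DEVMEM is enabled or
 * IORESOURCE_EXCLUSIVE is set; "iomem=relaxed" drops the BUSY-based checks
 * but never the IORESOURCE_SYSTEM_RAM | IORESOURCE_EXCLUSIVE case above.
 */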

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);
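
/*
 * Illustrative sketch of the resource_entry helpers (the list and values
 * are hypothetical): entries are usually collected on a caller-owned list
 * head with the helpers from <linux/resource_ext.h> and torn down in one
 * go with resource_list_free():
 *
 *	LIST_HEAD(resources);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	entry->res->start = 0xf0000000;
 *	entry->res->end   = 0xf000ffff;
 *	entry->res->flags = IORESOURCE_MEM;
 *	resource_list_add_tail(entry, &resources);
 *	...
 *	resource_list_free(&resources);
 */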

#ifdef CONFIG_GET_FREE_REGION
#define GFR_DESCENDING		(1UL << 0)
#define GFR_REQUEST_REGION	(1UL << 1)
#define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)

static resource_size_t gfr_start(struct resource *base, resource_size_t size,
				 resource_size_t align, unsigned long flags)
{
	if (flags & GFR_DESCENDING) {
		resource_size_t end;

		end = min_t(resource_size_t, base->end,
			    (1ULL << MAX_PHYSMEM_BITS) - 1);
		return end - size + 1;
	}

	return ALIGN(base->start, align);
}

static bool gfr_continue(struct resource *base, resource_size_t addr,
			 resource_size_t size, unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr > size && addr >= base->start;
	/*
	 * In the ascending case, be careful that the last increment by
	 * @size did not wrap past 0.
	 */
	return addr > addr - size &&
	       addr <= min_t(resource_size_t, base->end,
			     (1ULL << MAX_PHYSMEM_BITS) - 1);
}

static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
				unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr - size;
	return addr + size;
}

static void remove_free_mem_region(void *_res)
{
	struct resource *res = _res;

	if (res->parent)
		remove_resource(res);

	free_resource(res);
}

static struct resource *
get_free_mem_region(struct device *dev, struct resource *base,
		    resource_size_t size, const unsigned long align,
		    const char *name, const unsigned long desc,
		    const unsigned long flags)
{
	resource_size_t addr;
	struct resource *res;
	struct region_devres *dr = NULL;

	size = ALIGN(size, align);

	res = alloc_resource(GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	if (dev && (flags & GFR_REQUEST_REGION)) {
		dr = devres_alloc(devm_region_release,
				  sizeof(struct region_devres), GFP_KERNEL);
		if (!dr) {
			free_resource(res);
			return ERR_PTR(-ENOMEM);
		}
	} else if (dev) {
		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
			return ERR_PTR(-ENOMEM);
	}

	write_lock(&resource_lock);
	for (addr = gfr_start(base, size, align, flags);
	     gfr_continue(base, addr, size, flags);
	     addr = gfr_next(addr, size, flags)) {
		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
		    REGION_DISJOINT)
			continue;

		if (flags & GFR_REQUEST_REGION) {
			if (__request_region_locked(res, &iomem_resource, addr,
						    size, name, 0))
				break;

			if (dev) {
				dr->parent = &iomem_resource;
				dr->start = addr;
				dr->n = size;
				devres_add(dev, dr);
			}

			res->desc = desc;
			write_unlock(&resource_lock);

			/*
			 * A driver is claiming this region so revoke any
			 * mappings.
			 */
			revoke_iomem(res);
		} else {
			res->start = addr;
			res->end = addr + size - 1;
			res->name = name;
			res->desc = desc;
			res->flags = IORESOURCE_MEM;

			/*
			 * Only succeed if the resource hosts an exclusive
			 * range after the insert
			 */
			if (__insert_resource(base, res) || res->child)
				break;

			write_unlock(&resource_lock);
		}

		return res;
	}

	write_unlock(&resource_lock);

	if (flags & GFR_REQUEST_REGION) {
		free_resource(res);
		devres_free(dr);
	} else if (dev)
		devm_release_action(dev, remove_free_mem_region, res);

	return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @base: resource tree to look in
 * @size: size in bytes of the device memory to add
 *
 * This function tries to find an empty range of physical address space big
 * enough to contain the new resource, so that it can later be hotplugged as
 * ZONE_DEVICE memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
		struct resource *base, unsigned long size)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
				   dev_name(dev),
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);

struct resource *request_free_mem_region(struct resource *base,
		unsigned long size, const char *name)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);
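
/*
 * Hedged sketch of the intended use (the pgmap setup is illustrative): a
 * device-private memory driver picks a free span of physical address space
 * and then hotplugs it as ZONE_DEVICE memory:
 *
 *	struct resource *res;
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, size);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	addr = devm_memremap_pages(dev, pgmap);
 *
 * request_free_mem_region() is the variant for callers without a struct
 * device; they must remove the region themselves when done.
 */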

/**
 * alloc_free_mem_region - find a free region relative to @base
 * @base: resource that will parent the new resource
 * @size: size in bytes of memory to allocate from @base
 * @align: alignment requirements for the allocation
 * @name: resource name
 *
 * Buses like CXL, that can dynamically instantiate new memory regions,
 * need a method to allocate physical address space for those regions.
 * Allocate and insert a new resource covering a free range in the span of
 * @base that is not already claimed by any descendant of @base.
 */
struct resource *alloc_free_mem_region(struct resource *base,
				       unsigned long size, unsigned long align,
				       const char *name)
{
	/* Default of ascending direction and insert resource */
	unsigned long flags = 0;

	return get_free_mem_region(NULL, base, size, align, name,
				   IORES_DESC_NONE, flags);
}
EXPORT_SYMBOL_NS_GPL(alloc_free_mem_region, CXL);
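
/*
 * Hedged usage sketch (the parent resource, size and name are
 * hypothetical): a CXL-style caller carves a new region out of a host
 * bridge window resource and keeps it parented under that window:
 *
 *	res = alloc_free_mem_region(cxl_window_res, region_size, SZ_256M,
 *				    "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 * The ascending search plus __insert_resource() means the returned
 * resource is a child of @base covering a span no descendant had claimed.
 */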
#endif /* CONFIG_GET_FREE_REGION */

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name = "iomem",
	.owner = THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb = kill_anon_super,
};

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish iomem revocation inode initialized.
	 * Pairs with smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);
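
/*
 * Example: booting with "iomem=relaxed" clears strict_iomem_checks so that
 * iomem_is_exclusive() no longer treats merely BUSY regions as exclusive,
 * while "iomem=strict" (the CONFIG_STRICT_DEVMEM default) restores the
 * stricter behaviour.  The IORESOURCE_SYSTEM_RAM | IORESOURCE_EXCLUSIVE
 * case stays exclusive either way.
 */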