/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NODEMASK_H
#define __LINUX_NODEMASK_H

/*
 * Nodemasks provide a bitmap suitable for representing the
 * set of Node's in a system, one bit position per Node number.
 *
 * See detailed comments in the file linux/bitmap.h describing the
 * data type on which these nodemasks are based.
 *
 * For details of nodemask_parse_user(), see bitmap_parse_user() in
 * lib/bitmap.c.  For details of nodelist_parse(), see bitmap_parselist(),
 * also in bitmap.c.  For details of node_remap(), see bitmap_bitremap in
 * lib/bitmap.c.  For details of nodes_remap(), see bitmap_remap in
 * lib/bitmap.c.  For details of nodes_onto(), see bitmap_onto in
 * lib/bitmap.c.  For details of nodes_fold(), see bitmap_fold in
 * lib/bitmap.c.
 *
 * The available nodemask operations are:
 *
 * void node_set(node, mask)		turn on bit 'node' in mask
 * void node_clear(node, mask)		turn off bit 'node' in mask
 * void nodes_setall(mask)		set all bits
 * void nodes_clear(mask)		clear all bits
 * int node_isset(node, mask)		true iff bit 'node' set in mask
 * int node_test_and_set(node, mask)	test and set bit 'node' in mask
 *
 * void nodes_and(dst, src1, src2)	dst = src1 & src2  [intersection]
 * void nodes_or(dst, src1, src2)	dst = src1 | src2  [union]
 * void nodes_xor(dst, src1, src2)	dst = src1 ^ src2
 * void nodes_andnot(dst, src1, src2)	dst = src1 & ~src2
 * void nodes_complement(dst, src)	dst = ~src
 *
 * int nodes_equal(mask1, mask2)	Does mask1 == mask2?
 * int nodes_intersects(mask1, mask2)	Do mask1 and mask2 intersect?
 * int nodes_subset(mask1, mask2)	Is mask1 a subset of mask2?
 * int nodes_empty(mask)		Is mask empty (no bits sets)?
 * int nodes_full(mask)			Is mask full (all bits sets)?
 * int nodes_weight(mask)		Hamming weight - number of set bits
 *
 * void nodes_shift_right(dst, src, n)	Shift right
 * void nodes_shift_left(dst, src, n)	Shift left
 *
 * unsigned int first_node(mask)	Number lowest set bit, or MAX_NUMNODES
 * unsigned int next_node(node, mask)	Next node past 'node', or MAX_NUMNODES
 * unsigned int next_node_in(node, mask) Next node past 'node', or wrap to first,
 *					or MAX_NUMNODES
 * unsigned int first_unset_node(mask)	First node not set in mask, or
 *					MAX_NUMNODES
 *
 * nodemask_t nodemask_of_node(node)	Return nodemask with bit 'node' set
 * NODE_MASK_ALL			Initializer - all bits set
 * NODE_MASK_NONE			Initializer - no bits set
 * unsigned long *nodes_addr(mask)	Array of unsigned long's in mask
 *
 * int nodemask_parse_user(ubuf, ulen, mask)	Parse ascii string as nodemask
 * int nodelist_parse(buf, map)		Parse ascii string as nodelist
 * int node_remap(oldbit, old, new)	newbit = map(old, new)(oldbit)
 * void nodes_remap(dst, src, old, new)	*dst = map(old, new)(src)
 * void nodes_onto(dst, orig, relmap)	*dst = orig relative to relmap
 * void nodes_fold(dst, orig, sz)	dst bits = orig bits mod sz
 *
 * for_each_node_mask(node, mask)	for-loop node over mask
 *
 * int num_online_nodes()		Number of online Nodes
 * int num_possible_nodes()		Number of all possible Nodes
 *
 * int node_random(mask)		Random node with set bit in mask
 *
 * int node_online(node)		Is some node online?
 * int node_possible(node)		Is some node possible?
 *
 * node_set_online(node)		set bit 'node' in node_online_map
 * node_set_offline(node)		clear bit 'node' in node_online_map
 *
 * for_each_node(node)			for-loop node over node_possible_map
 * for_each_online_node(node)		for-loop node over node_online_map
 *
 * Subtlety:
 * 1) The 'type-checked' form of node_isset() causes gcc (3.3.2, anyway)
 *    to generate slightly worse code.  So use a simple one-line #define
 *    for node_isset(), instead of wrapping an inline inside a macro, the
 *    way we do the other calls.
 *
 * NODEMASK_SCRATCH
 * When doing above logical AND, OR, XOR, Remap operations the callers tend to
 * need temporary nodemask_t's on the stack.  But if NODES_SHIFT is large,
 * nodemask_t's consume too much stack space.  NODEMASK_SCRATCH is a helper
 * for such situations.  See below and CPUMASK_ALLOC also.
 */
#include <linux/threads.h>
#include <linux/bitmap.h>
#include <linux/minmax.h>
#include <linux/numa.h>
#include <linux/random.h>

/* A bitmap with one bit position per possible node (MAX_NUMNODES bits). */
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;

/* Never defined/used for its value; only feeds typeof() in macros below. */
extern nodemask_t _unused_nodemask_arg_;
/**
 * nodemask_pr_args - printf args to output a nodemask
 * @maskp: nodemask to be printed
 *
 * Can be used to provide arguments for '%*pb[l]' when printing a nodemask.
 */
#define nodemask_pr_args(maskp)	__nodemask_pr_numnodes(maskp), \
				__nodemask_pr_bits(maskp)

/* Field-width argument for '%*pb[l]'; 0 when @m is NULL so nothing prints. */
static inline unsigned int __nodemask_pr_numnodes(const nodemask_t *m)
{
	return m ? MAX_NUMNODES : 0;
}

/* Bitmap-pointer argument for '%*pb[l]'; NULL-safe. */
static inline const unsigned long *__nodemask_pr_bits(const nodemask_t *m)
{
	return m ? m->bits : NULL;
}
/*
 * The inline keyword gives the compiler room to decide to inline, or
 * not inline a function as it sees best.  However, as these functions
 * are called in both __init and non-__init functions, if they are not
 * inlined we will end up with a section mismatch error (of the type of
 * freeable items not being freed).  So we must use __always_inline here
 * to fix the problem.  If other functions in the future also end up in
 * this situation they will also need to be annotated as __always_inline
 */
#define node_set(node, dst) __node_set((node), &(dst))
/* Set bit 'node' in *dstp, via the set_bit() bitop. */
static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
{
	set_bit(node, dstp->bits);
}

#define node_clear(node, dst) __node_clear((node), &(dst))
/* Clear bit 'node' in *dstp, via the clear_bit() bitop. */
static inline void __node_clear(int node, volatile nodemask_t *dstp)
{
	clear_bit(node, dstp->bits);
}
#define nodes_setall(dst) __nodes_setall(&(dst), MAX_NUMNODES)
/* Set every one of the first nbits bits in *dstp. */
static inline void __nodes_setall(nodemask_t *dstp, unsigned int nbits)
{
	bitmap_fill(dstp->bits, nbits);
}

#define nodes_clear(dst) __nodes_clear(&(dst), MAX_NUMNODES)
/* Clear every one of the first nbits bits in *dstp. */
static inline void __nodes_clear(nodemask_t *dstp, unsigned int nbits)
{
	bitmap_zero(dstp->bits, nbits);
}
/* No static inline type checking - see Subtlety (1) above. */
#define node_isset(node, nodemask) test_bit((node), (nodemask).bits)

#define node_test_and_set(node, nodemask) \
			__node_test_and_set((node), &(nodemask))
/* Set bit 'node' and return its previous value (test_and_set_bit()). */
static inline bool __node_test_and_set(int node, nodemask_t *addr)
{
	return test_and_set_bit(node, addr->bits);
}
#define nodes_and(dst, src1, src2) \
			__nodes_and(&(dst), &(src1), &(src2), MAX_NUMNODES)
/* *dstp = *src1p & *src2p  (intersection). */
static inline void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_or(dst, src1, src2) \
			__nodes_or(&(dst), &(src1), &(src2), MAX_NUMNODES)
/* *dstp = *src1p | *src2p  (union). */
static inline void __nodes_or(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_xor(dst, src1, src2) \
			__nodes_xor(&(dst), &(src1), &(src2), MAX_NUMNODES)
/* *dstp = *src1p ^ *src2p  (symmetric difference). */
static inline void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_andnot(dst, src1, src2) \
			__nodes_andnot(&(dst), &(src1), &(src2), MAX_NUMNODES)
/* *dstp = *src1p & ~*src2p  (set difference). */
static inline void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}

#define nodes_complement(dst, src) \
			__nodes_complement(&(dst), &(src), MAX_NUMNODES)
/* *dstp = ~*srcp over the first nbits bits. */
static inline void __nodes_complement(nodemask_t *dstp,
					const nodemask_t *srcp, unsigned int nbits)
{
	bitmap_complement(dstp->bits, srcp->bits, nbits);
}
#define nodes_equal(src1, src2) \
			__nodes_equal(&(src1), &(src2), MAX_NUMNODES)
/* true iff the first nbits bits of the two masks are identical. */
static inline bool __nodes_equal(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_equal(src1p->bits, src2p->bits, nbits);
}

#define nodes_intersects(src1, src2) \
			__nodes_intersects(&(src1), &(src2), MAX_NUMNODES)
/* true iff the masks share at least one set bit. */
static inline bool __nodes_intersects(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}

#define nodes_subset(src1, src2) \
			__nodes_subset(&(src1), &(src2), MAX_NUMNODES)
/* true iff every bit set in *src1p is also set in *src2p. */
static inline bool __nodes_subset(const nodemask_t *src1p,
					const nodemask_t *src2p, unsigned int nbits)
{
	return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
#define nodes_empty(src) __nodes_empty(&(src), MAX_NUMNODES)
/* true iff no bit is set in the first nbits bits. */
static inline bool __nodes_empty(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_empty(srcp->bits, nbits);
}

#define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES)
/* true iff all of the first nbits bits are set. */
static inline bool __nodes_full(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_full(srcp->bits, nbits);
}

#define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES)
/* Hamming weight: number of set bits in the first nbits bits. */
static inline int __nodes_weight(const nodemask_t *srcp, unsigned int nbits)
{
	return bitmap_weight(srcp->bits, nbits);
}
#define nodes_shift_right(dst, src, n) \
			__nodes_shift_right(&(dst), &(src), (n), MAX_NUMNODES)
/* *dstp = *srcp >> n  (towards bit 0; vacated high bits become 0). */
static inline void __nodes_shift_right(nodemask_t *dstp,
					const nodemask_t *srcp, int n, int nbits)
{
	bitmap_shift_right(dstp->bits, srcp->bits, n, nbits);
}

#define nodes_shift_left(dst, src, n) \
			__nodes_shift_left(&(dst), &(src), (n), MAX_NUMNODES)
/* *dstp = *srcp << n  (towards higher node numbers; top bits drop off). */
static inline void __nodes_shift_left(nodemask_t *dstp,
					const nodemask_t *srcp, int n, int nbits)
{
	bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
/* FIXME: better would be to fix all architectures to never return
          > MAX_NUMNODES, then the silly min_ts could be dropped. */

#define first_node(src) __first_node(&(src))
/* Number of the lowest set bit, or MAX_NUMNODES if *srcp is empty. */
static inline unsigned int __first_node(const nodemask_t *srcp)
{
	/* min_t() clamps architectures whose find_first_bit() can overshoot. */
	return min_t(unsigned int, MAX_NUMNODES, find_first_bit(srcp->bits, MAX_NUMNODES));
}

#define next_node(n, src) __next_node((n), &(src))
/* Lowest set bit strictly after bit 'n', or MAX_NUMNODES if none. */
static inline unsigned int __next_node(int n, const nodemask_t *srcp)
{
	return min_t(unsigned int, MAX_NUMNODES, find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
}
  248. /*
  249. * Find the next present node in src, starting after node n, wrapping around to
  250. * the first node in src if needed. Returns MAX_NUMNODES if src is empty.
  251. */
  252. #define next_node_in(n, src) __next_node_in((n), &(src))
  253. static inline unsigned int __next_node_in(int node, const nodemask_t *srcp)
  254. {
  255. unsigned int ret = __next_node(node, srcp);
  256. if (ret == MAX_NUMNODES)
  257. ret = __first_node(srcp);
  258. return ret;
  259. }
/* Initialise *mask so that only bit 'node' is set. */
static inline void init_nodemask_of_node(nodemask_t *mask, int node)
{
	nodes_clear(*mask);
	node_set(node, *mask);
}
  265. #define nodemask_of_node(node) \
  266. ({ \
  267. typeof(_unused_nodemask_arg_) m; \
  268. if (sizeof(m) == sizeof(unsigned long)) { \
  269. m.bits[0] = 1UL << (node); \
  270. } else { \
  271. init_nodemask_of_node(&m, (node)); \
  272. } \
  273. m; \
  274. })
#define first_unset_node(mask) __first_unset_node(&(mask))
/* Number of the lowest clear bit, or MAX_NUMNODES if *maskp is full. */
static inline unsigned int __first_unset_node(const nodemask_t *maskp)
{
	return min_t(unsigned int, MAX_NUMNODES,
			find_first_zero_bit(maskp->bits, MAX_NUMNODES));
}
/* Mask covering only the valid bits of the final word of a nodemask. */
#define NODE_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(MAX_NUMNODES)

#if MAX_NUMNODES <= BITS_PER_LONG

/* Initializer: all MAX_NUMNODES bits set (single-word case). */
#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })

#else

/* Initializer: all bits set (multi-word: full words, then partial last). */
#define NODE_MASK_ALL							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-2] = ~0UL,			\
	[BITS_TO_LONGS(MAX_NUMNODES)-1] = NODE_MASK_LAST_WORD		\
} })

#endif

/* Initializer: no bits set. */
#define NODE_MASK_NONE							\
((nodemask_t) { {							\
	[0 ... BITS_TO_LONGS(MAX_NUMNODES)-1] = 0UL			\
} })
/* The raw unsigned long array underlying a nodemask lvalue. */
#define nodes_addr(src) ((src).bits)

#define nodemask_parse_user(ubuf, ulen, dst) \
		__nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES)
/* Parse an ascii mask from user space into *dstp (see bitmap_parse_user()). */
static inline int __nodemask_parse_user(const char __user *buf, int len,
					nodemask_t *dstp, int nbits)
{
	return bitmap_parse_user(buf, len, dstp->bits, nbits);
}

#define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES)
/* Parse a kernel ascii node list into *dstp (see bitmap_parselist()). */
static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits)
{
	return bitmap_parselist(buf, dstp->bits, nbits);
}
#define node_remap(oldbit, old, new) \
		__node_remap((oldbit), &(old), &(new), MAX_NUMNODES)
/* Map one bit number through the old->new renumbering (bitmap_bitremap()). */
static inline int __node_remap(int oldbit,
		const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
	return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits);
}

#define nodes_remap(dst, src, old, new) \
		__nodes_remap(&(dst), &(src), &(old), &(new), MAX_NUMNODES)
/* *dstp = *srcp with every bit mapped through old->new (bitmap_remap()). */
static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
		const nodemask_t *oldp, const nodemask_t *newp, int nbits)
{
	bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
}

#define nodes_onto(dst, orig, relmap) \
		__nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
/* Translate *origp relative to the bits set in *relmapp (bitmap_onto()). */
static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
		const nodemask_t *relmapp, int nbits)
{
	bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
}

#define nodes_fold(dst, orig, sz) \
		__nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
/* Fold bit numbers of *origp modulo sz into *dstp (bitmap_fold()). */
static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
		int sz, int nbits)
{
	bitmap_fold(dstp->bits, origp->bits, sz, nbits);
}
  339. #if MAX_NUMNODES > 1
  340. #define for_each_node_mask(node, mask) \
  341. for ((node) = first_node(mask); \
  342. (node >= 0) && (node) < MAX_NUMNODES; \
  343. (node) = next_node((node), (mask)))
  344. #else /* MAX_NUMNODES == 1 */
  345. #define for_each_node_mask(node, mask) \
  346. for ((node) = 0; (node) < 1 && !nodes_empty(mask); (node)++)
  347. #endif /* MAX_NUMNODES */
/*
 * Bitmasks that are kept for all the nodes.
 */
enum node_states {
	N_POSSIBLE,		/* The node could become online at some point */
	N_ONLINE,		/* The node is online */
	N_NORMAL_MEMORY,	/* The node has regular memory */
#ifdef CONFIG_HIGHMEM
	N_HIGH_MEMORY,		/* The node has regular or high memory */
#else
	/* Without highmem the two states are indistinguishable: alias them. */
	N_HIGH_MEMORY = N_NORMAL_MEMORY,
#endif
	N_MEMORY,		/* The node has memory(regular, high, movable) */
	N_CPU,			/* The node has one or more cpus */
	N_GENERIC_INITIATOR,	/* The node has one or more Generic Initiators */
	NR_NODE_STATES
};
/*
 * The following particular system nodemasks and operations
 * on them manage all possible and online nodes.
 */

/* One nodemask per tracked state, indexed by enum node_states. */
extern nodemask_t node_states[NR_NODE_STATES];

#if MAX_NUMNODES > 1

/* Non-zero iff 'node' is set in the mask for 'state'. */
static inline int node_state(int node, enum node_states state)
{
	return node_isset(node, node_states[state]);
}

/* Record that 'node' has 'state'. */
static inline void node_set_state(int node, enum node_states state)
{
	__node_set(node, &node_states[state]);
}

/* Record that 'node' no longer has 'state'. */
static inline void node_clear_state(int node, enum node_states state)
{
	__node_clear(node, &node_states[state]);
}

/* Number of nodes currently in 'state'. */
static inline int num_node_state(enum node_states state)
{
	return nodes_weight(node_states[state]);
}

#define for_each_node_state(__node, __state) \
	for_each_node_mask((__node), node_states[__state])

#define first_online_node	first_node(node_states[N_ONLINE])
#define first_memory_node	first_node(node_states[N_MEMORY])

/* Next online node after 'nid', or MAX_NUMNODES if none. */
static inline unsigned int next_online_node(int nid)
{
	return next_node(nid, node_states[N_ONLINE]);
}

/* Next node with memory after 'nid', or MAX_NUMNODES if none. */
static inline unsigned int next_memory_node(int nid)
{
	return next_node(nid, node_states[N_MEMORY]);
}

extern unsigned int nr_node_ids;
extern unsigned int nr_online_nodes;

/* Mark 'nid' online and refresh the cached online-node count. */
static inline void node_set_online(int nid)
{
	node_set_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

/* Mark 'nid' offline and refresh the cached online-node count. */
static inline void node_set_offline(int nid)
{
	node_clear_state(nid, N_ONLINE);
	nr_online_nodes = num_node_state(N_ONLINE);
}

#else

/* Single-node stubs: node 0 always exists, state changes are no-ops. */
static inline int node_state(int node, enum node_states state)
{
	return node == 0;
}

static inline void node_set_state(int node, enum node_states state)
{
}

static inline void node_clear_state(int node, enum node_states state)
{
}

static inline int num_node_state(enum node_states state)
{
	return 1;
}

#define for_each_node_state(node, __state) \
	for ( (node) = 0; (node) == 0; (node) = 1)

#define first_online_node	0
#define first_memory_node	0
#define next_online_node(nid)	(MAX_NUMNODES)
#define next_memory_node(nid)	(MAX_NUMNODES)

#define nr_node_ids		1U
#define nr_online_nodes		1U

#define node_set_online(node)	   node_set_state((node), N_ONLINE)
#define node_set_offline(node)	   node_clear_state((node), N_ONLINE)

#endif
  437. static inline int node_random(const nodemask_t *maskp)
  438. {
  439. #if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
  440. int w, bit;
  441. w = nodes_weight(*maskp);
  442. switch (w) {
  443. case 0:
  444. bit = NUMA_NO_NODE;
  445. break;
  446. case 1:
  447. bit = first_node(*maskp);
  448. break;
  449. default:
  450. bit = find_nth_bit(maskp->bits, MAX_NUMNODES, prandom_u32_max(w));
  451. break;
  452. }
  453. return bit;
  454. #else
  455. return 0;
  456. #endif
  457. }
/* Convenience aliases and predicates built on node_states[]. */
#define node_online_map 	node_states[N_ONLINE]
#define node_possible_map 	node_states[N_POSSIBLE]

#define num_online_nodes()	num_node_state(N_ONLINE)
#define num_possible_nodes()	num_node_state(N_POSSIBLE)
#define node_online(node)	node_state((node), N_ONLINE)
#define node_possible(node)	node_state((node), N_POSSIBLE)

#define for_each_node(node)	   for_each_node_state(node, N_POSSIBLE)
#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
/*
 * For nodemask scratch area.
 * NODEMASK_ALLOC(type, name) allocates an object with a specified type and
 * name.
 */
#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
/* Large masks would bloat the stack: take them from the heap instead. */
#define NODEMASK_ALLOC(type, name, gfp_flags)	\
			type *name = kmalloc(sizeof(*name), gfp_flags)
#define NODEMASK_FREE(m)			kfree(m)
#else
/* Small masks live on the stack; FREE is correspondingly a no-op. */
#define NODEMASK_ALLOC(type, name, gfp_flags) type _##name, *name = &_##name
#define NODEMASK_FREE(m)			do {} while (0)
#endif

/* Example structure for using NODEMASK_ALLOC, used in mempolicy. */
struct nodemask_scratch {
	nodemask_t	mask1;
	nodemask_t	mask2;
};

#define NODEMASK_SCRATCH(x)						\
			NODEMASK_ALLOC(struct nodemask_scratch, x,	\
					GFP_KERNEL | __GFP_NORETRY)
#define NODEMASK_SCRATCH_FREE(x)	NODEMASK_FREE(x)

#endif /* __LINUX_NODEMASK_H */