  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Collaborative memory management interface.
  4. *
  5. * Copyright (C) 2008 IBM Corporation
  6. * Author(s): Brian King ([email protected]),
  7. */
  8. #include <linux/ctype.h>
  9. #include <linux/delay.h>
  10. #include <linux/errno.h>
  11. #include <linux/fs.h>
  12. #include <linux/gfp.h>
  13. #include <linux/kthread.h>
  14. #include <linux/module.h>
  15. #include <linux/oom.h>
  16. #include <linux/reboot.h>
  17. #include <linux/sched.h>
  18. #include <linux/stringify.h>
  19. #include <linux/swap.h>
  20. #include <linux/device.h>
  21. #include <linux/balloon_compaction.h>
  22. #include <asm/firmware.h>
  23. #include <asm/hvcall.h>
  24. #include <asm/mmu.h>
  25. #include <linux/uaccess.h>
  26. #include <linux/memory.h>
  27. #include <asm/plpar_wrappers.h>
  28. #include "pseries.h"
  29. #define CMM_DRIVER_VERSION "1.0.0"
  30. #define CMM_DEFAULT_DELAY 1
  31. #define CMM_HOTPLUG_DELAY 5
  32. #define CMM_DEBUG 0
  33. #define CMM_DISABLE 0
  34. #define CMM_OOM_KB 1024
  35. #define CMM_MIN_MEM_MB 256
  36. #define KB2PAGES(_p) ((_p)>>(PAGE_SHIFT-10))
  37. #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
  38. #define CMM_MEM_HOTPLUG_PRI 1
  39. static unsigned int delay = CMM_DEFAULT_DELAY;
  40. static unsigned int hotplug_delay = CMM_HOTPLUG_DELAY;
  41. static unsigned int oom_kb = CMM_OOM_KB;
  42. static unsigned int cmm_debug = CMM_DEBUG;
  43. static unsigned int cmm_disabled = CMM_DISABLE;
  44. static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
  45. static bool __read_mostly simulate;
  46. static unsigned long simulate_loan_target_kb;
  47. static struct device cmm_dev;
  48. MODULE_AUTHOR("Brian King <[email protected]>");
  49. MODULE_DESCRIPTION("IBM System p Collaborative Memory Manager");
  50. MODULE_LICENSE("GPL");
  51. MODULE_VERSION(CMM_DRIVER_VERSION);
  52. module_param_named(delay, delay, uint, 0644);
  53. MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. "
  54. "[Default=" __stringify(CMM_DEFAULT_DELAY) "]");
  55. module_param_named(hotplug_delay, hotplug_delay, uint, 0644);
  56. MODULE_PARM_DESC(hotplug_delay, "Delay (in seconds) after memory hotplug remove "
  57. "before loaning resumes. "
  58. "[Default=" __stringify(CMM_HOTPLUG_DELAY) "]");
  59. module_param_named(oom_kb, oom_kb, uint, 0644);
  60. MODULE_PARM_DESC(oom_kb, "Amount of memory in kb to free on OOM. "
  61. "[Default=" __stringify(CMM_OOM_KB) "]");
  62. module_param_named(min_mem_mb, min_mem_mb, ulong, 0644);
  63. MODULE_PARM_DESC(min_mem_mb, "Minimum amount of memory (in MB) to not balloon. "
  64. "[Default=" __stringify(CMM_MIN_MEM_MB) "]");
  65. module_param_named(debug, cmm_debug, uint, 0644);
  66. MODULE_PARM_DESC(debug, "Enable module debugging logging. Set to 1 to enable. "
  67. "[Default=" __stringify(CMM_DEBUG) "]");
  68. module_param_named(simulate, simulate, bool, 0444);
  69. MODULE_PARM_DESC(simulate, "Enable simulation mode (no communication with hw).");
  70. #define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); }
  71. static atomic_long_t loaned_pages;
  72. static unsigned long loaned_pages_target;
  73. static unsigned long oom_freed_pages;
  74. static DEFINE_MUTEX(hotplug_mutex);
  75. static int hotplug_occurred; /* protected by the hotplug mutex */
  76. static struct task_struct *cmm_thread_ptr;
  77. static struct balloon_dev_info b_dev_info;
/*
 * Mark one kernel page as loaned to the hypervisor.  A kernel page may be
 * larger than the CMO page size, so each CMO-sized sub-page is loaned
 * individually; on failure, sub-pages already loaned are rolled back to
 * active so the page is never left half-loaned.
 */
static long plpar_page_set_loaned(struct page *page)
{
	const unsigned long vpa = page_to_phys(page);
	unsigned long cmo_page_sz = cmo_get_page_size();
	long rc = 0;
	int i;

	/* Simulation mode never talks to the hypervisor. */
	if (unlikely(simulate))
		return 0;

	for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
		rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);

	/* Undo the sub-pages loaned before the failing hcall (rc != 0). */
	for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
		plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
				   vpa + i - cmo_page_sz, 0);

	return rc;
}
/*
 * Mark one kernel page as active (no longer loaned).  Mirror image of
 * plpar_page_set_loaned(): each CMO-sized sub-page is activated, and on
 * failure the already-activated sub-pages are rolled back to loaned.
 */
static long plpar_page_set_active(struct page *page)
{
	const unsigned long vpa = page_to_phys(page);
	unsigned long cmo_page_sz = cmo_get_page_size();
	long rc = 0;
	int i;

	/* Simulation mode never talks to the hypervisor. */
	if (unlikely(simulate))
		return 0;

	for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
		rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);

	/* Undo the sub-pages activated before the failing hcall (rc != 0). */
	for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
		plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
				   vpa + i - cmo_page_sz, 0);

	return rc;
}
/**
 * cmm_alloc_pages - Allocate pages and mark them as loaned
 * @nr: number of pages to allocate
 *
 * Allocates pages from the balloon, hands them to the hypervisor via
 * plpar_page_set_loaned() and removes them from the managed page count.
 *
 * Return value:
 * 	number of pages requested to be allocated which were not
 **/
static long cmm_alloc_pages(long nr)
{
	struct page *page;
	long rc;

	cmm_dbg("Begin request for %ld pages\n", nr);

	while (nr) {
		/* Exit if a hotplug operation is in progress or occurred */
		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				mutex_unlock(&hotplug_mutex);
				break;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			break;
		}

		page = balloon_page_alloc();
		if (!page)
			break;
		rc = plpar_page_set_loaned(page);
		if (rc) {
			pr_err("%s: Can not set page to loaned. rc=%ld\n", __func__, rc);
			__free_page(page);
			break;
		}

		balloon_page_enqueue(&b_dev_info, page);
		atomic_long_inc(&loaned_pages);
		/* Loaned pages no longer count towards managed memory. */
		adjust_managed_page_count(page, -1);
		nr--;
	}

	cmm_dbg("End request with %ld pages unfulfilled\n", nr);
	return nr;
}
  148. /**
  149. * cmm_free_pages - Free pages and mark them as active
  150. * @nr: number of pages to free
  151. *
  152. * Return value:
  153. * number of pages requested to be freed which were not
  154. **/
  155. static long cmm_free_pages(long nr)
  156. {
  157. struct page *page;
  158. cmm_dbg("Begin free of %ld pages.\n", nr);
  159. while (nr) {
  160. page = balloon_page_dequeue(&b_dev_info);
  161. if (!page)
  162. break;
  163. plpar_page_set_active(page);
  164. adjust_managed_page_count(page, 1);
  165. __free_page(page);
  166. atomic_long_dec(&loaned_pages);
  167. nr--;
  168. }
  169. cmm_dbg("End request with %ld pages unfulfilled\n", nr);
  170. return nr;
  171. }
  172. /**
  173. * cmm_oom_notify - OOM notifier
  174. * @self: notifier block struct
  175. * @dummy: not used
  176. * @parm: returned - number of pages freed
  177. *
  178. * Return value:
  179. * NOTIFY_OK
  180. **/
  181. static int cmm_oom_notify(struct notifier_block *self,
  182. unsigned long dummy, void *parm)
  183. {
  184. unsigned long *freed = parm;
  185. long nr = KB2PAGES(oom_kb);
  186. cmm_dbg("OOM processing started\n");
  187. nr = cmm_free_pages(nr);
  188. loaned_pages_target = atomic_long_read(&loaned_pages);
  189. *freed += KB2PAGES(oom_kb) - nr;
  190. oom_freed_pages += KB2PAGES(oom_kb) - nr;
  191. cmm_dbg("OOM processing complete\n");
  192. return NOTIFY_OK;
  193. }
/**
 * cmm_get_mpp - Read memory performance parameters
 *
 * Makes hcall to query the current page loan request from the hypervisor
 * (or uses simulate_loan_target_kb in simulation mode) and derives the new
 * loaned_pages_target, clamped so at least min_mem_mb stays unballooned
 * and OOM-freed pages are not immediately re-loaned.
 *
 * Return value:
 * 	nothing
 **/
static void cmm_get_mpp(void)
{
	const long __loaned_pages = atomic_long_read(&loaned_pages);
	const long total_pages = totalram_pages() + __loaned_pages;
	int rc;
	struct hvcall_mpp_data mpp_data;
	signed long active_pages_target, page_loan_request, target;
	signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;

	if (likely(!simulate)) {
		rc = h_get_mpp(&mpp_data);
		if (rc != H_SUCCESS)
			return;
		/* loan_request is in bytes; convert to pages (may be negative). */
		page_loan_request = div_s64((s64)mpp_data.loan_request,
					    PAGE_SIZE);
		target = page_loan_request + __loaned_pages;
	} else {
		target = KB2PAGES(simulate_loan_target_kb);
		page_loan_request = target - __loaned_pages;
	}

	if (target < 0 || total_pages < min_mem_pages)
		target = 0;

	/* Do not re-loan memory the OOM notifier just handed back. */
	if (target > oom_freed_pages)
		target -= oom_freed_pages;
	else
		target = 0;

	active_pages_target = total_pages - target;

	/* Never balloon below the configured minimum amount of memory. */
	if (min_mem_pages > active_pages_target)
		target = total_pages - min_mem_pages;

	if (target < 0)
		target = 0;

	loaned_pages_target = target;

	cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
		page_loan_request, __loaned_pages, loaned_pages_target,
		oom_freed_pages, totalram_pages());
}
/* Registered with the OOM killer so loaned memory can be given back. */
static struct notifier_block cmm_oom_nb = {
	.notifier_call = cmm_oom_notify
};
/**
 * cmm_thread - CMM task thread
 * @dummy: not used
 *
 * Polls the hypervisor every @delay seconds and inflates/deflates the
 * balloon towards loaned_pages_target.  Loaning pauses for
 * @hotplug_delay seconds after a memory hotplug operation.
 *
 * Return value:
 * 	0
 **/
static int cmm_thread(void *dummy)
{
	unsigned long timeleft;
	long __loaned_pages;

	while (1) {
		timeleft = msleep_interruptible(delay * 1000);

		/* A nonzero timeleft means the sleep was interrupted. */
		if (kthread_should_stop() || timeleft)
			break;

		if (mutex_trylock(&hotplug_mutex)) {
			if (hotplug_occurred) {
				hotplug_occurred = 0;
				mutex_unlock(&hotplug_mutex);
				cmm_dbg("Hotplug operation has occurred, "
						"loaning activity suspended "
						"for %d seconds.\n",
						hotplug_delay);
				timeleft = msleep_interruptible(hotplug_delay *
						1000);
				if (kthread_should_stop() || timeleft)
					break;
				continue;
			}
			mutex_unlock(&hotplug_mutex);
		} else {
			/* Hotplug holds the mutex; skip this polling cycle. */
			cmm_dbg("Hotplug operation in progress, activity "
					"suspended\n");
			continue;
		}

		cmm_get_mpp();

		__loaned_pages = atomic_long_read(&loaned_pages);
		if (loaned_pages_target > __loaned_pages) {
			/* Could not fully inflate: accept what we got. */
			if (cmm_alloc_pages(loaned_pages_target - __loaned_pages))
				loaned_pages_target = __loaned_pages;
		} else if (loaned_pages_target < __loaned_pages)
			cmm_free_pages(__loaned_pages - loaned_pages_target);
	}
	return 0;
}
/* Generate a read-only sysfs show routine and its device attribute. */
#define CMM_SHOW(name, format, args...)			\
	static ssize_t show_##name(struct device *dev,	\
				   struct device_attribute *attr,	\
				   char *buf)			\
	{						\
		return sprintf(buf, format, ##args);	\
	}						\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(atomic_long_read(&loaned_pages)));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
  295. static ssize_t show_oom_pages(struct device *dev,
  296. struct device_attribute *attr, char *buf)
  297. {
  298. return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
  299. }
  300. static ssize_t store_oom_pages(struct device *dev,
  301. struct device_attribute *attr,
  302. const char *buf, size_t count)
  303. {
  304. unsigned long val = simple_strtoul (buf, NULL, 10);
  305. if (!capable(CAP_SYS_ADMIN))
  306. return -EPERM;
  307. if (val != 0)
  308. return -EBADMSG;
  309. oom_freed_pages = 0;
  310. return count;
  311. }
  312. static DEVICE_ATTR(oom_freed_kb, 0644,
  313. show_oom_pages, store_oom_pages);
/* Attributes created unconditionally under /sys/devices/system/cmm/cmm0. */
static struct device_attribute *cmm_attrs[] = {
	&dev_attr_loaned_kb,
	&dev_attr_loaned_target_kb,
	&dev_attr_oom_freed_kb,
};

/* Only exposed in simulation mode; see cmm_sysfs_register(). */
static DEVICE_ULONG_ATTR(simulate_loan_target_kb, 0644,
			 simulate_loan_target_kb);

static struct bus_type cmm_subsys = {
	.name = "cmm",
	.dev_name = "cmm",
};
/*
 * Empty release callback: cmm_dev is a static object with nothing to
 * free, but the driver core expects a release function to be present.
 */
static void cmm_release_device(struct device *dev)
{
}
/**
 * cmm_sysfs_register - Register with sysfs
 *
 * Registers the cmm subsystem and device and creates the attribute
 * files; in simulation mode the simulate_loan_target_kb attribute is
 * added as well.  On failure everything created so far is unwound.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int cmm_sysfs_register(struct device *dev)
{
	int i, rc;

	if ((rc = subsys_system_register(&cmm_subsys, NULL)))
		return rc;

	dev->id = 0;
	dev->bus = &cmm_subsys;
	dev->release = cmm_release_device;

	if ((rc = device_register(dev)))
		goto subsys_unregister;

	for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++) {
		if ((rc = device_create_file(dev, cmm_attrs[i])))
			goto fail;
	}

	if (!simulate)
		return 0;
	rc = device_create_file(dev, &dev_attr_simulate_loan_target_kb.attr);
	if (rc)
		goto fail;
	return 0;

fail:
	/* Remove only the attribute files created before the failure. */
	while (--i >= 0)
		device_remove_file(dev, cmm_attrs[i]);
	device_unregister(dev);
subsys_unregister:
	bus_unregister(&cmm_subsys);
	return rc;
}
  362. /**
  363. * cmm_unregister_sysfs - Unregister from sysfs
  364. *
  365. **/
  366. static void cmm_unregister_sysfs(struct device *dev)
  367. {
  368. int i;
  369. for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++)
  370. device_remove_file(dev, cmm_attrs[i]);
  371. device_unregister(dev);
  372. bus_unregister(&cmm_subsys);
  373. }
/**
 * cmm_reboot_notifier - Make sure pages are not still marked as "loaned"
 *
 * On restart, stops the balloon thread and returns all loaned pages to
 * the active state so the new kernel starts with its full memory.
 **/
static int cmm_reboot_notifier(struct notifier_block *nb,
			       unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		/* Stop the thread first so it cannot re-loan the pages. */
		if (cmm_thread_ptr)
			kthread_stop(cmm_thread_ptr);
		cmm_thread_ptr = NULL;
		cmm_free_pages(atomic_long_read(&loaned_pages));
	}
	return NOTIFY_DONE;
}

static struct notifier_block cmm_reboot_nb = {
	.notifier_call = cmm_reboot_notifier,
};
/**
 * cmm_memory_cb - Handle memory hotplug notifier calls
 * @self: notifier block struct
 * @action: action to take
 * @arg: struct memory_notify data for handler
 *
 * Note: the hotplug mutex is taken on MEM_GOING_OFFLINE and deliberately
 * held until the matching MEM_OFFLINE/MEM_CANCEL_OFFLINE callback, which
 * blocks cmm_alloc_pages()/cmm_thread() for the whole offline operation.
 *
 * Return value:
 * 	NOTIFY_OK or notifier error based on subfunction return value
 *
 **/
static int cmm_memory_cb(struct notifier_block *self,
			 unsigned long action, void *arg)
{
	switch (action) {
	case MEM_GOING_OFFLINE:
		mutex_lock(&hotplug_mutex);
		hotplug_occurred = 1;
		break;
	case MEM_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		mutex_unlock(&hotplug_mutex);
		cmm_dbg("Memory offline operation complete.\n");
		break;
	case MEM_GOING_ONLINE:
	case MEM_ONLINE:
	case MEM_CANCEL_ONLINE:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cmm_mem_nb = {
	.notifier_call = cmm_memory_cb,
	.priority = CMM_MEM_HOTPLUG_PRI
};
#ifdef CONFIG_BALLOON_COMPACTION
/*
 * Balloon compaction callback: move a loaned page to @newpage so the
 * physical page backing it can be migrated.  Returns MIGRATEPAGE_SUCCESS
 * or -EBUSY when the new page cannot be loaned right now.
 */
static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
			   struct page *newpage, struct page *page,
			   enum migrate_mode mode)
{
	unsigned long flags;

	/*
	 * loan/"inflate" the newpage first.
	 *
	 * We might race against the cmm_thread who might discover after our
	 * loan request that another page is to be unloaned. However, once
	 * the cmm_thread runs again later, this error will automatically
	 * be corrected.
	 */
	if (plpar_page_set_loaned(newpage)) {
		/* Unlikely, but possible. Tell the caller not to retry now. */
		pr_err_ratelimited("%s: Cannot set page to loaned.", __func__);
		return -EBUSY;
	}

	/* balloon page list reference */
	get_page(newpage);

	/*
	 * When we migrate a page to a different zone, we have to fixup the
	 * count of both involved zones as we adjusted the managed page count
	 * when inflating.
	 */
	if (page_zone(page) != page_zone(newpage)) {
		adjust_managed_page_count(page, 1);
		adjust_managed_page_count(newpage, -1);
	}

	/* Swap the pages on the balloon list under the pages_lock. */
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, newpage);
	balloon_page_delete(page);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	/*
	 * activate/"deflate" the old page. We ignore any errors just like the
	 * other callers.
	 */
	plpar_page_set_active(page);

	/* balloon page list reference */
	put_page(page);

	return MIGRATEPAGE_SUCCESS;
}

static void cmm_balloon_compaction_init(void)
{
	balloon_devinfo_init(&b_dev_info);
	b_dev_info.migratepage = cmm_migratepage;
}
#else /* CONFIG_BALLOON_COMPACTION */
static void cmm_balloon_compaction_init(void)
{
}
#endif /* CONFIG_BALLOON_COMPACTION */
/**
 * cmm_init - Module initialization
 *
 * Registers the OOM, reboot and memory-hotplug notifiers plus the sysfs
 * interface, then starts the balloon thread (unless disabled).  Errors
 * unwind everything registered so far via the goto chain.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int cmm_init(void)
{
	int rc;

	/* Without CMO firmware support, only simulation mode makes sense. */
	if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate)
		return -EOPNOTSUPP;

	cmm_balloon_compaction_init();

	rc = register_oom_notifier(&cmm_oom_nb);
	if (rc < 0)
		goto out_balloon_compaction;

	if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
		goto out_oom_notifier;

	if ((rc = cmm_sysfs_register(&cmm_dev)))
		goto out_reboot_notifier;

	rc = register_memory_notifier(&cmm_mem_nb);
	if (rc)
		goto out_unregister_notifier;

	/* When disabled, stay registered but do not start loaning. */
	if (cmm_disabled)
		return 0;

	cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
	if (IS_ERR(cmm_thread_ptr)) {
		rc = PTR_ERR(cmm_thread_ptr);
		goto out_unregister_notifier;
	}

	return 0;

out_unregister_notifier:
	unregister_memory_notifier(&cmm_mem_nb);
	cmm_unregister_sysfs(&cmm_dev);
out_reboot_notifier:
	unregister_reboot_notifier(&cmm_reboot_nb);
out_oom_notifier:
	unregister_oom_notifier(&cmm_oom_nb);
out_balloon_compaction:
	/* balloon_devinfo_init() needs no teardown; label kept for symmetry. */
	return rc;
}
/**
 * cmm_exit - Module exit
 *
 * Stops the thread first so no new pages are loaned, then unregisters
 * the notifiers, deflates the balloon completely and removes the sysfs
 * interface.
 *
 * Return value:
 * 	nothing
 **/
static void cmm_exit(void)
{
	if (cmm_thread_ptr)
		kthread_stop(cmm_thread_ptr);
	unregister_oom_notifier(&cmm_oom_nb);
	unregister_reboot_notifier(&cmm_reboot_nb);
	unregister_memory_notifier(&cmm_mem_nb);
	cmm_free_pages(atomic_long_read(&loaned_pages));
	cmm_unregister_sysfs(&cmm_dev);
}
/**
 * cmm_set_disable - Disable/Enable CMM
 *
 * Module parameter set handler for "disable": writing 1 stops the
 * balloon thread and deflates the balloon; writing 0 restarts the
 * thread.  Only 0 and 1 are accepted.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int cmm_set_disable(const char *val, const struct kernel_param *kp)
{
	int disable = simple_strtoul(val, NULL, 10);

	if (disable != 0 && disable != 1)
		return -EINVAL;

	if (disable && !cmm_disabled) {
		/* Stop the thread before handing all loaned pages back. */
		if (cmm_thread_ptr)
			kthread_stop(cmm_thread_ptr);
		cmm_thread_ptr = NULL;
		cmm_free_pages(atomic_long_read(&loaned_pages));
	} else if (!disable && cmm_disabled) {
		cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
		if (IS_ERR(cmm_thread_ptr))
			return PTR_ERR(cmm_thread_ptr);
	}

	cmm_disabled = disable;
	return 0;
}
/* "disable" goes through cmm_set_disable() so the thread is managed too. */
module_param_call(disable, cmm_set_disable, param_get_uint,
		  &cmm_disabled, 0644);
MODULE_PARM_DESC(disable, "Disable CMM. Set to 1 to disable. "
		 "[Default=" __stringify(CMM_DISABLE) "]");

module_init(cmm_init);
module_exit(cmm_exit);