// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/reboot.c
 *
 * Copyright (C) 2013 Linus Torvalds
 */

#define pr_fmt(fmt)	"reboot: " fmt

#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/export.h>
#include <linux/kexec.h>
#include <linux/kmod.h>
#include <linux/kmsg_dump.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/syscore_ops.h>
#include <linux/uaccess.h>

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */
static int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

#if defined(CONFIG_ARM)
#define DEFAULT_REBOOT_MODE		= REBOOT_HARD
#else
#define DEFAULT_REBOOT_MODE
#endif
enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
EXPORT_SYMBOL_GPL(reboot_mode);
enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;
EXPORT_SYMBOL_GPL(panic_reboot_mode);

/*
 * This variable is used privately to keep track of whether or not
 * reboot_type is still set to its default value (i.e., reboot= hasn't
 * been set on the command line). This is needed so that we can
 * suppress DMI scanning for reboot quirks. Without it, it's
 * impossible to override a faulty reboot quirk without recompiling.
 */
int reboot_default = 1;
int reboot_cpu;
enum reboot_type reboot_type = BOOT_ACPI;
int reboot_force;

struct sys_off_handler {
	struct notifier_block nb;
	int (*sys_off_cb)(struct sys_off_data *data);
	void *cb_data;
	enum sys_off_mode mode;
	bool blocking;
	void *list;
};

/*
 * Temporary stub that prevents linkage failure while we're in the process
 * of removing all uses of legacy pm_power_off() around the kernel.
 */
void __weak (*pm_power_off)(void);

/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system. This is called when we know we are in
 * trouble so this is our best effort to reboot. This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	kmsg_dump(KMSG_DUMP_EMERG);
	system_state = SYSTEM_RESTART;
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	usermodehelper_disable();
	device_shutdown();
}

/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register()
 * always returns zero.
 */
int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);

static void devm_unregister_reboot_notifier(struct device *dev, void *res)
{
	WARN_ON(unregister_reboot_notifier(*(struct notifier_block **)res));
}

int devm_register_reboot_notifier(struct device *dev, struct notifier_block *nb)
{
	struct notifier_block **rcnb;
	int ret;

	rcnb = devres_alloc(devm_unregister_reboot_notifier,
			    sizeof(*rcnb), GFP_KERNEL);
	if (!rcnb)
		return -ENOMEM;

	ret = register_reboot_notifier(nb);
	if (!ret) {
		*rcnb = nb;
		devres_add(dev, rcnb);
	} else {
		devres_free(rcnb);
	}

	return ret;
}
EXPORT_SYMBOL(devm_register_reboot_notifier);
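
/*
 * Usage sketch (illustrative, not part of this file): a driver that needs
 * to quiesce hardware before reboot can hook the reboot notifier chain.
 * The foo_* names below are hypothetical.
 *
 *	static int foo_reboot_notify(struct notifier_block *nb,
 *				     unsigned long action, void *unused)
 *	{
 *		// action is SYS_RESTART, SYS_HALT or SYS_POWER_OFF
 *		foo_quiesce_hardware();
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_reboot_nb = {
 *		.notifier_call = foo_reboot_notify,
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		return devm_register_reboot_notifier(&pdev->dev, &foo_reboot_nb);
 *	}
 */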

/*
 * Notifier list for kernel code which wants to be called
 * to restart the system.
 */
static ATOMIC_NOTIFIER_HEAD(restart_handler_list);

/**
 * register_restart_handler - Register function to be called to reset
 *			      the system
 * @nb: Info about handler function to be called
 * @nb->priority:	Handler priority. Handlers should follow the
 *			following guidelines for setting priorities.
 *			0:	Restart handler of last resort,
 *				with limited restart capabilities
 *			128:	Default restart handler; use if no other
 *				restart handler is expected to be available,
 *				and/or if restart functionality is
 *				sufficient to restart the entire system
 *			255:	Highest priority restart handler, will
 *				preempt all other restart handlers
 *
 * Registers a function with code to be called to restart the
 * system.
 *
 * Registered functions will be called from machine_restart as last
 * step of the restart sequence (if the architecture specific
 * machine_restart function calls do_kernel_restart - see below
 * for details).
 * Registered functions are expected to restart the system immediately.
 * If more than one function is registered, the restart handler priority
 * selects which function will be called first.
 *
 * Restart handlers are expected to be registered from non-architecture
 * code, typically from drivers. A typical use case would be a system
 * where restart functionality is provided through a watchdog. Multiple
 * restart handlers may exist; for example, one restart handler might
 * restart the entire system, while another only restarts the CPU.
 * In such cases, the restart handler which only restarts part of the
 * hardware is expected to register with low priority to ensure that
 * it only runs if no other means to restart the system is available.
 *
 * Currently always returns zero, as atomic_notifier_chain_register()
 * always returns zero.
 */
int register_restart_handler(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&restart_handler_list, nb);
}
EXPORT_SYMBOL(register_restart_handler);
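
/*
 * Usage sketch (illustrative, not part of this file): a watchdog driver
 * providing a board-level reset would typically register at the default
 * priority of 128. The foo_* names below are hypothetical.
 *
 *	static int foo_wdt_restart(struct notifier_block *nb,
 *				   unsigned long mode, void *cmd)
 *	{
 *		foo_wdt_trigger_reset();	// expected not to return on success
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_wdt_restart_nb = {
 *		.notifier_call	= foo_wdt_restart,
 *		.priority	= 128,
 *	};
 *
 *	register_restart_handler(&foo_wdt_restart_nb);
 */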

/**
 * unregister_restart_handler - Unregister previously registered
 *				restart handler
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered restart handler function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_restart_handler(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&restart_handler_list, nb);
}
EXPORT_SYMBOL(unregister_restart_handler);

/**
 * do_kernel_restart - Execute kernel restart handler call chain
 *
 * Calls functions registered with register_restart_handler.
 *
 * Expected to be called from machine_restart as last step of the restart
 * sequence.
 *
 * Restarts the system immediately if a restart handler function has been
 * registered. Otherwise does nothing.
 */
void do_kernel_restart(char *cmd)
{
	atomic_notifier_call_chain(&restart_handler_list, reboot_mode, cmd);
}

void migrate_to_reboot_cpu(void)
{
	/* The boot cpu is always logical cpu 0 */
	int cpu = reboot_cpu;

	cpu_hotplug_disable();

	/* Make certain the cpu I'm about to reboot on is online */
	if (!cpu_online(cpu))
		cpu = cpumask_first(cpu_online_mask);

	/* Prevent races with other tasks migrating this task */
	current->flags |= PF_NO_SETAFFINITY;

	/* Make certain I only run on the appropriate processor */
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
}

/*
 * Notifier list for kernel code which wants to be called
 * to prepare system for restart.
 */
static BLOCKING_NOTIFIER_HEAD(restart_prep_handler_list);

static void do_kernel_restart_prepare(void)
{
	blocking_notifier_call_chain(&restart_prep_handler_list, 0, NULL);
}

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *	 or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	do_kernel_restart_prepare();
	migrate_to_reboot_cpu();
	syscore_shutdown();
	if (!cmd)
		pr_emerg("Restarting system\n");
	else
		pr_emerg("Restarting system with command '%s'\n", cmd);
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

static void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	usermodehelper_disable();
	device_shutdown();
}

/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	migrate_to_reboot_cpu();
	syscore_shutdown();
	pr_emerg("System halted\n");
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_halt();
}
EXPORT_SYMBOL_GPL(kernel_halt);

/*
 * Notifier list for kernel code which wants to be called
 * to prepare system for power off.
 */
static BLOCKING_NOTIFIER_HEAD(power_off_prep_handler_list);

/*
 * Notifier list for kernel code which wants to be called
 * to power off system.
 */
static ATOMIC_NOTIFIER_HEAD(power_off_handler_list);

static int sys_off_notify(struct notifier_block *nb,
			  unsigned long mode, void *cmd)
{
	struct sys_off_handler *handler;
	struct sys_off_data data = {};

	handler = container_of(nb, struct sys_off_handler, nb);
	data.cb_data = handler->cb_data;
	data.mode = mode;
	data.cmd = cmd;

	return handler->sys_off_cb(&data);
}

static struct sys_off_handler platform_sys_off_handler;

static struct sys_off_handler *alloc_sys_off_handler(int priority)
{
	struct sys_off_handler *handler;
	gfp_t flags;

	/*
	 * Platforms like m68k can't allocate sys_off handler dynamically
	 * early during boot because the memory allocator isn't available yet.
	 */
	if (priority == SYS_OFF_PRIO_PLATFORM) {
		handler = &platform_sys_off_handler;
		if (handler->cb_data)
			return ERR_PTR(-EBUSY);
	} else {
		if (system_state > SYSTEM_RUNNING)
			flags = GFP_ATOMIC;
		else
			flags = GFP_KERNEL;

		handler = kzalloc(sizeof(*handler), flags);
		if (!handler)
			return ERR_PTR(-ENOMEM);
	}

	return handler;
}

static void free_sys_off_handler(struct sys_off_handler *handler)
{
	if (handler == &platform_sys_off_handler)
		memset(handler, 0, sizeof(*handler));
	else
		kfree(handler);
}

/**
 * register_sys_off_handler - Register sys-off handler
 * @mode: Sys-off mode
 * @priority: Handler priority
 * @callback: Callback function
 * @cb_data: Callback argument
 *
 * Registers system power-off or restart handler that will be invoked
 * at the step corresponding to the given sys-off mode. Handler's callback
 * should return NOTIFY_DONE to permit execution of the next handler in
 * the call chain or NOTIFY_STOP to break the chain (in error case for
 * example).
 *
 * Multiple handlers can be registered at the default priority level.
 *
 * Only one handler can be registered at the non-default priority level,
 * otherwise ERR_PTR(-EBUSY) is returned.
 *
 * Returns a new instance of struct sys_off_handler on success, or
 * an ERR_PTR()-encoded error code otherwise.
 */
struct sys_off_handler *
register_sys_off_handler(enum sys_off_mode mode,
			 int priority,
			 int (*callback)(struct sys_off_data *data),
			 void *cb_data)
{
	struct sys_off_handler *handler;
	int err;

	handler = alloc_sys_off_handler(priority);
	if (IS_ERR(handler))
		return handler;

	switch (mode) {
	case SYS_OFF_MODE_POWER_OFF_PREPARE:
		handler->list = &power_off_prep_handler_list;
		handler->blocking = true;
		break;

	case SYS_OFF_MODE_POWER_OFF:
		handler->list = &power_off_handler_list;
		break;

	case SYS_OFF_MODE_RESTART_PREPARE:
		handler->list = &restart_prep_handler_list;
		handler->blocking = true;
		break;

	case SYS_OFF_MODE_RESTART:
		handler->list = &restart_handler_list;
		break;

	default:
		free_sys_off_handler(handler);
		return ERR_PTR(-EINVAL);
	}

	handler->nb.notifier_call = sys_off_notify;
	handler->nb.priority = priority;
	handler->sys_off_cb = callback;
	handler->cb_data = cb_data;
	handler->mode = mode;

	if (handler->blocking) {
		if (priority == SYS_OFF_PRIO_DEFAULT)
			err = blocking_notifier_chain_register(handler->list,
							       &handler->nb);
		else
			err = blocking_notifier_chain_register_unique_prio(handler->list,
									   &handler->nb);
	} else {
		if (priority == SYS_OFF_PRIO_DEFAULT)
			err = atomic_notifier_chain_register(handler->list,
							     &handler->nb);
		else
			err = atomic_notifier_chain_register_unique_prio(handler->list,
									 &handler->nb);
	}

	if (err) {
		free_sys_off_handler(handler);
		return ERR_PTR(err);
	}

	return handler;
}
EXPORT_SYMBOL_GPL(register_sys_off_handler);
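
/*
 * Usage sketch (illustrative, not part of this file): a PMIC driver could
 * register a power-off handler through the sys-off API like this. The
 * foo_* names are hypothetical.
 *
 *	static int foo_pmic_power_off(struct sys_off_data *data)
 *	{
 *		struct foo_pmic *pmic = data->cb_data;
 *
 *		foo_pmic_write(pmic, FOO_REG_POWER, FOO_POWER_DOWN);
 *		return NOTIFY_DONE;	// let lower-priority handlers run too
 *	}
 *
 *	handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
 *					   SYS_OFF_PRIO_DEFAULT,
 *					   foo_pmic_power_off, pmic);
 *	if (IS_ERR(handler))
 *		return PTR_ERR(handler);
 */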

/**
 * unregister_sys_off_handler - Unregister sys-off handler
 * @handler: Sys-off handler
 *
 * Unregisters given sys-off handler.
 */
void unregister_sys_off_handler(struct sys_off_handler *handler)
{
	int err;

	if (IS_ERR_OR_NULL(handler))
		return;

	if (handler->blocking)
		err = blocking_notifier_chain_unregister(handler->list,
							 &handler->nb);
	else
		err = atomic_notifier_chain_unregister(handler->list,
						       &handler->nb);

	/* sanity check, shall never happen */
	WARN_ON(err);

	free_sys_off_handler(handler);
}
EXPORT_SYMBOL_GPL(unregister_sys_off_handler);

static void devm_unregister_sys_off_handler(void *data)
{
	struct sys_off_handler *handler = data;

	unregister_sys_off_handler(handler);
}

/**
 * devm_register_sys_off_handler - Register sys-off handler
 * @dev: Device that registers handler
 * @mode: Sys-off mode
 * @priority: Handler priority
 * @callback: Callback function
 * @cb_data: Callback argument
 *
 * Registers resource-managed sys-off handler.
 *
 * Returns zero on success, or error code on failure.
 */
int devm_register_sys_off_handler(struct device *dev,
				  enum sys_off_mode mode,
				  int priority,
				  int (*callback)(struct sys_off_data *data),
				  void *cb_data)
{
	struct sys_off_handler *handler;

	handler = register_sys_off_handler(mode, priority, callback, cb_data);
	if (IS_ERR(handler))
		return PTR_ERR(handler);

	return devm_add_action_or_reset(dev, devm_unregister_sys_off_handler,
					handler);
}
EXPORT_SYMBOL_GPL(devm_register_sys_off_handler);

/**
 * devm_register_power_off_handler - Register power-off handler
 * @dev: Device that registers callback
 * @callback: Callback function
 * @cb_data: Callback's argument
 *
 * Registers resource-managed sys-off handler with a default priority
 * and using power-off mode.
 *
 * Returns zero on success, or error code on failure.
 */
int devm_register_power_off_handler(struct device *dev,
				    int (*callback)(struct sys_off_data *data),
				    void *cb_data)
{
	return devm_register_sys_off_handler(dev,
					     SYS_OFF_MODE_POWER_OFF,
					     SYS_OFF_PRIO_DEFAULT,
					     callback, cb_data);
}
EXPORT_SYMBOL_GPL(devm_register_power_off_handler);
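
/*
 * Usage sketch (illustrative, not part of this file): in a driver's probe
 * routine the resource-managed variant removes the need for explicit
 * cleanup. foo_pmic_power_off() and foo_probe() are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_pmic *pmic;
 *
 *		pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
 *		if (!pmic)
 *			return -ENOMEM;
 *
 *		return devm_register_power_off_handler(&pdev->dev,
 *						       foo_pmic_power_off, pmic);
 *	}
 */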

/**
 * devm_register_restart_handler - Register restart handler
 * @dev: Device that registers callback
 * @callback: Callback function
 * @cb_data: Callback's argument
 *
 * Registers resource-managed sys-off handler with a default priority
 * and using restart mode.
 *
 * Returns zero on success, or error code on failure.
 */
int devm_register_restart_handler(struct device *dev,
				  int (*callback)(struct sys_off_data *data),
				  void *cb_data)
{
	return devm_register_sys_off_handler(dev,
					     SYS_OFF_MODE_RESTART,
					     SYS_OFF_PRIO_DEFAULT,
					     callback, cb_data);
}
EXPORT_SYMBOL_GPL(devm_register_restart_handler);

static struct sys_off_handler *platform_power_off_handler;

static int platform_power_off_notify(struct sys_off_data *data)
{
	void (*platform_power_power_off_cb)(void) = data->cb_data;

	platform_power_power_off_cb();

	return NOTIFY_DONE;
}

/**
 * register_platform_power_off - Register platform-level power-off callback
 * @power_off: Power-off callback
 *
 * Registers power-off callback that will be called as last step
 * of the power-off sequence. This callback is expected to be invoked
 * as a last resort. Only one platform power-off callback is allowed
 * to be registered at a time.
 *
 * Returns zero on success, or error code on failure.
 */
int register_platform_power_off(void (*power_off)(void))
{
	struct sys_off_handler *handler;

	handler = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
					   SYS_OFF_PRIO_PLATFORM,
					   platform_power_off_notify,
					   power_off);
	if (IS_ERR(handler))
		return PTR_ERR(handler);

	platform_power_off_handler = handler;

	return 0;
}
EXPORT_SYMBOL_GPL(register_platform_power_off);
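
/*
 * Usage sketch (illustrative, not part of this file): board or machine code
 * with no device to tie a handler to can register a plain function as the
 * last-resort power-off. foo_board_power_off() and the register names are
 * hypothetical.
 *
 *	static void foo_board_power_off(void)
 *	{
 *		writel(FOO_PWR_CUT, foo_pmu_base + FOO_PMU_CTRL);
 *	}
 *
 *	register_platform_power_off(foo_board_power_off);
 */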

/**
 * unregister_platform_power_off - Unregister platform-level power-off callback
 * @power_off: Power-off callback
 *
 * Unregisters previously registered platform power-off callback.
 */
void unregister_platform_power_off(void (*power_off)(void))
{
	if (platform_power_off_handler &&
	    platform_power_off_handler->cb_data == power_off) {
		unregister_sys_off_handler(platform_power_off_handler);
		platform_power_off_handler = NULL;
	}
}
EXPORT_SYMBOL_GPL(unregister_platform_power_off);

static int legacy_pm_power_off(struct sys_off_data *data)
{
	if (pm_power_off)
		pm_power_off();

	return NOTIFY_DONE;
}

static void do_kernel_power_off_prepare(void)
{
	blocking_notifier_call_chain(&power_off_prep_handler_list, 0, NULL);
}

/**
 * do_kernel_power_off - Execute kernel power-off handler call chain
 *
 * Expected to be called as last step of the power-off sequence.
 *
 * Powers off the system immediately if a power-off handler function has
 * been registered. Otherwise does nothing.
 */
void do_kernel_power_off(void)
{
	struct sys_off_handler *sys_off = NULL;

	/*
	 * Register sys-off handlers for legacy PM callback. This allows
	 * legacy PM callbacks to temporarily co-exist with the new sys-off API.
	 *
	 * TODO: Remove legacy handlers once all legacy PM users have been
	 * switched to the sys-off based APIs.
	 */
	if (pm_power_off)
		sys_off = register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
						   SYS_OFF_PRIO_DEFAULT,
						   legacy_pm_power_off, NULL);

	atomic_notifier_call_chain(&power_off_handler_list, 0, NULL);

	unregister_sys_off_handler(sys_off);
}

/**
 * kernel_can_power_off - check whether system can be powered off
 *
 * Returns true if power-off handler is registered and system can be
 * powered off, false otherwise.
 */
bool kernel_can_power_off(void)
{
	return !atomic_notifier_call_chain_is_empty(&power_off_handler_list) ||
		pm_power_off;
}
EXPORT_SYMBOL_GPL(kernel_can_power_off);

/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	do_kernel_power_off_prepare();
	migrate_to_reboot_cpu();
	syscore_shutdown();
	pr_emerg("Power down\n");
	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);

DEFINE_MUTEX(system_transition_mutex);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
		void __user *, arg)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	char buffer[256];
	int ret = 0;

	/* We only trust the superuser with rebooting the system. */
	if (!ns_capable(pid_ns->user_ns, CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/*
	 * If pid namespaces are enabled and the current task is in a child
	 * pid_namespace, the command is handled by reboot_pid_ns() which will
	 * call do_exit().
	 */
	ret = reboot_pid_ns(pid_ns, cmd);
	if (ret)
		return ret;

	/*
	 * Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !kernel_can_power_off())
		cmd = LINUX_REBOOT_CMD_HALT;

	mutex_lock(&system_transition_mutex);
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		do_exit(0);

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		ret = strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1);
		if (ret < 0) {
			ret = -EFAULT;
			break;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

#ifdef CONFIG_KEXEC_CORE
	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();
		break;
#endif

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		ret = hibernate();
		break;
#endif

	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&system_transition_mutex);
	return ret;
}
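
/*
 * Userspace usage sketch (illustrative, not part of this file): the caller
 * must hold CAP_SYS_BOOT in its pid namespace and should sync() first,
 * since this syscall does not sync. The "bootloader" argument string is
 * just an example value passed through to kernel_restart().
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/reboot.h>
 *
 *	sync();
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART2, "bootloader");
 */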

static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}

#define POWEROFF_CMD_PATH_LEN	256
static char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
static const char reboot_cmd[] = "/sbin/reboot";

static int run_cmd(const char *cmd)
{
	char **argv;
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	int ret;

	argv = argv_split(GFP_KERNEL, cmd, NULL);
	if (argv) {
		ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		argv_free(argv);
	} else {
		ret = -ENOMEM;
	}

	return ret;
}

static int __orderly_reboot(void)
{
	int ret;

	ret = run_cmd(reboot_cmd);

	if (ret) {
		pr_warn("Failed to start orderly reboot: forcing the issue\n");
		emergency_sync();
		kernel_restart(NULL);
	}

	return ret;
}

static int __orderly_poweroff(bool force)
{
	int ret;

	ret = run_cmd(poweroff_cmd);

	if (ret && force) {
		pr_warn("Failed to start orderly shutdown: forcing the issue\n");

		/*
		 * I guess this should try to kick off some daemon to sync and
		 * poweroff asap. Or not even bother syncing if we're doing an
		 * emergency shutdown?
		 */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
}

static bool poweroff_force;

static void poweroff_work_func(struct work_struct *work)
{
	__orderly_poweroff(poweroff_force);
}

static DECLARE_WORK(poweroff_work, poweroff_work_func);

/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails, it will force an immediate shutdown.
 */
void orderly_poweroff(bool force)
{
	if (force) /* do not override the pending "true" */
		poweroff_force = true;
	schedule_work(&poweroff_work);
}
EXPORT_SYMBOL_GPL(orderly_poweroff);

static void reboot_work_func(struct work_struct *work)
{
	__orderly_reboot();
}

static DECLARE_WORK(reboot_work, reboot_work_func);

/**
 * orderly_reboot - Trigger an orderly system reboot
 *
 * This may be called from any context to trigger a system reboot.
 * If the orderly reboot fails, it will force an immediate reboot.
 */
void orderly_reboot(void)
{
	schedule_work(&reboot_work);
}
EXPORT_SYMBOL_GPL(orderly_reboot);
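
/*
 * Usage sketch (illustrative, not part of this file): a driver detecting a
 * non-recoverable but non-urgent condition (e.g. a battery reporting
 * critically low charge) can request a clean userspace-driven shutdown,
 * forcing a kernel power-off only if /sbin/poweroff cannot be started.
 * foo_battery_is_critical() is a hypothetical helper.
 *
 *	if (foo_battery_is_critical(bat))
 *		orderly_poweroff(true);
 */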

/**
 * hw_failure_emergency_poweroff_func - emergency poweroff work after a known delay
 * @work: work_struct associated with the emergency poweroff function
 *
 * This function is called in very critical situations to force
 * a kernel poweroff after a configurable timeout value.
 */
static void hw_failure_emergency_poweroff_func(struct work_struct *work)
{
	/*
	 * We have reached here after the emergency shutdown waiting period has
	 * expired. This means orderly_poweroff has not been able to shut off
	 * the system for some reason.
	 *
	 * Try to shut down the system immediately using kernel_power_off
	 * if populated.
	 */
	pr_emerg("Hardware protection timed-out. Trying forced poweroff\n");
	kernel_power_off();

	/*
	 * Worst of the worst case trigger emergency restart
	 */
	pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
	emergency_restart();
}

static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
			    hw_failure_emergency_poweroff_func);

/**
 * hw_failure_emergency_poweroff - Trigger an emergency system poweroff
 *
 * This may be called from any critical situation to trigger a system shutdown
 * after a given period of time. If time is negative this is not scheduled.
 */
static void hw_failure_emergency_poweroff(int poweroff_delay_ms)
{
	if (poweroff_delay_ms <= 0)
		return;
	schedule_delayed_work(&hw_failure_emergency_poweroff_work,
			      msecs_to_jiffies(poweroff_delay_ms));
}

/**
 * hw_protection_shutdown - Trigger an emergency system poweroff
 *
 * @reason:		Reason of emergency shutdown to be printed.
 * @ms_until_forced:	Time to wait for orderly shutdown before triggering a
 *			forced shutdown. Negative value disables the forced
 *			shutdown.
 *
 * Initiate an emergency system shutdown in order to protect hardware from
 * further damage. Usage examples include thermal protection or a failing
 * voltage or current regulator.
 * NOTE: The request is ignored if protection shutdown is already pending even
 * if the previous request has given a large timeout for forced shutdown.
 * Can be called from any context.
 */
void hw_protection_shutdown(const char *reason, int ms_until_forced)
{
	static atomic_t allow_proceed = ATOMIC_INIT(1);

	pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason);

	/* Shutdown should be initiated only once. */
	if (!atomic_dec_and_test(&allow_proceed))
		return;

	/*
	 * Queue a backup emergency shutdown in the event of
	 * orderly_poweroff failure
	 */
	hw_failure_emergency_poweroff(ms_until_forced);
	orderly_poweroff(true);
}
EXPORT_SYMBOL_GPL(hw_protection_shutdown);
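
/*
 * Usage sketch (illustrative, not part of this file): a thermal or regulator
 * driver that detects imminent hardware damage can ask for an orderly
 * shutdown with a forced power-off fallback after a delay; the 5000 ms
 * timeout and foo_critical_trip_mC threshold below are hypothetical
 * example values.
 *
 *	if (temp_mC >= foo_critical_trip_mC)
 *		hw_protection_shutdown("Critical temperature reached", 5000);
 */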

static int __init reboot_setup(char *str)
{
	for (;;) {
		enum reboot_mode *mode;

		/*
		 * Having anything passed on the command line via
		 * reboot= will cause us to disable DMI checking
		 * below.
		 */
		reboot_default = 0;

		if (!strncmp(str, "panic_", 6)) {
			mode = &panic_reboot_mode;
			str += 6;
		} else {
			mode = &reboot_mode;
		}

		switch (*str) {
		case 'w':
			*mode = REBOOT_WARM;
			break;

		case 'c':
			*mode = REBOOT_COLD;
			break;

		case 'h':
			*mode = REBOOT_HARD;
			break;

		case 's':
			/*
			 * reboot_cpu is s[mp]#### with #### being the processor
			 * to be used for rebooting. Skip 's' or 'smp' prefix.
			 */
			str += str[1] == 'm' && str[2] == 'p' ? 3 : 1;

			if (isdigit(str[0])) {
				int cpu = simple_strtoul(str, NULL, 0);

				if (cpu >= num_possible_cpus()) {
					pr_err("Ignoring the CPU number in reboot= option. "
					       "CPU %d exceeds possible cpu number %d\n",
					       cpu, num_possible_cpus());
					break;
				}
				reboot_cpu = cpu;
			} else
				*mode = REBOOT_SOFT;
			break;

		case 'g':
			*mode = REBOOT_GPIO;
			break;

		case 'b':
		case 'a':
		case 'k':
		case 't':
		case 'e':
		case 'p':
			reboot_type = *str;
			break;

		case 'f':
			reboot_force = 1;
			break;
		}

		str = strchr(str, ',');
		if (str)
			str++;
		else
			break;
	}
	return 1;
}
__setup("reboot=", reboot_setup);
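
/*
 * Command-line usage sketch (derived from the parser above, shown for
 * reference): reboot= takes a comma-separated list where each token selects
 * a mode (warm/cold/hard/soft/gpio, matched by its first letter), a reboot
 * type letter (b/a/k/t/e/p), "force", an "s<N>"/"smp<N>" CPU number, or a
 * "panic_"-prefixed mode applied only to panic reboots, e.g.:
 *
 *	reboot=warm
 *	reboot=cold,force
 *	reboot=efi,smp2
 *	reboot=panic_warm
 */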

#ifdef CONFIG_SYSFS

#define REBOOT_COLD_STR		"cold"
#define REBOOT_WARM_STR		"warm"
#define REBOOT_HARD_STR		"hard"
#define REBOOT_SOFT_STR		"soft"
#define REBOOT_GPIO_STR		"gpio"
#define REBOOT_UNDEFINED_STR	"undefined"

#define BOOT_TRIPLE_STR		"triple"
#define BOOT_KBD_STR		"kbd"
#define BOOT_BIOS_STR		"bios"
#define BOOT_ACPI_STR		"acpi"
#define BOOT_EFI_STR		"efi"
#define BOOT_PCI_STR		"pci"

static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	const char *val;

	switch (reboot_mode) {
	case REBOOT_COLD:
		val = REBOOT_COLD_STR;
		break;
	case REBOOT_WARM:
		val = REBOOT_WARM_STR;
		break;
	case REBOOT_HARD:
		val = REBOOT_HARD_STR;
		break;
	case REBOOT_SOFT:
		val = REBOOT_SOFT_STR;
		break;
	case REBOOT_GPIO:
		val = REBOOT_GPIO_STR;
		break;
	default:
		val = REBOOT_UNDEFINED_STR;
	}

	return sprintf(buf, "%s\n", val);
}

static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
{
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	if (!strncmp(buf, REBOOT_COLD_STR, strlen(REBOOT_COLD_STR)))
		reboot_mode = REBOOT_COLD;
	else if (!strncmp(buf, REBOOT_WARM_STR, strlen(REBOOT_WARM_STR)))
		reboot_mode = REBOOT_WARM;
	else if (!strncmp(buf, REBOOT_HARD_STR, strlen(REBOOT_HARD_STR)))
		reboot_mode = REBOOT_HARD;
	else if (!strncmp(buf, REBOOT_SOFT_STR, strlen(REBOOT_SOFT_STR)))
		reboot_mode = REBOOT_SOFT;
	else if (!strncmp(buf, REBOOT_GPIO_STR, strlen(REBOOT_GPIO_STR)))
		reboot_mode = REBOOT_GPIO;
	else
		return -EINVAL;

	reboot_default = 0;

	return count;
}
static struct kobj_attribute reboot_mode_attr = __ATTR_RW(mode);

#ifdef CONFIG_X86
static ssize_t force_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", reboot_force);
}

static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	bool res;

	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	if (kstrtobool(buf, &res))
		return -EINVAL;

	reboot_default = 0;
	reboot_force = res;

	return count;
}
static struct kobj_attribute reboot_force_attr = __ATTR_RW(force);

static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	const char *val;

	switch (reboot_type) {
	case BOOT_TRIPLE:
		val = BOOT_TRIPLE_STR;
		break;
	case BOOT_KBD:
		val = BOOT_KBD_STR;
		break;
	case BOOT_BIOS:
		val = BOOT_BIOS_STR;
		break;
	case BOOT_ACPI:
		val = BOOT_ACPI_STR;
		break;
	case BOOT_EFI:
		val = BOOT_EFI_STR;
		break;
	case BOOT_CF9_FORCE:
		val = BOOT_PCI_STR;
		break;
	default:
		val = REBOOT_UNDEFINED_STR;
	}

	return sprintf(buf, "%s\n", val);
}

static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr,
			  const char *buf, size_t count)
{
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	if (!strncmp(buf, BOOT_TRIPLE_STR, strlen(BOOT_TRIPLE_STR)))
		reboot_type = BOOT_TRIPLE;
	else if (!strncmp(buf, BOOT_KBD_STR, strlen(BOOT_KBD_STR)))
		reboot_type = BOOT_KBD;
	else if (!strncmp(buf, BOOT_BIOS_STR, strlen(BOOT_BIOS_STR)))
		reboot_type = BOOT_BIOS;
	else if (!strncmp(buf, BOOT_ACPI_STR, strlen(BOOT_ACPI_STR)))
		reboot_type = BOOT_ACPI;
	else if (!strncmp(buf, BOOT_EFI_STR, strlen(BOOT_EFI_STR)))
		reboot_type = BOOT_EFI;
	else if (!strncmp(buf, BOOT_PCI_STR, strlen(BOOT_PCI_STR)))
		reboot_type = BOOT_CF9_FORCE;
	else
		return -EINVAL;

	reboot_default = 0;

	return count;
}
static struct kobj_attribute reboot_type_attr = __ATTR_RW(type);
#endif

#ifdef CONFIG_SMP
static ssize_t cpu_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", reboot_cpu);
}

static ssize_t cpu_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count)
{
	unsigned int cpunum;
	int rc;

	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	rc = kstrtouint(buf, 0, &cpunum);
	if (rc)
		return rc;

	if (cpunum >= num_possible_cpus())
		return -ERANGE;

	reboot_default = 0;
	reboot_cpu = cpunum;

	return count;
}
static struct kobj_attribute reboot_cpu_attr = __ATTR_RW(cpu);
#endif

static struct attribute *reboot_attrs[] = {
	&reboot_mode_attr.attr,
#ifdef CONFIG_X86
	&reboot_force_attr.attr,
	&reboot_type_attr.attr,
#endif
#ifdef CONFIG_SMP
	&reboot_cpu_attr.attr,
#endif
	NULL,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table kern_reboot_table[] = {
	{
		.procname	= "poweroff_cmd",
		.data		= &poweroff_cmd,
		.maxlen		= POWEROFF_CMD_PATH_LEN,
		.mode		= 0644,
		.proc_handler	= proc_dostring,
	},
	{
		.procname	= "ctrl-alt-del",
		.data		= &C_A_D,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static void __init kernel_reboot_sysctls_init(void)
{
	register_sysctl_init("kernel", kern_reboot_table);
}
#else
#define kernel_reboot_sysctls_init() do { } while (0)
#endif /* CONFIG_SYSCTL */

static const struct attribute_group reboot_attr_group = {
	.attrs = reboot_attrs,
};

static int __init reboot_ksysfs_init(void)
{
	struct kobject *reboot_kobj;
	int ret;

	reboot_kobj = kobject_create_and_add("reboot", kernel_kobj);
	if (!reboot_kobj)
		return -ENOMEM;

	ret = sysfs_create_group(reboot_kobj, &reboot_attr_group);
	if (ret) {
		kobject_put(reboot_kobj);
		return ret;
	}

	kernel_reboot_sysctls_init();

	return 0;
}
late_initcall(reboot_ksysfs_init);

#endif