  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * watchdog_dev.c
  4. *
  5. * (c) Copyright 2008-2011 Alan Cox <[email protected]>,
  6. * All Rights Reserved.
  7. *
  8. * (c) Copyright 2008-2011 Wim Van Sebroeck <[email protected]>.
  9. *
  10. * (c) Copyright 2021 Hewlett Packard Enterprise Development LP.
  11. *
  12. * This source code is part of the generic code that can be used
  13. * by all the watchdog timer drivers.
  14. *
  15. * This part of the generic code takes care of the following
  16. * misc device: /dev/watchdog.
  17. *
  18. * Based on source code of the following authors:
  19. * Matt Domsch <[email protected]>,
  20. * Rob Radez <[email protected]>,
  21. * Rusty Lynch <[email protected]>
  22. * Satyam Sharma <[email protected]>
  23. * Randy Dunlap <[email protected]>
  24. *
  25. * Neither Alan Cox, CymruNet Ltd., Wim Van Sebroeck nor Iguana vzw.
  26. * admit liability nor provide warranty for any of this software.
  27. * This material is provided "AS-IS" and at no charge.
  28. */
  29. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  30. #include <linux/cdev.h> /* For character device */
  31. #include <linux/errno.h> /* For the -ENODEV/... values */
  32. #include <linux/fs.h> /* For file operations */
  33. #include <linux/init.h> /* For __init/__exit/... */
  34. #include <linux/hrtimer.h> /* For hrtimers */
  35. #include <linux/kernel.h> /* For printk/panic/... */
  36. #include <linux/kthread.h> /* For kthread_work */
  37. #include <linux/miscdevice.h> /* For handling misc devices */
  38. #include <linux/module.h> /* For module stuff/... */
  39. #include <linux/mutex.h> /* For mutexes */
  40. #include <linux/slab.h> /* For memory functions */
  41. #include <linux/types.h> /* For standard types (like size_t) */
  42. #include <linux/watchdog.h> /* For watchdog specific items */
  43. #include <linux/uaccess.h> /* For copy_to_user/put_user/... */
  44. #include "watchdog_core.h"
  45. #include "watchdog_pretimeout.h"
  46. #include <trace/events/watchdog.h>
  47. /* the dev_t structure to store the dynamically allocated watchdog devices */
  48. static dev_t watchdog_devt;
  49. /* Reference to watchdog device behind /dev/watchdog */
  50. static struct watchdog_core_data *old_wd_data;
  51. static struct kthread_worker *watchdog_kworker;
  52. static bool handle_boot_enabled =
  53. IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED);
  54. static unsigned open_timeout = CONFIG_WATCHDOG_OPEN_TIMEOUT;
  55. static bool watchdog_past_open_deadline(struct watchdog_core_data *data)
  56. {
  57. return ktime_after(ktime_get(), data->open_deadline);
  58. }
  59. static void watchdog_set_open_deadline(struct watchdog_core_data *data)
  60. {
  61. data->open_deadline = open_timeout ?
  62. ktime_get() + ktime_set(open_timeout, 0) : KTIME_MAX;
  63. }
  64. static inline bool watchdog_need_worker(struct watchdog_device *wdd)
  65. {
  66. /* All variables in milli-seconds */
  67. unsigned int hm = wdd->max_hw_heartbeat_ms;
  68. unsigned int t = wdd->timeout * 1000;
  69. /*
  70. * A worker to generate heartbeat requests is needed if all of the
  71. * following conditions are true.
  72. * - Userspace activated the watchdog.
  73. * - The driver provided a value for the maximum hardware timeout, and
  74. * thus is aware that the framework supports generating heartbeat
  75. * requests.
  76. * - Userspace requests a longer timeout than the hardware can handle.
  77. *
  78. * Alternatively, if userspace has not opened the watchdog
  79. * device, we take care of feeding the watchdog if it is
  80. * running.
  81. */
  82. return (hm && watchdog_active(wdd) && t > hm) ||
  83. (t && !watchdog_active(wdd) && watchdog_hw_running(wdd));
  84. }
/*
 * watchdog_next_keepalive - compute the delay until the next worker ping
 * @wdd: The watchdog device
 *
 * Returns the relative time the keepalive hrtimer should be armed with.
 * Reads last_keepalive and open_deadline, so the caller must hold
 * wd_data->lock.
 */
static ktime_t watchdog_next_keepalive(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	unsigned int timeout_ms = wdd->timeout * 1000;
	ktime_t keepalive_interval;
	ktime_t last_heartbeat, latest_heartbeat;
	ktime_t virt_timeout;
	unsigned int hw_heartbeat_ms;

	if (watchdog_active(wdd))
		/* Virtual expiry: wdd->timeout after the last userspace ping. */
		virt_timeout = ktime_add(wd_data->last_keepalive,
					 ms_to_ktime(timeout_ms));
	else
		/* Not opened by userspace: feed only until the open deadline. */
		virt_timeout = wd_data->open_deadline;

	/* Default cadence: half the effective hardware heartbeat. */
	hw_heartbeat_ms = min_not_zero(timeout_ms, wdd->max_hw_heartbeat_ms);
	keepalive_interval = ms_to_ktime(hw_heartbeat_ms / 2);

	/*
	 * To ensure that the watchdog times out wdd->timeout seconds
	 * after the most recent ping from userspace, the last
	 * worker ping has to come in hw_heartbeat_ms before this timeout.
	 */
	last_heartbeat = ktime_sub(virt_timeout, ms_to_ktime(hw_heartbeat_ms));
	latest_heartbeat = ktime_sub(last_heartbeat, ktime_get());

	/* May be negative: the worker must ping immediately. */
	if (ktime_before(latest_heartbeat, keepalive_interval))
		return latest_heartbeat;

	return keepalive_interval;
}
  111. static inline void watchdog_update_worker(struct watchdog_device *wdd)
  112. {
  113. struct watchdog_core_data *wd_data = wdd->wd_data;
  114. if (watchdog_need_worker(wdd)) {
  115. ktime_t t = watchdog_next_keepalive(wdd);
  116. if (t > 0)
  117. hrtimer_start(&wd_data->timer, t,
  118. HRTIMER_MODE_REL_HARD);
  119. } else {
  120. hrtimer_cancel(&wd_data->timer);
  121. }
  122. }
/*
 * __watchdog_ping - hardware-level keepalive
 * @wdd: The watchdog device to ping
 *
 * Feed the hardware, honouring the driver's minimum interval between
 * hardware accesses (min_hw_heartbeat_ms). If that interval has not yet
 * elapsed, the ping is deferred by re-arming the hrtimer and 0 is
 * returned; the worker delivers the ping later.
 * The caller must hold wd_data->lock.
 *
 * Return: 0 on success or deferral, error otherwise.
 */
static int __watchdog_ping(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ktime_t earliest_keepalive, now;
	int err;

	/* Earliest moment the hardware may be touched again. */
	earliest_keepalive = ktime_add(wd_data->last_hw_keepalive,
				       ms_to_ktime(wdd->min_hw_heartbeat_ms));
	now = ktime_get();

	if (ktime_after(earliest_keepalive, now)) {
		/* Too soon: defer to the worker once the interval expires. */
		hrtimer_start(&wd_data->timer,
			      ktime_sub(earliest_keepalive, now),
			      HRTIMER_MODE_REL_HARD);
		return 0;
	}

	wd_data->last_hw_keepalive = now;

	if (wdd->ops->ping) {
		err = wdd->ops->ping(wdd);  /* ping the watchdog */
		trace_watchdog_ping(wdd, err);
	} else {
		/* No ping op: restarting the watchdog acts as a ping. */
		err = wdd->ops->start(wdd); /* restart watchdog */
		trace_watchdog_start(wdd, err);
	}

	if (err == 0)
		watchdog_hrtimer_pretimeout_start(wdd);

	watchdog_update_worker(wdd);

	return err;
}
  150. /*
  151. * watchdog_ping - ping the watchdog
  152. * @wdd: The watchdog device to ping
  153. *
  154. * If the watchdog has no own ping operation then it needs to be
  155. * restarted via the start operation. This wrapper function does
  156. * exactly that.
  157. * We only ping when the watchdog device is running.
  158. * The caller must hold wd_data->lock.
  159. *
  160. * Return: 0 on success, error otherwise.
  161. */
  162. static int watchdog_ping(struct watchdog_device *wdd)
  163. {
  164. struct watchdog_core_data *wd_data = wdd->wd_data;
  165. if (!watchdog_active(wdd) && !watchdog_hw_running(wdd))
  166. return 0;
  167. set_bit(_WDOG_KEEPALIVE, &wd_data->status);
  168. wd_data->last_keepalive = ktime_get();
  169. return __watchdog_ping(wdd);
  170. }
  171. static bool watchdog_worker_should_ping(struct watchdog_core_data *wd_data)
  172. {
  173. struct watchdog_device *wdd = wd_data->wdd;
  174. if (!wdd)
  175. return false;
  176. if (watchdog_active(wdd))
  177. return true;
  178. return watchdog_hw_running(wdd) && !watchdog_past_open_deadline(wd_data);
  179. }
  180. static void watchdog_ping_work(struct kthread_work *work)
  181. {
  182. struct watchdog_core_data *wd_data;
  183. wd_data = container_of(work, struct watchdog_core_data, work);
  184. mutex_lock(&wd_data->lock);
  185. if (watchdog_worker_should_ping(wd_data))
  186. __watchdog_ping(wd_data->wdd);
  187. mutex_unlock(&wd_data->lock);
  188. }
  189. static enum hrtimer_restart watchdog_timer_expired(struct hrtimer *timer)
  190. {
  191. struct watchdog_core_data *wd_data;
  192. wd_data = container_of(timer, struct watchdog_core_data, timer);
  193. kthread_queue_work(watchdog_kworker, &wd_data->work);
  194. return HRTIMER_NORESTART;
  195. }
  196. /*
  197. * watchdog_start - wrapper to start the watchdog
  198. * @wdd: The watchdog device to start
  199. *
  200. * Start the watchdog if it is not active and mark it active.
  201. * The caller must hold wd_data->lock.
  202. *
  203. * Return: 0 on success or a negative errno code for failure.
  204. */
static int watchdog_start(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;
	ktime_t started_at;
	int err;

	if (watchdog_active(wdd))
		return 0;

	set_bit(_WDOG_KEEPALIVE, &wd_data->status);

	/* Sample the start time before touching the hardware. */
	started_at = ktime_get();
	if (watchdog_hw_running(wdd) && wdd->ops->ping) {
		/* Hardware already running: a ping is enough to take over. */
		err = __watchdog_ping(wdd);
		if (err == 0) {
			set_bit(WDOG_ACTIVE, &wdd->status);
			watchdog_hrtimer_pretimeout_start(wdd);
		}
	} else {
		err = wdd->ops->start(wdd);
		trace_watchdog_start(wdd, err);
		if (err == 0) {
			set_bit(WDOG_ACTIVE, &wdd->status);
			/* Both timestamps reset to the pre-start sample. */
			wd_data->last_keepalive = started_at;
			wd_data->last_hw_keepalive = started_at;
			watchdog_update_worker(wdd);
			watchdog_hrtimer_pretimeout_start(wdd);
		}
	}

	return err;
}
  233. /*
  234. * watchdog_stop - wrapper to stop the watchdog
  235. * @wdd: The watchdog device to stop
  236. *
  237. * Stop the watchdog if it is still active and unmark it active.
  238. * If the 'nowayout' feature was set, the watchdog cannot be stopped.
  239. * The caller must hold wd_data->lock.
  240. *
  241. * Return: 0 on success or a negative errno code for failure.
  242. */
static int watchdog_stop(struct watchdog_device *wdd)
{
	int err = 0;

	if (!watchdog_active(wdd))
		return 0;

	if (test_bit(WDOG_NO_WAY_OUT, &wdd->status)) {
		pr_info("watchdog%d: nowayout prevents watchdog being stopped!\n",
			wdd->id);
		return -EBUSY;
	}

	if (wdd->ops->stop) {
		clear_bit(WDOG_HW_RUNNING, &wdd->status);
		err = wdd->ops->stop(wdd);
		trace_watchdog_stop(wdd, err);
	} else {
		/*
		 * No stop op: the hardware cannot be halted, so flag it
		 * as still running and let the kernel keep feeding it.
		 */
		set_bit(WDOG_HW_RUNNING, &wdd->status);
	}

	if (err == 0) {
		clear_bit(WDOG_ACTIVE, &wdd->status);
		watchdog_update_worker(wdd);
		watchdog_hrtimer_pretimeout_stop(wdd);
	}

	return err;
}
  267. /*
  268. * watchdog_get_status - wrapper to get the watchdog status
  269. * @wdd: The watchdog device to get the status from
  270. *
  271. * Get the watchdog's status flags.
  272. * The caller must hold wd_data->lock.
  273. *
  274. * Return: watchdog's status flags.
  275. */
  276. static unsigned int watchdog_get_status(struct watchdog_device *wdd)
  277. {
  278. struct watchdog_core_data *wd_data = wdd->wd_data;
  279. unsigned int status;
  280. if (wdd->ops->status)
  281. status = wdd->ops->status(wdd);
  282. else
  283. status = wdd->bootstatus & (WDIOF_CARDRESET |
  284. WDIOF_OVERHEAT |
  285. WDIOF_FANFAULT |
  286. WDIOF_EXTERN1 |
  287. WDIOF_EXTERN2 |
  288. WDIOF_POWERUNDER |
  289. WDIOF_POWEROVER);
  290. if (test_bit(_WDOG_ALLOW_RELEASE, &wd_data->status))
  291. status |= WDIOF_MAGICCLOSE;
  292. if (test_and_clear_bit(_WDOG_KEEPALIVE, &wd_data->status))
  293. status |= WDIOF_KEEPALIVEPING;
  294. if (IS_ENABLED(CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT))
  295. status |= WDIOF_PRETIMEOUT;
  296. return status;
  297. }
  298. /*
  299. * watchdog_set_timeout - set the watchdog timer timeout
  300. * @wdd: The watchdog device to set the timeout for
  301. * @timeout: Timeout to set in seconds
  302. *
  303. * The caller must hold wd_data->lock.
  304. *
  305. * Return: 0 if successful, error otherwise.
  306. */
  307. static int watchdog_set_timeout(struct watchdog_device *wdd,
  308. unsigned int timeout)
  309. {
  310. int err = 0;
  311. if (!(wdd->info->options & WDIOF_SETTIMEOUT))
  312. return -EOPNOTSUPP;
  313. if (watchdog_timeout_invalid(wdd, timeout))
  314. return -EINVAL;
  315. if (wdd->ops->set_timeout) {
  316. err = wdd->ops->set_timeout(wdd, timeout);
  317. trace_watchdog_set_timeout(wdd, timeout, err);
  318. } else {
  319. wdd->timeout = timeout;
  320. /* Disable pretimeout if it doesn't fit the new timeout */
  321. if (wdd->pretimeout >= wdd->timeout)
  322. wdd->pretimeout = 0;
  323. }
  324. watchdog_update_worker(wdd);
  325. return err;
  326. }
  327. /*
  328. * watchdog_set_pretimeout - set the watchdog timer pretimeout
  329. * @wdd: The watchdog device to set the timeout for
  330. * @timeout: pretimeout to set in seconds
  331. *
  332. * Return: 0 if successful, error otherwise.
  333. */
  334. static int watchdog_set_pretimeout(struct watchdog_device *wdd,
  335. unsigned int timeout)
  336. {
  337. int err = 0;
  338. if (!watchdog_have_pretimeout(wdd))
  339. return -EOPNOTSUPP;
  340. if (watchdog_pretimeout_invalid(wdd, timeout))
  341. return -EINVAL;
  342. if (wdd->ops->set_pretimeout && (wdd->info->options & WDIOF_PRETIMEOUT))
  343. err = wdd->ops->set_pretimeout(wdd, timeout);
  344. else
  345. wdd->pretimeout = timeout;
  346. return err;
  347. }
  348. /*
  349. * watchdog_get_timeleft - wrapper to get the time left before a reboot
  350. * @wdd: The watchdog device to get the remaining time from
  351. * @timeleft: The time that's left
  352. *
  353. * Get the time before a watchdog will reboot (if not pinged).
  354. * The caller must hold wd_data->lock.
  355. *
  356. * Return: 0 if successful, error otherwise.
  357. */
  358. static int watchdog_get_timeleft(struct watchdog_device *wdd,
  359. unsigned int *timeleft)
  360. {
  361. *timeleft = 0;
  362. if (!wdd->ops->get_timeleft)
  363. return -EOPNOTSUPP;
  364. *timeleft = wdd->ops->get_timeleft(wdd);
  365. return 0;
  366. }
  367. #ifdef CONFIG_WATCHDOG_SYSFS
  368. static ssize_t nowayout_show(struct device *dev, struct device_attribute *attr,
  369. char *buf)
  370. {
  371. struct watchdog_device *wdd = dev_get_drvdata(dev);
  372. return sysfs_emit(buf, "%d\n", !!test_bit(WDOG_NO_WAY_OUT,
  373. &wdd->status));
  374. }
  375. static ssize_t nowayout_store(struct device *dev, struct device_attribute *attr,
  376. const char *buf, size_t len)
  377. {
  378. struct watchdog_device *wdd = dev_get_drvdata(dev);
  379. unsigned int value;
  380. int ret;
  381. ret = kstrtouint(buf, 0, &value);
  382. if (ret)
  383. return ret;
  384. if (value > 1)
  385. return -EINVAL;
  386. /* nowayout cannot be disabled once set */
  387. if (test_bit(WDOG_NO_WAY_OUT, &wdd->status) && !value)
  388. return -EPERM;
  389. watchdog_set_nowayout(wdd, value);
  390. return len;
  391. }
  392. static DEVICE_ATTR_RW(nowayout);
  393. static ssize_t status_show(struct device *dev, struct device_attribute *attr,
  394. char *buf)
  395. {
  396. struct watchdog_device *wdd = dev_get_drvdata(dev);
  397. struct watchdog_core_data *wd_data = wdd->wd_data;
  398. unsigned int status;
  399. mutex_lock(&wd_data->lock);
  400. status = watchdog_get_status(wdd);
  401. mutex_unlock(&wd_data->lock);
  402. return sysfs_emit(buf, "0x%x\n", status);
  403. }
  404. static DEVICE_ATTR_RO(status);
  405. static ssize_t bootstatus_show(struct device *dev,
  406. struct device_attribute *attr, char *buf)
  407. {
  408. struct watchdog_device *wdd = dev_get_drvdata(dev);
  409. return sysfs_emit(buf, "%u\n", wdd->bootstatus);
  410. }
  411. static DEVICE_ATTR_RO(bootstatus);
  412. static ssize_t timeleft_show(struct device *dev, struct device_attribute *attr,
  413. char *buf)
  414. {
  415. struct watchdog_device *wdd = dev_get_drvdata(dev);
  416. struct watchdog_core_data *wd_data = wdd->wd_data;
  417. ssize_t status;
  418. unsigned int val;
  419. mutex_lock(&wd_data->lock);
  420. status = watchdog_get_timeleft(wdd, &val);
  421. mutex_unlock(&wd_data->lock);
  422. if (!status)
  423. status = sysfs_emit(buf, "%u\n", val);
  424. return status;
  425. }
  426. static DEVICE_ATTR_RO(timeleft);
  427. static ssize_t timeout_show(struct device *dev, struct device_attribute *attr,
  428. char *buf)
  429. {
  430. struct watchdog_device *wdd = dev_get_drvdata(dev);
  431. return sysfs_emit(buf, "%u\n", wdd->timeout);
  432. }
  433. static DEVICE_ATTR_RO(timeout);
  434. static ssize_t min_timeout_show(struct device *dev,
  435. struct device_attribute *attr, char *buf)
  436. {
  437. struct watchdog_device *wdd = dev_get_drvdata(dev);
  438. return sysfs_emit(buf, "%u\n", wdd->min_timeout);
  439. }
  440. static DEVICE_ATTR_RO(min_timeout);
  441. static ssize_t max_timeout_show(struct device *dev,
  442. struct device_attribute *attr, char *buf)
  443. {
  444. struct watchdog_device *wdd = dev_get_drvdata(dev);
  445. return sysfs_emit(buf, "%u\n", wdd->max_timeout);
  446. }
  447. static DEVICE_ATTR_RO(max_timeout);
  448. static ssize_t pretimeout_show(struct device *dev,
  449. struct device_attribute *attr, char *buf)
  450. {
  451. struct watchdog_device *wdd = dev_get_drvdata(dev);
  452. return sysfs_emit(buf, "%u\n", wdd->pretimeout);
  453. }
  454. static DEVICE_ATTR_RO(pretimeout);
  455. static ssize_t identity_show(struct device *dev, struct device_attribute *attr,
  456. char *buf)
  457. {
  458. struct watchdog_device *wdd = dev_get_drvdata(dev);
  459. return sysfs_emit(buf, "%s\n", wdd->info->identity);
  460. }
  461. static DEVICE_ATTR_RO(identity);
  462. static ssize_t state_show(struct device *dev, struct device_attribute *attr,
  463. char *buf)
  464. {
  465. struct watchdog_device *wdd = dev_get_drvdata(dev);
  466. if (watchdog_active(wdd))
  467. return sysfs_emit(buf, "active\n");
  468. return sysfs_emit(buf, "inactive\n");
  469. }
  470. static DEVICE_ATTR_RO(state);
  471. static ssize_t pretimeout_available_governors_show(struct device *dev,
  472. struct device_attribute *attr, char *buf)
  473. {
  474. return watchdog_pretimeout_available_governors_get(buf);
  475. }
  476. static DEVICE_ATTR_RO(pretimeout_available_governors);
  477. static ssize_t pretimeout_governor_show(struct device *dev,
  478. struct device_attribute *attr,
  479. char *buf)
  480. {
  481. struct watchdog_device *wdd = dev_get_drvdata(dev);
  482. return watchdog_pretimeout_governor_get(wdd, buf);
  483. }
  484. static ssize_t pretimeout_governor_store(struct device *dev,
  485. struct device_attribute *attr,
  486. const char *buf, size_t count)
  487. {
  488. struct watchdog_device *wdd = dev_get_drvdata(dev);
  489. int ret = watchdog_pretimeout_governor_set(wdd, buf);
  490. if (!ret)
  491. ret = count;
  492. return ret;
  493. }
  494. static DEVICE_ATTR_RW(pretimeout_governor);
  495. static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr,
  496. int n)
  497. {
  498. struct device *dev = kobj_to_dev(kobj);
  499. struct watchdog_device *wdd = dev_get_drvdata(dev);
  500. umode_t mode = attr->mode;
  501. if (attr == &dev_attr_timeleft.attr && !wdd->ops->get_timeleft)
  502. mode = 0;
  503. else if (attr == &dev_attr_pretimeout.attr && !watchdog_have_pretimeout(wdd))
  504. mode = 0;
  505. else if ((attr == &dev_attr_pretimeout_governor.attr ||
  506. attr == &dev_attr_pretimeout_available_governors.attr) &&
  507. (!watchdog_have_pretimeout(wdd) || !IS_ENABLED(CONFIG_WATCHDOG_PRETIMEOUT_GOV)))
  508. mode = 0;
  509. return mode;
  510. }
/*
 * Default sysfs attributes for a watchdog device. Visibility of the
 * optional entries (timeleft, pretimeout, governor files) is decided
 * at runtime by wdt_is_visible().
 */
static struct attribute *wdt_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_identity.attr,
	&dev_attr_timeout.attr,
	&dev_attr_min_timeout.attr,
	&dev_attr_max_timeout.attr,
	&dev_attr_pretimeout.attr,
	&dev_attr_timeleft.attr,
	&dev_attr_bootstatus.attr,
	&dev_attr_status.attr,
	&dev_attr_nowayout.attr,
	&dev_attr_pretimeout_governor.attr,
	&dev_attr_pretimeout_available_governors.attr,
	NULL,
};

static const struct attribute_group wdt_group = {
	.attrs = wdt_attrs,
	.is_visible = wdt_is_visible,
};
/* Generates the wdt_groups[] array used by the device class below. */
__ATTRIBUTE_GROUPS(wdt);
  531. #else
  532. #define wdt_groups NULL
  533. #endif
  534. /*
  535. * watchdog_ioctl_op - call the watchdog drivers ioctl op if defined
  536. * @wdd: The watchdog device to do the ioctl on
  537. * @cmd: Watchdog command
  538. * @arg: Argument pointer
  539. *
  540. * The caller must hold wd_data->lock.
  541. *
  542. * Return: 0 if successful, error otherwise.
  543. */
  544. static int watchdog_ioctl_op(struct watchdog_device *wdd, unsigned int cmd,
  545. unsigned long arg)
  546. {
  547. if (!wdd->ops->ioctl)
  548. return -ENOIOCTLCMD;
  549. return wdd->ops->ioctl(wdd, cmd, arg);
  550. }
  551. /*
  552. * watchdog_write - writes to the watchdog
  553. * @file: File from VFS
  554. * @data: User address of data
  555. * @len: Length of data
  556. * @ppos: Pointer to the file offset
  557. *
  558. * A write to a watchdog device is defined as a keepalive ping.
  559. * Writing the magic 'V' sequence allows the next close to turn
  560. * off the watchdog (if 'nowayout' is not set).
  561. *
  562. * Return: @len if successful, error otherwise.
  563. */
  564. static ssize_t watchdog_write(struct file *file, const char __user *data,
  565. size_t len, loff_t *ppos)
  566. {
  567. struct watchdog_core_data *wd_data = file->private_data;
  568. struct watchdog_device *wdd;
  569. int err;
  570. size_t i;
  571. char c;
  572. if (len == 0)
  573. return 0;
  574. /*
  575. * Note: just in case someone wrote the magic character
  576. * five months ago...
  577. */
  578. clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
  579. /* scan to see whether or not we got the magic character */
  580. for (i = 0; i != len; i++) {
  581. if (get_user(c, data + i))
  582. return -EFAULT;
  583. if (c == 'V')
  584. set_bit(_WDOG_ALLOW_RELEASE, &wd_data->status);
  585. }
  586. /* someone wrote to us, so we send the watchdog a keepalive ping */
  587. err = -ENODEV;
  588. mutex_lock(&wd_data->lock);
  589. wdd = wd_data->wdd;
  590. if (wdd)
  591. err = watchdog_ping(wdd);
  592. mutex_unlock(&wd_data->lock);
  593. if (err < 0)
  594. return err;
  595. return len;
  596. }
  597. /*
  598. * watchdog_ioctl - handle the different ioctl's for the watchdog device
  599. * @file: File handle to the device
  600. * @cmd: Watchdog command
  601. * @arg: Argument pointer
  602. *
  603. * The watchdog API defines a common set of functions for all watchdogs
  604. * according to their available features.
  605. *
  606. * Return: 0 if successful, error otherwise.
  607. */
static long watchdog_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	struct watchdog_core_data *wd_data = file->private_data;
	void __user *argp = (void __user *)arg;
	struct watchdog_device *wdd;
	int __user *p = argp;
	unsigned int val;
	int err;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd) {
		/* Device was unregistered while the file was open. */
		err = -ENODEV;
		goto out_ioctl;
	}

	/* Give the driver's own ioctl op first shot at the command. */
	err = watchdog_ioctl_op(wdd, cmd, arg);
	if (err != -ENOIOCTLCMD)
		goto out_ioctl;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		err = copy_to_user(argp, wdd->info,
				   sizeof(struct watchdog_info)) ? -EFAULT : 0;
		break;
	case WDIOC_GETSTATUS:
		/* Note: watchdog_get_status() consumes the keepalive flag. */
		val = watchdog_get_status(wdd);
		err = put_user(val, p);
		break;
	case WDIOC_GETBOOTSTATUS:
		err = put_user(wdd->bootstatus, p);
		break;
	case WDIOC_SETOPTIONS:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		/* Disable first so disable+enable acts as a restart. */
		if (val & WDIOS_DISABLECARD) {
			err = watchdog_stop(wdd);
			if (err < 0)
				break;
		}
		if (val & WDIOS_ENABLECARD)
			err = watchdog_start(wdd);
		break;
	case WDIOC_KEEPALIVE:
		if (!(wdd->info->options & WDIOF_KEEPALIVEPING)) {
			err = -EOPNOTSUPP;
			break;
		}
		err = watchdog_ping(wdd);
		break;
	case WDIOC_SETTIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_timeout(wdd, val);
		if (err < 0)
			break;
		/* If the watchdog is active then we send a keepalive ping
		 * to make sure that the watchdog keep's running (and if
		 * possible that it takes the new timeout) */
		err = watchdog_ping(wdd);
		if (err < 0)
			break;
		fallthrough;	/* SETTIMEOUT returns the effective timeout */
	case WDIOC_GETTIMEOUT:
		/* timeout == 0 means that we don't know the timeout */
		if (wdd->timeout == 0) {
			err = -EOPNOTSUPP;
			break;
		}
		err = put_user(wdd->timeout, p);
		break;
	case WDIOC_GETTIMELEFT:
		err = watchdog_get_timeleft(wdd, &val);
		if (err < 0)
			break;
		err = put_user(val, p);
		break;
	case WDIOC_SETPRETIMEOUT:
		if (get_user(val, p)) {
			err = -EFAULT;
			break;
		}
		err = watchdog_set_pretimeout(wdd, val);
		break;
	case WDIOC_GETPRETIMEOUT:
		err = put_user(wdd->pretimeout, p);
		break;
	default:
		err = -ENOTTY;
		break;
	}

out_ioctl:
	mutex_unlock(&wd_data->lock);
	return err;
}
  705. /*
  706. * watchdog_open - open the /dev/watchdog* devices
  707. * @inode: Inode of device
  708. * @file: File handle to device
  709. *
  710. * When the /dev/watchdog* device gets opened, we start the watchdog.
  711. * Watch out: the /dev/watchdog device is single open, so we make sure
  712. * it can only be opened once.
  713. *
  714. * Return: 0 if successful, error otherwise.
  715. */
static int watchdog_open(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data;
	struct watchdog_device *wdd;
	bool hw_running;
	int err;

	/* Get the corresponding watchdog device */
	if (imajor(inode) == MISC_MAJOR)
		wd_data = old_wd_data;	/* legacy /dev/watchdog misc node */
	else
		wd_data = container_of(inode->i_cdev, struct watchdog_core_data,
				       cdev);

	/* the watchdog is single open! */
	if (test_and_set_bit(_WDOG_DEV_OPEN, &wd_data->status))
		return -EBUSY;

	wdd = wd_data->wdd;

	/*
	 * If the /dev/watchdog device is open, we don't want the module
	 * to be unloaded.
	 */
	hw_running = watchdog_hw_running(wdd);
	if (!hw_running && !try_module_get(wdd->ops->owner)) {
		err = -EBUSY;
		goto out_clear;
	}

	err = watchdog_start(wdd);
	if (err < 0)
		goto out_mod;

	file->private_data = wd_data;

	/* Pair the module ref taken above with a device ref until release. */
	if (!hw_running)
		get_device(&wd_data->dev);

	/*
	 * open_timeout only applies for the first open from
	 * userspace. Set open_deadline to infinity so that the kernel
	 * will take care of an always-running hardware watchdog in
	 * case the device gets magic-closed or WDIOS_DISABLECARD is
	 * applied.
	 */
	wd_data->open_deadline = KTIME_MAX;

	/* dev/watchdog is a virtual (and thus non-seekable) filesystem */
	return stream_open(inode, file);

out_mod:
	/*
	 * NOTE(review): this puts a module reference even when hw_running
	 * was true and try_module_get() was never called above — confirm
	 * the refcount balance on this error path.
	 */
	module_put(wd_data->wdd->ops->owner);
out_clear:
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);
	return err;
}
  763. static void watchdog_core_data_release(struct device *dev)
  764. {
  765. struct watchdog_core_data *wd_data;
  766. wd_data = container_of(dev, struct watchdog_core_data, dev);
  767. kfree(wd_data);
  768. }
  769. /*
  770. * watchdog_release - release the watchdog device
  771. * @inode: Inode of device
  772. * @file: File handle to device
  773. *
  774. * This is the code for when /dev/watchdog gets closed. We will only
  775. * stop the watchdog when we have received the magic char (and nowayout
  776. * was not set), else the watchdog will keep running.
  777. *
  778. * Always returns 0.
  779. */
static int watchdog_release(struct inode *inode, struct file *file)
{
	struct watchdog_core_data *wd_data = file->private_data;
	struct watchdog_device *wdd;
	int err = -EBUSY;
	bool running;

	mutex_lock(&wd_data->lock);

	wdd = wd_data->wdd;
	if (!wdd)
		goto done;	/* device was unregistered while open */

	/*
	 * We only stop the watchdog if we received the magic character
	 * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then
	 * watchdog_stop will fail.
	 */
	if (!watchdog_active(wdd))
		err = 0;
	else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) ||
		 !(wdd->info->options & WDIOF_MAGICCLOSE))
		err = watchdog_stop(wdd);

	/* If the watchdog was not stopped, send a keepalive ping */
	if (err < 0) {
		pr_crit("watchdog%d: watchdog did not stop!\n", wdd->id);
		watchdog_ping(wdd);
	}

	watchdog_update_worker(wdd);

	/* make sure that /dev/watchdog can be re-opened */
	clear_bit(_WDOG_DEV_OPEN, &wd_data->status);

done:
	/* Sample the running state under the lock before releasing refs. */
	running = wdd && watchdog_hw_running(wdd);
	mutex_unlock(&wd_data->lock);
	/*
	 * Allow the owner module to be unloaded again unless the watchdog
	 * is still running. If the watchdog is still running, it can not
	 * be stopped, and its driver must not be unloaded.
	 */
	if (!running) {
		module_put(wd_data->cdev.owner);
		put_device(&wd_data->dev);
	}
	return 0;
}
/* File operations shared by the per-device /dev/watchdogN char devices
 * and the legacy /dev/watchdog miscdevice. No .read: the API is
 * write/ioctl based. */
static const struct file_operations watchdog_fops = {
	.owner = THIS_MODULE,
	.write = watchdog_write,
	.unlocked_ioctl = watchdog_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = watchdog_open,
	.release = watchdog_release,
};
/* Legacy /dev/watchdog node; registered only for the watchdog with id 0
 * (see watchdog_cdev_register()). */
static struct miscdevice watchdog_miscdev = {
	.minor = WATCHDOG_MINOR,
	.name = "watchdog",
	.fops = &watchdog_fops,
};
/* Device class backing /sys/class/watchdog; wdt_groups supplies the
 * per-device sysfs attributes. */
static struct class watchdog_class = {
	.name = "watchdog",
	.owner = THIS_MODULE,
	.dev_groups = wdt_groups,
};
  840. /*
  841. * watchdog_cdev_register - register watchdog character device
  842. * @wdd: Watchdog device
  843. *
  844. * Register a watchdog character device including handling the legacy
  845. * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
  846. * thus we set it up like that.
  847. *
  848. * Return: 0 if successful, error otherwise.
  849. */
static int watchdog_cdev_register(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data;
	int err;

	wd_data = kzalloc(sizeof(struct watchdog_core_data), GFP_KERNEL);
	if (!wd_data)
		return -ENOMEM;
	mutex_init(&wd_data->lock);

	/* Cross-link core data and watchdog device before anything can race */
	wd_data->wdd = wdd;
	wdd->wd_data = wd_data;

	/* The ping worker relies on the kworker set up in watchdog_dev_init() */
	if (IS_ERR_OR_NULL(watchdog_kworker)) {
		kfree(wd_data);
		return -ENODEV;
	}

	device_initialize(&wd_data->dev);
	wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id);
	wd_data->dev.class = &watchdog_class;
	wd_data->dev.parent = wdd->parent;
	wd_data->dev.groups = wdd->groups;
	wd_data->dev.release = watchdog_core_data_release;
	dev_set_drvdata(&wd_data->dev, wdd);
	err = dev_set_name(&wd_data->dev, "watchdog%d", wdd->id);
	if (err) {
		/* After device_initialize(), put_device() frees via
		 * watchdog_core_data_release() — do not kfree() directly. */
		put_device(&wd_data->dev);
		return err;
	}

	kthread_init_work(&wd_data->work, watchdog_ping_work);
	hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	wd_data->timer.function = watchdog_timer_expired;
	watchdog_hrtimer_pretimeout_init(wdd);

	/* The first registered watchdog also backs the legacy /dev/watchdog */
	if (wdd->id == 0) {
		old_wd_data = wd_data;
		watchdog_miscdev.parent = wdd->parent;
		err = misc_register(&watchdog_miscdev);
		if (err != 0) {
			pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
			       wdd->info->identity, WATCHDOG_MINOR, err);
			if (err == -EBUSY)
				pr_err("%s: a legacy watchdog module is probably present.\n",
				       wdd->info->identity);
			old_wd_data = NULL;
			put_device(&wd_data->dev);
			return err;
		}
	}

	/* Fill in the data structures */
	cdev_init(&wd_data->cdev, &watchdog_fops);

	/* Add the device */
	err = cdev_device_add(&wd_data->cdev, &wd_data->dev);
	if (err) {
		pr_err("watchdog%d unable to add device %d:%d\n",
		       wdd->id, MAJOR(watchdog_devt), wdd->id);
		if (wdd->id == 0) {
			misc_deregister(&watchdog_miscdev);
			old_wd_data = NULL;
		}
		put_device(&wd_data->dev);
		return err;
	}

	/* Owner used by watchdog_release() for the module_put() pairing */
	wd_data->cdev.owner = wdd->ops->owner;

	/* Record time of most recent heartbeat as 'just before now'. */
	wd_data->last_hw_keepalive = ktime_sub(ktime_get(), 1);

	watchdog_set_open_deadline(wd_data);

	/*
	 * If the watchdog is running, prevent its driver from being unloaded,
	 * and schedule an immediate ping.
	 */
	if (watchdog_hw_running(wdd)) {
		__module_get(wdd->ops->owner);
		get_device(&wd_data->dev);
		if (handle_boot_enabled)
			hrtimer_start(&wd_data->timer, 0,
				      HRTIMER_MODE_REL_HARD);
		else
			pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
				wdd->id);
	}

	return 0;
}
  929. /*
  930. * watchdog_cdev_unregister - unregister watchdog character device
  931. * @wdd: Watchdog device
  932. *
  933. * Unregister watchdog character device and if needed the legacy
  934. * /dev/watchdog device.
  935. */
static void watchdog_cdev_unregister(struct watchdog_device *wdd)
{
	struct watchdog_core_data *wd_data = wdd->wd_data;

	/* Remove the char device first so no new opens can start */
	cdev_device_del(&wd_data->cdev, &wd_data->dev);
	if (wdd->id == 0) {
		misc_deregister(&watchdog_miscdev);
		old_wd_data = NULL;
	}

	if (watchdog_active(wdd) &&
	    test_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status)) {
		watchdog_stop(wdd);
	}

	watchdog_hrtimer_pretimeout_stop(wdd);

	/*
	 * Sever the wdd link under the lock: open file descriptors that are
	 * still around (watchdog_release() et al.) check wd_data->wdd for
	 * NULL and bail out instead of touching the departed device.
	 */
	mutex_lock(&wd_data->lock);
	wd_data->wdd = NULL;
	wdd->wd_data = NULL;
	mutex_unlock(&wd_data->lock);

	/* Stop the kernel-side ping machinery before dropping our reference */
	hrtimer_cancel(&wd_data->timer);
	kthread_cancel_work_sync(&wd_data->work);

	put_device(&wd_data->dev);
}
  957. /**
  958. * watchdog_dev_register - register a watchdog device
  959. * @wdd: Watchdog device
  960. *
  961. * Register a watchdog device including handling the legacy
  962. * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
  963. * thus we set it up like that.
  964. *
  965. * Return: 0 if successful, error otherwise.
  966. */
  967. int watchdog_dev_register(struct watchdog_device *wdd)
  968. {
  969. int ret;
  970. ret = watchdog_cdev_register(wdd);
  971. if (ret)
  972. return ret;
  973. ret = watchdog_register_pretimeout(wdd);
  974. if (ret)
  975. watchdog_cdev_unregister(wdd);
  976. return ret;
  977. }
  978. /**
  979. * watchdog_dev_unregister - unregister a watchdog device
  980. * @wdd: watchdog device
  981. *
  982. * Unregister watchdog device and if needed the legacy
  983. * /dev/watchdog device.
  984. */
void watchdog_dev_unregister(struct watchdog_device *wdd)
{
	/* Tear down in reverse order of watchdog_dev_register() */
	watchdog_unregister_pretimeout(wdd);
	watchdog_cdev_unregister(wdd);
}
  990. /**
  991. * watchdog_set_last_hw_keepalive - set last HW keepalive time for watchdog
  992. * @wdd: Watchdog device
  993. * @last_ping_ms: Time since last HW heartbeat
  994. *
  995. * Adjusts the last known HW keepalive time for a watchdog timer.
  996. * This is needed if the watchdog is already running when the probe
  997. * function is called, and it can't be pinged immediately. This
  998. * function must be called immediately after watchdog registration,
  999. * and min_hw_heartbeat_ms must be set for this to be useful.
  1000. *
  1001. * Return: 0 if successful, error otherwise.
  1002. */
  1003. int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd,
  1004. unsigned int last_ping_ms)
  1005. {
  1006. struct watchdog_core_data *wd_data;
  1007. ktime_t now;
  1008. if (!wdd)
  1009. return -EINVAL;
  1010. wd_data = wdd->wd_data;
  1011. now = ktime_get();
  1012. wd_data->last_hw_keepalive = ktime_sub(now, ms_to_ktime(last_ping_ms));
  1013. if (watchdog_hw_running(wdd) && handle_boot_enabled)
  1014. return __watchdog_ping(wdd);
  1015. return 0;
  1016. }
  1017. EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive);
  1018. /**
  1019. * watchdog_dev_init - init dev part of watchdog core
  1020. *
  1021. * Allocate a range of chardev nodes to use for watchdog devices.
  1022. *
  1023. * Return: 0 if successful, error otherwise.
  1024. */
  1025. int __init watchdog_dev_init(void)
  1026. {
  1027. int err;
  1028. watchdog_kworker = kthread_create_worker(0, "watchdogd");
  1029. if (IS_ERR(watchdog_kworker)) {
  1030. pr_err("Failed to create watchdog kworker\n");
  1031. return PTR_ERR(watchdog_kworker);
  1032. }
  1033. sched_set_fifo(watchdog_kworker->task);
  1034. err = class_register(&watchdog_class);
  1035. if (err < 0) {
  1036. pr_err("couldn't register class\n");
  1037. goto err_register;
  1038. }
  1039. err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
  1040. if (err < 0) {
  1041. pr_err("watchdog: unable to allocate char dev region\n");
  1042. goto err_alloc;
  1043. }
  1044. return 0;
  1045. err_alloc:
  1046. class_unregister(&watchdog_class);
  1047. err_register:
  1048. kthread_destroy_worker(watchdog_kworker);
  1049. return err;
  1050. }
  1051. /**
  1052. * watchdog_dev_exit - exit dev part of watchdog core
  1053. *
  1054. * Release the range of chardev nodes used for watchdog devices.
  1055. */
void __exit watchdog_dev_exit(void)
{
	/* Undo watchdog_dev_init() in reverse order */
	unregister_chrdev_region(watchdog_devt, MAX_DOGS);
	class_unregister(&watchdog_class);
	kthread_destroy_worker(watchdog_kworker);
}
  1062. int watchdog_dev_suspend(struct watchdog_device *wdd)
  1063. {
  1064. struct watchdog_core_data *wd_data = wdd->wd_data;
  1065. int ret = 0;
  1066. if (!wdd->wd_data)
  1067. return -ENODEV;
  1068. /* ping for the last time before suspend */
  1069. mutex_lock(&wd_data->lock);
  1070. if (watchdog_worker_should_ping(wd_data))
  1071. ret = __watchdog_ping(wd_data->wdd);
  1072. mutex_unlock(&wd_data->lock);
  1073. if (ret)
  1074. return ret;
  1075. /*
  1076. * make sure that watchdog worker will not kick in when the wdog is
  1077. * suspended
  1078. */
  1079. hrtimer_cancel(&wd_data->timer);
  1080. kthread_cancel_work_sync(&wd_data->work);
  1081. return 0;
  1082. }
  1083. int watchdog_dev_resume(struct watchdog_device *wdd)
  1084. {
  1085. struct watchdog_core_data *wd_data = wdd->wd_data;
  1086. int ret = 0;
  1087. if (!wdd->wd_data)
  1088. return -ENODEV;
  1089. /*
  1090. * __watchdog_ping will also retrigger hrtimer and therefore restore the
  1091. * ping worker if needed.
  1092. */
  1093. mutex_lock(&wd_data->lock);
  1094. if (watchdog_worker_should_ping(wd_data))
  1095. ret = __watchdog_ping(wd_data->wdd);
  1096. mutex_unlock(&wd_data->lock);
  1097. return ret;
  1098. }
/* If set, the kernel pings a watchdog found already running at boot
 * until userspace opens the device (read-only after load). */
module_param(handle_boot_enabled, bool, 0444);
MODULE_PARM_DESC(handle_boot_enabled,
	"Watchdog core auto-updates boot enabled watchdogs before userspace takes over (default="
	__MODULE_STRING(IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) ")");

/* Grace period for userspace to open a running watchdog; see
 * watchdog_set_open_deadline() users above. */
module_param(open_timeout, uint, 0644);
MODULE_PARM_DESC(open_timeout,
	"Maximum time (in seconds, 0 means infinity) for userspace to take over a running watchdog (default="
	__MODULE_STRING(CONFIG_WATCHDOG_OPEN_TIMEOUT) ")");