sclp.c

// SPDX-License-Identifier: GPL-2.0
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <[email protected]>
 *	      Martin Schwidefsky <[email protected]>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/panic_notifier.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>
#include <asm/debug.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

struct sclp_trace_entry {
	char id[4] __nonstring;
	u32 a;
	u64 b;
};

#define SCLP_TRACE_ENTRY_SIZE		sizeof(struct sclp_trace_entry)
#define SCLP_TRACE_MAX_SIZE		128
#define SCLP_TRACE_EVENT_MAX_SIZE	64

/* Debug trace area intended for all entries in abbreviated form. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
			 &debug_hex_ascii_view);

/* Error trace area intended for full entries relating to failed requests. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
			 SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static LIST_HEAD(sclp_reg_list);

/* List of queued requests. */
static LIST_HEAD(sclp_req_queue);

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;

/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;

/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

/* The currently active SCLP command word. */
static sclp_cmdw_t active_cmd;
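
/*
 * Trace helpers: each entry is written to the "sclp" debug area at the given
 * priority and, when @err is set, duplicated to the "sclp_err" area that is
 * reserved for failed requests.
 */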
static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
{
	struct sclp_trace_entry e;

	memset(&e, 0, sizeof(e));
	strncpy(e.id, id, sizeof(e.id));
	e.a = a;
	e.b = b;
	debug_event(&sclp_debug, prio, &e, sizeof(e));
	if (err)
		debug_event(&sclp_debug_err, 0, &e, sizeof(e));
}

static inline int no_zeroes_len(void *data, int len)
{
	char *d = data;

	/* Minimize trace area usage by not tracing trailing zeroes. */
	while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0)
		len--;

	return len;
}

static inline void sclp_trace_bin(int prio, void *d, int len, int errlen)
{
	debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len));
	if (errlen)
		debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen));
}

static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb)
{
	struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1);
	int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE;

	/* Full SCCB tracing if debug level is set to max. */
	if (sclp_debug.level == DEBUG_MAX_LEVEL)
		return len;

	/* Minimal tracing for console writes. */
	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA &&
	    (evbuf->type == EVTYP_MSG || evbuf->type == EVTYP_VT220MSG))
		limit = SCLP_TRACE_ENTRY_SIZE;

	return min(len, limit);
}

static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b,
				   sclp_cmdw_t cmd, struct sccb_header *sccb,
				   bool err)
{
	sclp_trace(prio, id, a, b, err);
	if (sccb) {
		sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb),
			       err ? sccb->length : 0);
	}
}

static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b,
				    struct evbuf_header *evbuf, bool err)
{
	sclp_trace(prio, id, a, b, err);
	sclp_trace_bin(prio + 1, evbuf,
		       min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE),
		       err ? evbuf->length : 0);
}

static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
				  bool err)
{
	struct sccb_header *sccb = req->sccb;
	union {
		struct {
			u16 status;
			u16 response;
			u16 timeout;
			u16 start_count;
		};
		u64 b;
	} summary;

	summary.status = req->status;
	summary.response = sccb ? sccb->response_code : 0;
	summary.timeout = (u16)req->queue_timeout;
	summary.start_count = (u16)req->start_count;

	sclp_trace(prio, id, __pa(sccb), summary.b, err);
}

static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
				       struct sclp_register *reg)
{
	struct {
		u64 receive;
		u64 send;
	} d;

	d.receive = reg->receive_mask;
	d.send = reg->send_mask;

	sclp_trace(prio, id, a, b, false);
	sclp_trace_bin(prio, &d, sizeof(d), 0);
}
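
/* Handlers for the sclp_con_pages= and sclp_con_drop= kernel parameters. */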
static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc)
		sclp_console_drop = drop;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds.*/
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
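
/* Queue a read event data request if none is pending yet. Called while
 * sclp_lock is locked. */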
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = cb;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

static void sclp_request_timeout_restart(struct timer_list *unused)
{
	sclp_request_timeout(true);
}

static void sclp_request_timeout_normal(struct timer_list *unused)
{
	sclp_request_timeout(false);
}

/* Request timeout handler. Restart the request queue. If force_restart,
 * force restart of running request. */
static void sclp_request_timeout(bool force_restart)
{
	unsigned long flags;

	/* TMO: A timeout occurred (a=force_restart) */
	sclp_trace(2, "TMO", force_restart, 0, true);

	spin_lock_irqsave(&sclp_lock, flags);
	if (force_restart) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(struct timer_list *unused)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();

		if (req) {
			/* RQTM: Request timed out (a=sccb, b=summary) */
			sclp_trace_req(2, "RQTM", req, true);
		}

		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}
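
/* Issue a service call and trace it: SRV1 is recorded before the call with
 * the command and SCCB address, SRV2 afterwards with the return code and a
 * running sequence number. */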
static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb)
{
	static u64 srvc_count;
	int rc;

	/* SRV1: Service call about to be issued (a=command, b=sccb address) */
	sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);

	rc = sclp_service_call(command, sccb);

	/* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
	sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);

	if (rc == 0)
		active_cmd = command;

	return rc;
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call_trace(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout_restart);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout_normal);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);

		/* RQAB: Request aborted (a=sccb, b=summary) */
		sclp_trace_req(2, "RQAB", req, true);

		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
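
/* Return non-zero if the request may be queued in the current driver state.
 * The init mask request is always accepted. Called while sclp_lock is
 * locked. */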
static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_init_req)
		return 1;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}

	/* RQAD: Request was added (a=sccb, b=caller) */
	sclp_trace(2, "RQAD", __pa(req->sccb), _RET_IP_, false);

	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
				break;
			else
				reg = NULL;
		}

		/* EVNT: Event callback (b=receiver) */
		sclp_trace_evbuf(2, "EVNT", 0, reg ? (u64)reg->receiver_fn : 0,
				 evbuf, !reg);

		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
					     sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == __pa(req->sccb))
			return req;
	}
	return NULL;
}
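
/* Check whether an SCCB carries a success response code and, for outgoing
 * events, whether the event-processed flag is set. Used for error tracing
 * in the interrupt handler. */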
static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
{
	struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int);
	struct evbuf_header *evbuf;
	u16 response;

	if (!sccb)
		return true;

	/* Check SCCB response. */
	response = sccb->response_code & 0xff;
	if (response != 0x10 && response != 0x20)
		return false;

	/* Check event-processed flag on outgoing events. */
	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) {
		evbuf = (struct evbuf_header *)(sccb + 1);
		if (!(evbuf->flags & 0x80))
			return false;
	}

	return true;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;

	/* INT: Interrupt received (a=intparm, b=cmd) */
	sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
			(struct sccb_header *)__va(finished_sccb),
			!ok_response(finished_sccb, active_cmd));

	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;

			/* RQOK: Request success (a=sccb, b=summary) */
			sclp_trace_req(2, "RQOK", req, false);

			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		} else {
			/* UNEX: Unexpected SCCB completion (a=sccb address) */
			sclp_trace(0, "UNEX", finished_sccb, 0, true);
		}
		sclp_running_state = sclp_running_state_idle;
		active_cmd = 0;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	static u64 sync_count;
	u64 timeout;
	int irq_context;

	/* SYN1: Synchronous wait start (a=runstate, b=sync count) */
	sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false);

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_sync |= 1UL << (63 - 54);
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (get_tod_clock_fast() > timeout && del_timer(&sclp_request_timer))
			sclp_request_timer.function(&sclp_request_timer);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);

	/* SYN2: Synchronous wait end (a=runstate, b=sync_count) */
	sclp_trace(4, "SYN2", sclp_running_state, sync_count, false);
}
EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn) {
			/* STCG: State-change callback (b=callback) */
			sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn,
				   false);

			reg->state_change_fn(reg);
		}
	} while (reg);
}
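
/* Layout of a state-change event buffer as received from the SCLP. */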
struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	u8		masks[2 * 1021 + 4];	/* variable length */
	/*
	 * u8		sclp_receive_mask[mask_length];
	 * u8		sclp_send_mask[mask_length];
	 * u32		read_data_function_mask;
	 */
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);

	scbuf = (struct sclp_statechangebuf *) evbuf;

	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = sccb_get_recv_mask(scbuf);
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = sccb_get_send_mask(scbuf);
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	/* REG: Event listener registered (b=caller) */
	sclp_trace_register(2, "REG", 0, _RET_IP_, reg);

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	/* UREG: Event listener unregistered (b=caller) */
	sclp_trace_register(2, "UREG", 0, _RET_IP_, reg);

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
	struct init_sccb *sccb = sclp_init_sccb;

	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(*sccb);
	if (sclp_mask_compat_mode)
		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
	else
		sccb->mask_length = sizeof(sccb_mask_t);
	sccb_set_recv_mask(sccb, receive_mask);
	sccb_set_send_mask(sccb, send_mask);
	sccb_set_sclp_recv_mask(sccb, 0);
	sccb_set_sclp_send_mask(sccb, 0);
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
				sclp_send_mask = sccb_get_sclp_send_mask(sccb);
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != __pa(sclp_init_sccb))
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(struct timer_list *unused)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call_trace(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		rc = -EBUSY;
		if (sclp_init_req.status == SCLP_REQ_DONE) {
			if (sccb->header.response_code == 0x20) {
				rc = 0;
				break;
			} else if (sccb->header.response_code == 0x74f0) {
				if (!sclp_mask_compat_mode) {
					sclp_mask_compat_mode = true;
					retry = 0;
				}
			}
		}
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};
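
/* Read-only driver attributes exposing the console page settings and the
 * buffer-full counter via sysfs. */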
static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RO(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};

static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};

static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name = "sclp",
		.groups = sclp_drv_attr_groups,
	},
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
	/* Set up variables */
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	timer_setup(&sclp_request_timer, NULL, 0);
	timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
	free_page((unsigned long) sclp_read_sccb);
	free_page((unsigned long) sclp_init_sccb);
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
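
/* Register the platform driver and initialize the SCLP interface early
 * during boot (arch_initcall). */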
static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	return sclp_init();
}

arch_initcall(sclp_initcall);