sclp.c

/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER "sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;

/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;

/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc)
		sclp_console_drop = drop;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY 3
#define SCLP_MASK_RETRY 3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL 10
#define SCLP_RETRY_INTERVAL 30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(unsigned long data)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();
		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		if (!req->sccb)
			goto do_post;
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
do_post:
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
EXPORT_SYMBOL(sclp_add_request);
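
/*
 * Example (illustrative sketch, not part of the original driver): how an
 * event driver might queue an asynchronous write request. The function
 * names, the callback body and the SCCB content are hypothetical; only the
 * struct sclp_req fields and sclp_add_request() itself come from this file.
 */
#if 0
static void example_write_done(struct sclp_req *req, void *data)
{
	/* Runs without sclp_lock held; req->status is typically
	 * SCLP_REQ_DONE (interrupt path) or SCLP_REQ_FAILED. */
}

static int example_queue_write(void *sccb)	/* page-aligned SCCB */
{
	static struct sclp_req req;

	memset(&req, 0, sizeof(req));
	req.command = SCLP_CMDW_WRITE_EVENT_DATA;
	req.sccb = sccb;
	req.status = SCLP_REQ_FILLED;
	req.callback = example_write_done;
	req.queue_timeout = 0;		/* no per-request queue timeout */
	return sclp_add_request(&req);	/* 0 on success, e.g. -EIO otherwise */
}
#endif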

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
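
/*
 * Note (illustrative): event type N selects bit 1 << (32 - N) of the 32-bit
 * receive mask, e.g. the state-change event (type 0x08) maps to
 * 1 << 24 == 0x01000000, which should correspond to EVTYP_STATECHANGE_MASK
 * as used by sclp_state_change_event below.
 */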

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
					     sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}
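
/*
 * Note (illustrative): the TOD clock increments bit 51 once per microsecond,
 * so 1ULL << 32 TOD units is about 1.05 seconds and the shift above roughly
 * converts whole seconds to TOD ticks. E.g. with HZ == 100, 3000 jiffies
 * (30 seconds) become 30ULL << 32, a timeout of roughly 31 seconds.
 */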

/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_sync |= 1UL << (63 - 54);
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock_fast() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);
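
/*
 * Usage note (illustrative): callers typically queue a request and then
 * busy-wait for its completion, as sclp_init_mask() does below:
 *
 *	while (req.status != SCLP_REQ_DONE && req.status != SCLP_REQ_FAILED)
 *		sclp_sync_wait();
 */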

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header header;
	u8 validity_sclp_active_facility_mask : 1;
	u8 validity_sclp_receive_mask : 1;
	u8 validity_sclp_send_mask : 1;
	u8 validity_read_data_function_mask : 1;
	u16 _zeros : 12;
	u16 mask_length;
	u64 sclp_active_facility_mask;
	sccb_mask_t sclp_receive_mask;
	sccb_mask_t sclp_send_mask;
	u32 read_data_function_mask;
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}
EXPORT_SYMBOL(sclp_register);
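
/*
 * Example (illustrative sketch, not part of the original driver): a minimal
 * event listener, modeled on sclp_state_change_event above. The callback
 * body and the chosen event mask (EVTYP_MSG_MASK from sclp.h) are
 * placeholders.
 */
#if 0
static void example_receiver(struct evbuf_header *evbuf)
{
	/* Called from sclp_dispatch_evbufs() without sclp_lock held. */
}

static struct sclp_register example_event = {
	.receive_mask = EVTYP_MSG_MASK,
	.receiver_fn = example_receiver,
};

static int example_listener_setup(void)
{
	/* Returns 0 on success, -EBUSY if the mask collides with another
	 * registered listener. */
	return sclp_register(&example_event);
}
#endif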

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}
EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}
EXPORT_SYMBOL(sclp_remove_processed);
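
/*
 * Note (illustrative): event drivers mark a consumed buffer by setting bit
 * 0x80 in evbuf->flags and then call sclp_remove_processed() on the
 * surrounding SCCB; the return value is the number of buffers that still
 * need processing.
 */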

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */
static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
	struct sclp_register *reg;
	unsigned long flags;

	if (!rollback) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list)
			reg->pm_event_posted = 0;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list) {
			if (rollback && reg->pm_event_posted)
				goto found;
			if (!rollback && !reg->pm_event_posted)
				goto found;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
found:
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg->pm_event_fn)
			reg->pm_event_fn(reg, sclp_pm_event);
		reg->pm_event_posted = rollback ? 0 : 1;
	} while (1);
}

/*
 * Suspend/resume callbacks for platform device
 */
static int sclp_freeze(struct device *dev)
{
	unsigned long flags;
	int rc;

	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_suspended;
	spin_unlock_irqrestore(&sclp_lock, flags);

	/* Init suspend data */
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	sclp_suspend_req.status = SCLP_REQ_FILLED;
	init_completion(&sclp_request_queue_flushed);

	rc = sclp_add_request(&sclp_suspend_req);
	if (rc == 0)
		wait_for_completion(&sclp_request_queue_flushed);
	else if (rc != -ENODATA)
		goto fail_thaw;

	rc = sclp_deactivate();
	if (rc)
		goto fail_thaw;
	return 0;

fail_thaw:
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
	return rc;
}

static int sclp_undo_suspend(enum sclp_pm_event event)
{
	unsigned long flags;
	int rc;

	rc = sclp_reactivate();
	if (rc)
		return rc;

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);

	sclp_pm_event(event, 0);
	return 0;
}

static int sclp_thaw(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
	.freeze = sclp_freeze,
	.thaw = sclp_thaw,
	.restore = sclp_restore,
};

static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RO(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};

static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};

static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name = "sclp",
		.pm = &sclp_pm_ops,
		.groups = sclp_drv_attr_groups,
	},
};

static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	init_timer(&sclp_queue_timer);
	sclp_queue_timer.function = sclp_req_queue_timeout;
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
			     unsigned long event, void *data)
{
	if (sclp_suspend_state == sclp_suspend_state_suspended)
		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
	return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
	.notifier_call = sclp_panic_notify,
	.priority = SCLP_PANIC_PRIO,
};

static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = PTR_ERR_OR_ZERO(sclp_pdev);
	if (rc)
		goto fail_platform_driver_unregister;

	rc = atomic_notifier_chain_register(&panic_notifier_list,
					    &sclp_on_panic_nb);
	if (rc)
		goto fail_platform_device_unregister;

	return sclp_init();

fail_platform_device_unregister:
	platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&sclp_pdrv);
	return rc;
}

arch_initcall(sclp_initcall);