
// SPDX-License-Identifier: GPL-2.0
/*
 * finite state machine for device handling
 *
 *    Copyright IBM Corp. 2002, 2008
 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"

static int timeout_log_enabled;

static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);
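
/*
 * Usage note: booting with "ccw_timeout_log" on the kernel command line
 * sets timeout_log_enabled, so ccw_device_timeout() below dumps the state
 * of the timed-out device via ccw_timeout_log().
 */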
static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_tod_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if (orb->tm.b) {
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->tm.tcw,
			       sizeof(struct tcw), 0);
	} else {
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
		    (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");

		print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->cmd.cpa,
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}

/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
void
ccw_device_timeout(struct timer_list *t)
{
	struct ccw_device_private *priv = from_timer(priv, t, timer);
	struct ccw_device *cdev = priv->cdev;

	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set timeout
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
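
/*
 * Usage sketch (illustrative, mirroring callers further down in this
 * file): the timeout is a delta in jiffies, and 0 disarms the timer.
 *
 *	ccw_device_set_timeout(cdev, 3 * HZ);	arm: fire in ~3 seconds
 *	ccw_device_set_timeout(cdev, 0);	disarm a pending timeout
 */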
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = cio_cancel_halt_clear(sch, &cdev->private->iretry);

	if (ret == -EIO)
		CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);

	return ret;
}
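
/*
 * Retry pattern used by callers (sketch, mirroring ccw_device_kill_io()
 * below): prime the retry counter, then re-arm the timer on -EBUSY so
 * the cancel/halt/clear sequence is driven again from the timeout path.
 *
 *	cdev->private->iretry = 255;
 *	ret = ccw_device_cancel_halt_clear(cdev);
 *	if (ret == -EBUSY)
 *		ccw_device_set_timeout(cdev, 3 * HZ);
 */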
void ccw_device_update_sense_data(struct ccw_device *cdev)
{
	memset(&cdev->id, 0, sizeof(cdev->id));
	cdev->id.cu_type = cdev->private->senseid.cu_type;
	cdev->id.cu_model = cdev->private->senseid.cu_model;
	cdev->id.dev_type = cdev->private->senseid.dev_type;
	cdev->id.dev_model = cdev->private->senseid.dev_model;
}

int ccw_device_test_sense_data(struct ccw_device *cdev)
{
	return cdev->id.cu_type == cdev->private->senseid.cu_type &&
		cdev->id.cu_model == cdev->private->senseid.cu_model &&
		cdev->id.dev_type == cdev->private->senseid.dev_type &&
		cdev->id.dev_model == cdev->private->senseid.dev_model;
}

/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE so we have to find out by magic (i.e. driving
 * the channel subsystem to device selection and updating our path masks).
 */
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
	int mask, i;
	struct chp_id chpid;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(sch->lpm & mask))
			continue;
		if (old_lpm & mask)
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (!chp_is_registered(chpid))
			css_schedule_eval_all();
	}
}
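
/*
 * Worked example for the loop above: path i maps to mask 0x80 >> i, so
 * with old_lpm = 0x80 and sch->lpm = 0xc0 only i = 1 (mask 0x40) is both
 * usable now and unusable before, and only that chpid gets checked.
 */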
/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	if (cio_disable_subchannel(sch))
		state = DEV_STATE_NOT_OPER;
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;

	/* Check since device may again have become not operational. */
	if (cio_update_schib(sch))
		state = DEV_STATE_NOT_OPER;
	else
		sch->lpm = sch->schib.pmcw.pam & sch->opm;

	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
		cdev->private->flags.recog_done = 1;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	if (cdev->private->flags.resuming) {
		cdev->private->state = state;
		cdev->private->flags.recog_done = 1;
		wake_up(&cdev->private->wait_q);
		return;
	}
	switch (state) {
	case DEV_STATE_NOT_OPER:
		break;
	case DEV_STATE_OFFLINE:
		if (!cdev->online) {
			ccw_device_update_sense_data(cdev);
			break;
		}
		cdev->private->state = DEV_STATE_OFFLINE;
		cdev->private->flags.recog_done = 1;
		if (ccw_device_test_sense_data(cdev)) {
			cdev->private->flags.donotify = 1;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
		} else {
			ccw_device_update_sense_data(cdev);
			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		}
		return;
	case DEV_STATE_BOXED:
		if (cdev->id.cu_type != 0) { /* device was recognized before */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_BOXED;
			wake_up(&cdev->private->wait_q);
			return;
		}
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:		/* Sense id stopped by timeout. */
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/**
 * ccw_device_notify() - inform the device's driver about an event
 * @cdev: device for which an event occurred
 * @event: event that occurred
 *
 * Returns:
 *   -%EINVAL if the device is offline or has no driver.
 *   -%EOPNOTSUPP if the device's driver has no notifier registered.
 *   %NOTIFY_OK if the driver wants to keep the device.
 *   %NOTIFY_BAD if the driver doesn't want to keep the device.
 */
int ccw_device_notify(struct ccw_device *cdev, int event)
{
	int ret = -EINVAL;

	if (!cdev->drv)
		goto out;
	if (!cdev->online)
		goto out;
	CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      event);
	if (!cdev->drv->notify) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	if (cdev->drv->notify(cdev, event))
		ret = NOTIFY_OK;
	else
		ret = NOTIFY_BAD;
out:
	return ret;
}
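
/*
 * Driver-side counterpart (hypothetical driver "mydrv", for illustration
 * only): a nonzero return from the notify callback maps to NOTIFY_OK and
 * keeps the device, a zero return maps to NOTIFY_BAD.
 *
 *	static int mydrv_notify(struct ccw_device *cdev, int event)
 *	{
 *		return event == CIO_OPER;
 *	}
 */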
static void ccw_device_oper_notify(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
		/* Reenable channel measurements, if needed. */
		ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
		/* Save indication for new paths. */
		cdev->private->path_new_mask = sch->vpm;
		return;
	}
	/* Driver doesn't want device back. */
	ccw_device_set_notoper(cdev);
	ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}

/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	switch (state) {
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (cdev->online &&
		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
			      "%04x\n", cdev->private->dev_id.devno,
			      sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
			cdev->private->state = DEV_STATE_NOT_OPER;
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		} else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	default:
		break;
	}

	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);
}

/*
 * Start device recognition.
 */
void ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after a
	 * timeout (or if sense pgid during path verification detects the
	 * device is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	ccw_device_sense_id_start(cdev);
}

/*
 * Handle events for states that use the ccw request infrastructure.
 */
static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
{
	switch (e) {
	case DEV_EVENT_NOTOPER:
		ccw_request_notoper(cdev);
		break;
	case DEV_EVENT_INTERRUPT:
		ccw_request_handler(cdev);
		break;
	case DEV_EVENT_TIMEOUT:
		ccw_request_timeout(cdev);
		break;
	default:
		break;
	}
}

static void ccw_device_report_path_events(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int path_event[8];
	int chp, mask;

	for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
		path_event[chp] = PE_NONE;
		if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
			path_event[chp] |= PE_PATH_GONE;
		if (mask & cdev->private->path_new_mask & sch->vpm)
			path_event[chp] |= PE_PATH_AVAILABLE;
		if (mask & cdev->private->pgid_reset_mask & sch->vpm)
			path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
	}
	if (cdev->online && cdev->drv->path_event)
		cdev->drv->path_event(cdev, path_event);
}
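
/*
 * Driver-side counterpart (hypothetical, for illustration only): the
 * path_event callback receives one PE_* bitmask per channel path.
 *
 *	static void mydrv_path_event(struct ccw_device *cdev, int *event)
 *	{
 *		int chp;
 *
 *		for (chp = 0; chp < 8; chp++)
 *			if (event[chp] & PE_PATH_GONE)
 *				pr_info("path %d gone\n", chp);
 *	}
 */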
static void ccw_device_reset_path_events(struct ccw_device *cdev)
{
	cdev->private->path_gone_mask = 0;
	cdev->private->path_new_mask = 0;
	cdev->private->pgid_reset_mask = 0;
}

static void create_fake_irb(struct irb *irb, int type)
{
	memset(irb, 0, sizeof(*irb));
	if (type == FAKE_CMD_IRB) {
		struct cmd_scsw *scsw = &irb->scsw.cmd;

		scsw->cc = 1;
		scsw->fctl = SCSW_FCTL_START_FUNC;
		scsw->actl = SCSW_ACTL_START_PEND;
		scsw->stctl = SCSW_STCTL_STATUS_PEND;
	} else if (type == FAKE_TM_IRB) {
		struct tm_scsw *scsw = &irb->scsw.tm;

		scsw->x = 1;
		scsw->cc = 1;
		scsw->fctl = SCSW_FCTL_START_FUNC;
		scsw->actl = SCSW_ACTL_START_PEND;
		scsw->stctl = SCSW_STCTL_STATUS_PEND;
	}
}

static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;

	if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
		ccw_device_schedule_recovery();

	cdev->private->path_broken_mask = broken_paths;
}
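
/*
 * Worked example for the XOR above: with pam & opm = 0xc0 (paths 0 and 1
 * nominally usable) and vpm = 0x80 (only path 0 verified), broken_paths
 * is 0x40, so recovery is scheduled once until the mask changes again.
 */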
void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	if (cio_update_schib(sch)) {
		err = -ENODEV;
		goto callback;
	}
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		ccw_device_verify_start(cdev);
		return;
	}
callback:
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			create_fake_irb(&cdev->private->irb,
					cdev->private->flags.fake_irb);
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		ccw_device_report_path_events(cdev);
		ccw_device_handle_broken_paths(cdev);
		break;
	case -ETIME:
	case -EUSERS:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EACCES:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
	ccw_device_reset_path_events(cdev);
}

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32) (addr_t) sch);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Start initial path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
	return 0;
}

void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Allow ccw_device_offline while disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
	    cdev->private->state == DEV_STATE_NOT_OPER) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return 0;
	}
	if (cdev->private->state == DEV_STATE_BOXED) {
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return 0;
	}
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	if (scsw_actl(&sch->schib.scsw) != 0)
		return -EBUSY;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;
	/* Are we doing path grouping? */
	if (!cdev->private->flags.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}

/*
 * Handle not operational event in non-special state.
 */
static void ccw_device_generic_notoper(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	else
		ccw_device_set_disconnected(cdev);
}

/*
 * Handle path verification event in offline state.
 */
static void ccw_device_offline_verify(struct ccw_device *cdev,
				      enum dev_event dev_event)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	css_schedule_eval(sch->schid);
}

/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	if (cio_update_schib(sch)) {
		ccw_device_verify_done(cdev, -ENODEV);
		return;
	}

	if (scsw_actl(&sch->schib.scsw) != 0 ||
	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
	    (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}

/*
 * Handle path verification event in boxed state.
 */
static void ccw_device_boxed_verify(struct ccw_device *cdev,
				    enum dev_event dev_event)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (cdev->online) {
		if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		else
			ccw_device_online_verify(cdev, dev_event);
	} else
		css_schedule_eval(sch->schid);
}
/*
 * Pass interrupt to device driver.
 */
static int ccw_device_call_handler(struct ccw_device *cdev)
{
	unsigned int stctl;
	int ending_status;

	/*
	 * We call the device action handler if:
	 *  - we received ending status
	 *  - the action handler requested to see all interrupts
	 *  - we received an intermediate status
	 *  - fast notification was requested (primary status)
	 *  - unsolicited interrupts
	 */
	stctl = scsw_stctl(&cdev->private->irb.scsw);
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;
	if (ending_status)
		ccw_device_set_timeout(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->irb);
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	return 1;
}
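
/*
 * Example stctl values for the ending-status test above (sketch): status
 * pending alone (stctl == SCSW_STCTL_STATUS_PEND), alert status plus
 * status pending, or any combination containing SCSW_STCTL_SEC_STATUS
 * all count as ending status and stop the timeout timer.
 */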
/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	int is_cmd;

	irb = this_cpu_ptr(&cio_irb);
	is_cmd = !scsw_is_tm(&irb->scsw);
	/* Check for unsolicited interrupt. */
	if (!scsw_is_solicited(&irb->scsw)) {
		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler(cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (is_cmd && cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
/*
 * Got a timeout in online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	cdev->private->async_kill_io_rc = -ETIMEDOUT;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = this_cpu_ptr(&cio_irb);
	/* Check for unsolicited interrupt. */
	if (scsw_stctl(&irb->scsw) ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (scsw_cc(&irb->scsw) == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler(cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meantime. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (scsw_fctl(&irb->scsw) &
	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* In case sensing interfered with setting the device online */
	wake_up(&cdev->private->wait_q);
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}

static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}

void ccw_device_kill_io(struct ccw_device *cdev)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	cdev->private->async_kill_io_rc = -EIO;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}

static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, (u32) (addr_t) sch) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;
	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}
void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	sch = to_subchannel(cdev->dev.parent);
	/* Update some values. */
	if (cio_update_schib(sch))
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/*
	 * Use the initial configuration since we can't be sure that the old
	 * paths are valid.
	 */
	io_subchannel_init_config(sch);
	if (cio_commit_config(sch))
		return;

	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
		css_schedule_eval(sch->schid);
	else
		ccw_device_start_id(cdev, 0);
}

static void ccw_device_disabled_irq(struct ccw_device *cdev,
				    enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in a disabled state means a previous disable was not
	 * successful - should not happen, but we try to disable again.
	 */
	cio_disable_subchannel(sch);
}
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	cdev->private->state = DEV_STATE_NOT_OPER;
	wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, HZ/10);
	} else {
		cdev->private->state = DEV_STATE_NOT_OPER;
		wake_up(&cdev->private->wait_q);
	}
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}

/*
 * device state machine
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_offline_verify,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_nop,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_boxed_verify,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
	[DEV_STATE_STEAL_LOCK] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
};

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
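
/*
 * Dispatch sketch (hedged; dev_fsm_event() itself is defined elsewhere in
 * this driver): delivering an event is essentially a two-dimensional
 * table lookup, roughly
 *
 *	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
 *
 * which is why every state/event pair above must name a handler, with
 * ccw_device_nop() serving as the explicit "ignore" entry.
 */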