sbp_target.c

  1. /*
  2. * SBP2 target driver (SCSI over IEEE1394 in target mode)
  3. *
  4. * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software Foundation,
  18. * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  19. */
  20. #define KMSG_COMPONENT "sbp_target"
  21. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  22. #include <linux/kernel.h>
  23. #include <linux/module.h>
  24. #include <linux/init.h>
  25. #include <linux/types.h>
  26. #include <linux/string.h>
  27. #include <linux/configfs.h>
  28. #include <linux/ctype.h>
  29. #include <linux/firewire.h>
  30. #include <linux/firewire-constants.h>
  31. #include <scsi/scsi_proto.h>
  32. #include <scsi/scsi_tcq.h>
  33. #include <target/target_core_base.h>
  34. #include <target/target_core_backend.h>
  35. #include <target/target_core_fabric.h>
  36. #include <target/target_core_fabric_configfs.h>
  37. #include <target/configfs_macros.h>
  38. #include <asm/unaligned.h>
  39. #include "sbp_target.h"
  40. static const struct target_core_fabric_ops sbp_ops;
  41. /* FireWire address region for management and command block address handlers */
  42. static const struct fw_address_region sbp_register_region = {
  43. .start = CSR_REGISTER_BASE + 0x10000,
  44. .end = 0x1000000000000ULL,
  45. };
  46. static const u32 sbp_unit_directory_template[] = {
  47. 0x1200609e, /* unit_specifier_id: NCITS/T10 */
  48. 0x13010483, /* unit_sw_version: 1155D Rev 4 */
  49. 0x3800609e, /* command_set_specifier_id: NCITS/T10 */
  50. 0x390104d8, /* command_set: SPC-2 */
  51. 0x3b000000, /* command_set_revision: 0 */
  52. 0x3c000001, /* firmware_revision: 1 */
  53. };
  54. #define SESSION_MAINTENANCE_INTERVAL HZ
  55. static atomic_t login_id = ATOMIC_INIT(0);
  56. static void session_maintenance_work(struct work_struct *);
  57. static int sbp_run_transaction(struct fw_card *, int, int, int, int,
  58. unsigned long long, void *, size_t);
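/* Read the initiator's EUI-64 (GUID) from quadlets 3 and 4 of its config ROM. */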
  59. static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
  60. {
  61. int ret;
  62. __be32 high, low;
  63. ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
  64. req->node_addr, req->generation, req->speed,
  65. (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
  66. &high, sizeof(high));
  67. if (ret != RCODE_COMPLETE)
  68. return ret;
  69. ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
  70. req->node_addr, req->generation, req->speed,
  71. (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
  72. &low, sizeof(low));
  73. if (ret != RCODE_COMPLETE)
  74. return ret;
  75. *guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
  76. return RCODE_COMPLETE;
  77. }
  78. static struct sbp_session *sbp_session_find_by_guid(
  79. struct sbp_tpg *tpg, u64 guid)
  80. {
  81. struct se_session *se_sess;
  82. struct sbp_session *sess, *found = NULL;
  83. spin_lock_bh(&tpg->se_tpg.session_lock);
  84. list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
  85. sess = se_sess->fabric_sess_ptr;
  86. if (sess->guid == guid)
  87. found = sess;
  88. }
  89. spin_unlock_bh(&tpg->se_tpg.session_lock);
  90. return found;
  91. }
  92. static struct sbp_login_descriptor *sbp_login_find_by_lun(
  93. struct sbp_session *session, u32 unpacked_lun)
  94. {
  95. struct sbp_login_descriptor *login, *found = NULL;
  96. spin_lock_bh(&session->lock);
  97. list_for_each_entry(login, &session->login_list, link) {
  98. if (login->login_lun == unpacked_lun)
  99. found = login;
  100. }
  101. spin_unlock_bh(&session->lock);
  102. return found;
  103. }
  104. static int sbp_login_count_all_by_lun(
  105. struct sbp_tpg *tpg,
  106. u32 unpacked_lun,
  107. int exclusive)
  108. {
  109. struct se_session *se_sess;
  110. struct sbp_session *sess;
  111. struct sbp_login_descriptor *login;
  112. int count = 0;
  113. spin_lock_bh(&tpg->se_tpg.session_lock);
  114. list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
  115. sess = se_sess->fabric_sess_ptr;
  116. spin_lock_bh(&sess->lock);
  117. list_for_each_entry(login, &sess->login_list, link) {
  118. if (login->login_lun != unpacked_lun)
  119. continue;
  120. if (!exclusive || login->exclusive)
  121. count++;
  122. }
  123. spin_unlock_bh(&sess->lock);
  124. }
  125. spin_unlock_bh(&tpg->se_tpg.session_lock);
  126. return count;
  127. }
  128. static struct sbp_login_descriptor *sbp_login_find_by_id(
  129. struct sbp_tpg *tpg, int login_id)
  130. {
  131. struct se_session *se_sess;
  132. struct sbp_session *sess;
  133. struct sbp_login_descriptor *login, *found = NULL;
  134. spin_lock_bh(&tpg->se_tpg.session_lock);
  135. list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
  136. sess = se_sess->fabric_sess_ptr;
  137. spin_lock_bh(&sess->lock);
  138. list_for_each_entry(login, &sess->login_list, link) {
  139. if (login->login_id == login_id)
  140. found = login;
  141. }
  142. spin_unlock_bh(&sess->lock);
  143. }
  144. spin_unlock_bh(&tpg->se_tpg.session_lock);
  145. return found;
  146. }
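/* Check that login_lun is exported by this TPG; *err is 0 if found, -ENODEV otherwise. */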
  147. static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
  148. {
  149. struct se_portal_group *se_tpg = &tpg->se_tpg;
  150. struct se_lun *se_lun;
  151. rcu_read_lock();
  152. hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
  153. if (se_lun->unpacked_lun == login_lun) {
  154. rcu_read_unlock();
  155. *err = 0;
  156. return login_lun;
  157. }
  158. }
  159. rcu_read_unlock();
  160. *err = -ENODEV;
  161. return login_lun;
  162. }
  163. static struct sbp_session *sbp_session_create(
  164. struct sbp_tpg *tpg,
  165. u64 guid)
  166. {
  167. struct sbp_session *sess;
  168. int ret;
  169. char guid_str[17];
  170. struct se_node_acl *se_nacl;
  171. sess = kmalloc(sizeof(*sess), GFP_KERNEL);
  172. if (!sess) {
  173. pr_err("failed to allocate session descriptor\n");
  174. return ERR_PTR(-ENOMEM);
  175. }
  176. sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
  177. if (IS_ERR(sess->se_sess)) {
  178. pr_err("failed to init se_session\n");
  179. ret = PTR_ERR(sess->se_sess);
  180. kfree(sess);
  181. return ERR_PTR(ret);
  182. }
  183. snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
  184. se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
  185. if (!se_nacl) {
  186. pr_warn("Node ACL not found for %s\n", guid_str);
  187. transport_free_session(sess->se_sess);
  188. kfree(sess);
  189. return ERR_PTR(-EPERM);
  190. }
  191. sess->se_sess->se_node_acl = se_nacl;
  192. spin_lock_init(&sess->lock);
  193. INIT_LIST_HEAD(&sess->login_list);
  194. INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
  195. sess->guid = guid;
  196. transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);
  197. return sess;
  198. }
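/* Release a session once its login list is empty; a no-op while logins remain. */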
  199. static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
  200. {
  201. spin_lock_bh(&sess->lock);
  202. if (!list_empty(&sess->login_list)) {
  203. spin_unlock_bh(&sess->lock);
  204. return;
  205. }
  206. spin_unlock_bh(&sess->lock);
  207. if (cancel_work)
  208. cancel_delayed_work_sync(&sess->maint_work);
  209. transport_deregister_session_configfs(sess->se_sess);
  210. transport_deregister_session(sess->se_sess);
  211. if (sess->card)
  212. fw_card_put(sess->card);
  213. kfree(sess);
  214. }
  215. static void sbp_target_agent_unregister(struct sbp_target_agent *);
  216. static void sbp_login_release(struct sbp_login_descriptor *login,
  217. bool cancel_work)
  218. {
  219. struct sbp_session *sess = login->sess;
  220. /* FIXME: abort/wait on tasks */
  221. sbp_target_agent_unregister(login->tgt_agt);
  222. if (sess) {
  223. spin_lock_bh(&sess->lock);
  224. list_del(&login->link);
  225. spin_unlock_bh(&sess->lock);
  226. sbp_session_release(sess, cancel_work);
  227. }
  228. kfree(login);
  229. }
  230. static struct sbp_target_agent *sbp_target_agent_register(
  231. struct sbp_login_descriptor *);
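/*
 * LOGIN: validate the requested LUN, enforce exclusivity and the per-LUN
 * login limit, create or reuse a session, register a command block agent
 * and write the login response block back to the initiator.
 */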
  232. static void sbp_management_request_login(
  233. struct sbp_management_agent *agent, struct sbp_management_request *req,
  234. int *status_data_size)
  235. {
  236. struct sbp_tport *tport = agent->tport;
  237. struct sbp_tpg *tpg = tport->tpg;
  238. struct sbp_session *sess;
  239. struct sbp_login_descriptor *login;
  240. struct sbp_login_response_block *response;
  241. u64 guid;
  242. u32 unpacked_lun;
  243. int login_response_len, ret;
  244. unpacked_lun = sbp_get_lun_from_tpg(tpg,
  245. LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
  246. if (ret) {
  247. pr_notice("login to unknown LUN: %d\n",
  248. LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
  249. req->status.status = cpu_to_be32(
  250. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  251. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
  252. return;
  253. }
  254. ret = read_peer_guid(&guid, req);
  255. if (ret != RCODE_COMPLETE) {
  256. pr_warn("failed to read peer GUID: %d\n", ret);
  257. req->status.status = cpu_to_be32(
  258. STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
  259. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
  260. return;
  261. }
  262. pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
  263. unpacked_lun, guid);
  264. sess = sbp_session_find_by_guid(tpg, guid);
  265. if (sess) {
  266. login = sbp_login_find_by_lun(sess, unpacked_lun);
  267. if (login) {
  268. pr_notice("initiator already logged-in\n");
  269. /*
  270. * SBP-2 R4 says we should return access denied, but
  271. * that can confuse initiators. Instead we need to
  272. * treat this like a reconnect, but send the login
  273. * response block like a fresh login.
  274. *
  275. * This is required particularly in the case of Apple
  276. * devices booting off the FireWire target, where
  277. * the firmware has an active login to the target. When
  278. * the OS takes control of the session it issues its own
  279. * LOGIN rather than a RECONNECT. To avoid the machine
  280. * waiting until the reconnect_hold expires, we can skip
  281. * the ACCESS_DENIED errors to speed things up.
  282. */
  283. goto already_logged_in;
  284. }
  285. }
  286. /*
  287. * check exclusive bit in login request
  288. * reject with access_denied if any logins present
  289. */
  290. if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
  291. sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
  292. pr_warn("refusing exclusive login with other active logins\n");
  293. req->status.status = cpu_to_be32(
  294. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  295. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
  296. return;
  297. }
  298. /*
  299. * check exclusive bit in any existing login descriptor
  300. * reject with access_denied if any exclusive logins present
  301. */
  302. if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
  303. pr_warn("refusing login while another exclusive login present\n");
  304. req->status.status = cpu_to_be32(
  305. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  306. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
  307. return;
  308. }
  309. /*
  310. * check we haven't exceeded the number of allowed logins
  311. * reject with resources_unavailable if we have
  312. */
  313. if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
  314. tport->max_logins_per_lun) {
  315. pr_warn("max number of logins reached\n");
  316. req->status.status = cpu_to_be32(
  317. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  318. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
  319. return;
  320. }
  321. if (!sess) {
  322. sess = sbp_session_create(tpg, guid);
  323. if (IS_ERR(sess)) {
  324. switch (PTR_ERR(sess)) {
  325. case -EPERM:
  326. ret = SBP_STATUS_ACCESS_DENIED;
  327. break;
  328. default:
  329. ret = SBP_STATUS_RESOURCES_UNAVAIL;
  330. break;
  331. }
  332. req->status.status = cpu_to_be32(
  333. STATUS_BLOCK_RESP(
  334. STATUS_RESP_REQUEST_COMPLETE) |
  335. STATUS_BLOCK_SBP_STATUS(ret));
  336. return;
  337. }
  338. sess->node_id = req->node_addr;
  339. sess->card = fw_card_get(req->card);
  340. sess->generation = req->generation;
  341. sess->speed = req->speed;
  342. schedule_delayed_work(&sess->maint_work,
  343. SESSION_MAINTENANCE_INTERVAL);
  344. }
  345. /* only take the latest reconnect_hold into account */
  346. sess->reconnect_hold = min(
  347. 1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
  348. tport->max_reconnect_timeout) - 1;
  349. login = kmalloc(sizeof(*login), GFP_KERNEL);
  350. if (!login) {
  351. pr_err("failed to allocate login descriptor\n");
  352. sbp_session_release(sess, true);
  353. req->status.status = cpu_to_be32(
  354. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  355. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
  356. return;
  357. }
  358. login->sess = sess;
  359. login->login_lun = unpacked_lun;
  360. login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
  361. login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
  362. login->login_id = atomic_inc_return(&login_id);
  363. login->tgt_agt = sbp_target_agent_register(login);
  364. if (IS_ERR(login->tgt_agt)) {
  365. ret = PTR_ERR(login->tgt_agt);
  366. pr_err("failed to map command block handler: %d\n", ret);
  367. sbp_session_release(sess, true);
  368. kfree(login);
  369. req->status.status = cpu_to_be32(
  370. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  371. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
  372. return;
  373. }
  374. spin_lock_bh(&sess->lock);
  375. list_add_tail(&login->link, &sess->login_list);
  376. spin_unlock_bh(&sess->lock);
  377. already_logged_in:
  378. response = kzalloc(sizeof(*response), GFP_KERNEL);
  379. if (!response) {
  380. pr_err("failed to allocate login response block\n");
  381. sbp_login_release(login, true);
  382. req->status.status = cpu_to_be32(
  383. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  384. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
  385. return;
  386. }
  387. login_response_len = clamp_val(
  388. LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
  389. 12, sizeof(*response));
  390. response->misc = cpu_to_be32(
  391. ((login_response_len & 0xffff) << 16) |
  392. (login->login_id & 0xffff));
  393. response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
  394. addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
  395. &response->command_block_agent);
  396. ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
  397. sess->node_id, sess->generation, sess->speed,
  398. sbp2_pointer_to_addr(&req->orb.ptr2), response,
  399. login_response_len);
  400. if (ret != RCODE_COMPLETE) {
  401. pr_debug("failed to write login response block: %x\n", ret);
  402. kfree(response);
  403. sbp_login_release(login, true);
  404. req->status.status = cpu_to_be32(
  405. STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
  406. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
  407. return;
  408. }
  409. kfree(response);
  410. req->status.status = cpu_to_be32(
  411. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  412. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
  413. }
  414. static void sbp_management_request_query_logins(
  415. struct sbp_management_agent *agent, struct sbp_management_request *req,
  416. int *status_data_size)
  417. {
  418. pr_notice("QUERY LOGINS not implemented\n");
  419. /* FIXME: implement */
  420. req->status.status = cpu_to_be32(
  421. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  422. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
  423. }
  424. static void sbp_management_request_reconnect(
  425. struct sbp_management_agent *agent, struct sbp_management_request *req,
  426. int *status_data_size)
  427. {
  428. struct sbp_tport *tport = agent->tport;
  429. struct sbp_tpg *tpg = tport->tpg;
  430. int ret;
  431. u64 guid;
  432. struct sbp_login_descriptor *login;
  433. ret = read_peer_guid(&guid, req);
  434. if (ret != RCODE_COMPLETE) {
  435. pr_warn("failed to read peer GUID: %d\n", ret);
  436. req->status.status = cpu_to_be32(
  437. STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
  438. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
  439. return;
  440. }
  441. pr_notice("mgt_agent RECONNECT from %016llx\n", guid);
  442. login = sbp_login_find_by_id(tpg,
  443. RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));
  444. if (!login) {
  445. pr_err("mgt_agent RECONNECT unknown login ID\n");
  446. req->status.status = cpu_to_be32(
  447. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  448. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
  449. return;
  450. }
  451. if (login->sess->guid != guid) {
  452. pr_err("mgt_agent RECONNECT login GUID doesn't match\n");
  453. req->status.status = cpu_to_be32(
  454. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  455. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
  456. return;
  457. }
  458. spin_lock_bh(&login->sess->lock);
  459. if (login->sess->card)
  460. fw_card_put(login->sess->card);
  461. /* update the node details */
  462. login->sess->generation = req->generation;
  463. login->sess->node_id = req->node_addr;
  464. login->sess->card = fw_card_get(req->card);
  465. login->sess->speed = req->speed;
  466. spin_unlock_bh(&login->sess->lock);
  467. req->status.status = cpu_to_be32(
  468. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  469. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
  470. }
  471. static void sbp_management_request_logout(
  472. struct sbp_management_agent *agent, struct sbp_management_request *req,
  473. int *status_data_size)
  474. {
  475. struct sbp_tport *tport = agent->tport;
  476. struct sbp_tpg *tpg = tport->tpg;
  477. int id;
  478. struct sbp_login_descriptor *login;
  479. id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
  480. login = sbp_login_find_by_id(tpg, id);
  481. if (!login) {
  482. pr_warn("cannot find login: %d\n", id);
  483. req->status.status = cpu_to_be32(
  484. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  485. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
  486. return;
  487. }
  488. pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
  489. login->login_lun, login->login_id);
  490. if (req->node_addr != login->sess->node_id) {
  491. pr_warn("logout from different node ID\n");
  492. req->status.status = cpu_to_be32(
  493. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  494. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
  495. return;
  496. }
  497. sbp_login_release(login, true);
  498. req->status.status = cpu_to_be32(
  499. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  500. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
  501. }
  502. static void session_check_for_reset(struct sbp_session *sess)
  503. {
  504. bool card_valid = false;
  505. spin_lock_bh(&sess->lock);
  506. if (sess->card) {
  507. spin_lock_irq(&sess->card->lock);
  508. card_valid = (sess->card->local_node != NULL);
  509. spin_unlock_irq(&sess->card->lock);
  510. if (!card_valid) {
  511. fw_card_put(sess->card);
  512. sess->card = NULL;
  513. }
  514. }
  515. if (!card_valid || (sess->generation != sess->card->generation)) {
  516. pr_info("Waiting for reconnect from node: %016llx\n",
  517. sess->guid);
  518. sess->node_id = -1;
  519. sess->reconnect_expires = get_jiffies_64() +
  520. ((sess->reconnect_hold + 1) * HZ);
  521. }
  522. spin_unlock_bh(&sess->lock);
  523. }
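/* The reconnect window has elapsed: detach and release every login, then the session. */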
  524. static void session_reconnect_expired(struct sbp_session *sess)
  525. {
  526. struct sbp_login_descriptor *login, *temp;
  527. LIST_HEAD(login_list);
  528. pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);
  529. spin_lock_bh(&sess->lock);
  530. list_for_each_entry_safe(login, temp, &sess->login_list, link) {
  531. login->sess = NULL;
  532. list_move_tail(&login->link, &login_list);
  533. }
  534. spin_unlock_bh(&sess->lock);
  535. list_for_each_entry_safe(login, temp, &login_list, link) {
  536. list_del(&login->link);
  537. sbp_login_release(login, false);
  538. }
  539. sbp_session_release(sess, false);
  540. }
  541. static void session_maintenance_work(struct work_struct *work)
  542. {
  543. struct sbp_session *sess = container_of(work, struct sbp_session,
  544. maint_work.work);
  545. /* could be called while tearing down the session */
  546. spin_lock_bh(&sess->lock);
  547. if (list_empty(&sess->login_list)) {
  548. spin_unlock_bh(&sess->lock);
  549. return;
  550. }
  551. spin_unlock_bh(&sess->lock);
  552. if (sess->node_id != -1) {
  553. /* check for bus reset and make node_id invalid */
  554. session_check_for_reset(sess);
  555. schedule_delayed_work(&sess->maint_work,
  556. SESSION_MAINTENANCE_INTERVAL);
  557. } else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
  558. /* still waiting for reconnect */
  559. schedule_delayed_work(&sess->maint_work,
  560. SESSION_MAINTENANCE_INTERVAL);
  561. } else {
  562. /* reconnect timeout has expired */
  563. session_reconnect_expired(sess);
  564. }
  565. }
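/* Handlers for the command block agent's registers, dispatched from tgt_agent_rw(). */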
  566. static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
  567. struct sbp_target_agent *agent)
  568. {
  569. int state;
  570. switch (tcode) {
  571. case TCODE_READ_QUADLET_REQUEST:
  572. pr_debug("tgt_agent AGENT_STATE READ\n");
  573. spin_lock_bh(&agent->lock);
  574. state = agent->state;
  575. spin_unlock_bh(&agent->lock);
  576. *(__be32 *)data = cpu_to_be32(state);
  577. return RCODE_COMPLETE;
  578. case TCODE_WRITE_QUADLET_REQUEST:
  579. /* ignored */
  580. return RCODE_COMPLETE;
  581. default:
  582. return RCODE_TYPE_ERROR;
  583. }
  584. }
  585. static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
  586. struct sbp_target_agent *agent)
  587. {
  588. switch (tcode) {
  589. case TCODE_WRITE_QUADLET_REQUEST:
  590. pr_debug("tgt_agent AGENT_RESET\n");
  591. spin_lock_bh(&agent->lock);
  592. agent->state = AGENT_STATE_RESET;
  593. spin_unlock_bh(&agent->lock);
  594. return RCODE_COMPLETE;
  595. default:
  596. return RCODE_TYPE_ERROR;
  597. }
  598. }
  599. static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
  600. struct sbp_target_agent *agent)
  601. {
  602. struct sbp2_pointer *ptr = data;
  603. switch (tcode) {
  604. case TCODE_WRITE_BLOCK_REQUEST:
  605. spin_lock_bh(&agent->lock);
  606. if (agent->state != AGENT_STATE_SUSPENDED &&
  607. agent->state != AGENT_STATE_RESET) {
  608. spin_unlock_bh(&agent->lock);
  609. pr_notice("Ignoring ORB_POINTER write while active.\n");
  610. return RCODE_CONFLICT_ERROR;
  611. }
  612. agent->state = AGENT_STATE_ACTIVE;
  613. spin_unlock_bh(&agent->lock);
  614. agent->orb_pointer = sbp2_pointer_to_addr(ptr);
  615. agent->doorbell = false;
  616. pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
  617. agent->orb_pointer);
  618. queue_work(system_unbound_wq, &agent->work);
  619. return RCODE_COMPLETE;
  620. case TCODE_READ_BLOCK_REQUEST:
  621. pr_debug("tgt_agent ORB_POINTER READ\n");
  622. spin_lock_bh(&agent->lock);
  623. addr_to_sbp2_pointer(agent->orb_pointer, ptr);
  624. spin_unlock_bh(&agent->lock);
  625. return RCODE_COMPLETE;
  626. default:
  627. return RCODE_TYPE_ERROR;
  628. }
  629. }
  630. static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
  631. struct sbp_target_agent *agent)
  632. {
  633. switch (tcode) {
  634. case TCODE_WRITE_QUADLET_REQUEST:
  635. spin_lock_bh(&agent->lock);
  636. if (agent->state != AGENT_STATE_SUSPENDED) {
  637. spin_unlock_bh(&agent->lock);
  638. pr_debug("Ignoring DOORBELL while active.\n");
  639. return RCODE_CONFLICT_ERROR;
  640. }
  641. agent->state = AGENT_STATE_ACTIVE;
  642. spin_unlock_bh(&agent->lock);
  643. agent->doorbell = true;
  644. pr_debug("tgt_agent DOORBELL\n");
  645. queue_work(system_unbound_wq, &agent->work);
  646. return RCODE_COMPLETE;
  647. case TCODE_READ_QUADLET_REQUEST:
  648. return RCODE_COMPLETE;
  649. default:
  650. return RCODE_TYPE_ERROR;
  651. }
  652. }
  653. static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
  654. int tcode, void *data, struct sbp_target_agent *agent)
  655. {
  656. switch (tcode) {
  657. case TCODE_WRITE_QUADLET_REQUEST:
  658. pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
  659. /* ignored as we don't send unsolicited status */
  660. return RCODE_COMPLETE;
  661. case TCODE_READ_QUADLET_REQUEST:
  662. return RCODE_COMPLETE;
  663. default:
  664. return RCODE_TYPE_ERROR;
  665. }
  666. }
  667. static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
  668. int tcode, int destination, int source, int generation,
  669. unsigned long long offset, void *data, size_t length,
  670. void *callback_data)
  671. {
  672. struct sbp_target_agent *agent = callback_data;
  673. struct sbp_session *sess = agent->login->sess;
  674. int sess_gen, sess_node, rcode;
  675. spin_lock_bh(&sess->lock);
  676. sess_gen = sess->generation;
  677. sess_node = sess->node_id;
  678. spin_unlock_bh(&sess->lock);
  679. if (generation != sess_gen) {
  680. pr_notice("ignoring request with wrong generation\n");
  681. rcode = RCODE_TYPE_ERROR;
  682. goto out;
  683. }
  684. if (source != sess_node) {
  685. pr_notice("ignoring request from foreign node (%x != %x)\n",
  686. source, sess_node);
  687. rcode = RCODE_TYPE_ERROR;
  688. goto out;
  689. }
  690. /* turn offset into the offset from the start of the block */
  691. offset -= agent->handler.offset;
  692. if (offset == 0x00 && length == 4) {
  693. /* AGENT_STATE */
  694. rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
  695. } else if (offset == 0x04 && length == 4) {
  696. /* AGENT_RESET */
  697. rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
  698. } else if (offset == 0x08 && length == 8) {
  699. /* ORB_POINTER */
  700. rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
  701. } else if (offset == 0x10 && length == 4) {
  702. /* DOORBELL */
  703. rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
  704. } else if (offset == 0x14 && length == 4) {
  705. /* UNSOLICITED_STATUS_ENABLE */
  706. rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
  707. data, agent);
  708. } else {
  709. rcode = RCODE_ADDRESS_ERROR;
  710. }
  711. out:
  712. fw_send_response(card, request, rcode);
  713. }
  714. static void sbp_handle_command(struct sbp_target_request *);
  715. static int sbp_send_status(struct sbp_target_request *);
  716. static void sbp_free_request(struct sbp_target_request *);
  717. static void tgt_agent_process_work(struct work_struct *work)
  718. {
  719. struct sbp_target_request *req =
  720. container_of(work, struct sbp_target_request, work);
  721. pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
  722. req->orb_pointer,
  723. sbp2_pointer_to_addr(&req->orb.next_orb),
  724. sbp2_pointer_to_addr(&req->orb.data_descriptor),
  725. be32_to_cpu(req->orb.misc));
  726. if (req->orb_pointer >> 32)
  727. pr_debug("ORB with high bits set\n");
  728. switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
  729. case 0: /* Format specified by this standard */
  730. sbp_handle_command(req);
  731. return;
  732. case 1: /* Reserved for future standardization */
  733. case 2: /* Vendor-dependent */
  734. req->status.status |= cpu_to_be32(
  735. STATUS_BLOCK_RESP(
  736. STATUS_RESP_REQUEST_COMPLETE) |
  737. STATUS_BLOCK_DEAD(0) |
  738. STATUS_BLOCK_LEN(1) |
  739. STATUS_BLOCK_SBP_STATUS(
  740. SBP_STATUS_REQ_TYPE_NOTSUPP));
  741. sbp_send_status(req);
  742. sbp_free_request(req);
  743. return;
  744. case 3: /* Dummy ORB */
  745. req->status.status |= cpu_to_be32(
  746. STATUS_BLOCK_RESP(
  747. STATUS_RESP_REQUEST_COMPLETE) |
  748. STATUS_BLOCK_DEAD(0) |
  749. STATUS_BLOCK_LEN(1) |
  750. STATUS_BLOCK_SBP_STATUS(
  751. SBP_STATUS_DUMMY_ORB_COMPLETE));
  752. sbp_send_status(req);
  753. sbp_free_request(req);
  754. return;
  755. default:
  756. BUG();
  757. }
  758. }
  759. /* used to double-check we haven't been issued an AGENT_RESET */
  760. static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
  761. {
  762. bool active;
  763. spin_lock_bh(&agent->lock);
  764. active = (agent->state == AGENT_STATE_ACTIVE);
  765. spin_unlock_bh(&agent->lock);
  766. return active;
  767. }
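/*
 * Fetch ORBs from the initiator, following the next_ORB chain, and queue
 * each one for processing until the list ends or the agent leaves the
 * ACTIVE state.
 */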
  768. static void tgt_agent_fetch_work(struct work_struct *work)
  769. {
  770. struct sbp_target_agent *agent =
  771. container_of(work, struct sbp_target_agent, work);
  772. struct sbp_session *sess = agent->login->sess;
  773. struct sbp_target_request *req;
  774. int ret;
  775. bool doorbell = agent->doorbell;
  776. u64 next_orb = agent->orb_pointer;
  777. while (next_orb && tgt_agent_check_active(agent)) {
  778. req = kzalloc(sizeof(*req), GFP_KERNEL);
  779. if (!req) {
  780. spin_lock_bh(&agent->lock);
  781. agent->state = AGENT_STATE_DEAD;
  782. spin_unlock_bh(&agent->lock);
  783. return;
  784. }
  785. req->login = agent->login;
  786. req->orb_pointer = next_orb;
  787. req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
  788. req->orb_pointer >> 32));
  789. req->status.orb_low = cpu_to_be32(
  790. req->orb_pointer & 0xfffffffc);
  791. /* read in the ORB */
  792. ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
  793. sess->node_id, sess->generation, sess->speed,
  794. req->orb_pointer, &req->orb, sizeof(req->orb));
  795. if (ret != RCODE_COMPLETE) {
  796. pr_debug("tgt_orb fetch failed: %x\n", ret);
  797. req->status.status |= cpu_to_be32(
  798. STATUS_BLOCK_SRC(
  799. STATUS_SRC_ORB_FINISHED) |
  800. STATUS_BLOCK_RESP(
  801. STATUS_RESP_TRANSPORT_FAILURE) |
  802. STATUS_BLOCK_DEAD(1) |
  803. STATUS_BLOCK_LEN(1) |
  804. STATUS_BLOCK_SBP_STATUS(
  805. SBP_STATUS_UNSPECIFIED_ERROR));
  806. spin_lock_bh(&agent->lock);
  807. agent->state = AGENT_STATE_DEAD;
  808. spin_unlock_bh(&agent->lock);
  809. sbp_send_status(req);
  810. sbp_free_request(req);
  811. return;
  812. }
  813. /* check the next_ORB field */
  814. if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
  815. next_orb = 0;
  816. req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
  817. STATUS_SRC_ORB_FINISHED));
  818. } else {
  819. next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
  820. req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
  821. STATUS_SRC_ORB_CONTINUING));
  822. }
  823. if (tgt_agent_check_active(agent) && !doorbell) {
  824. INIT_WORK(&req->work, tgt_agent_process_work);
  825. queue_work(system_unbound_wq, &req->work);
  826. } else {
  827. /* don't process this request, just check next_ORB */
  828. sbp_free_request(req);
  829. }
  830. spin_lock_bh(&agent->lock);
  831. doorbell = agent->doorbell = false;
  832. /* check if we should carry on processing */
  833. if (next_orb)
  834. agent->orb_pointer = next_orb;
  835. else
  836. agent->state = AGENT_STATE_SUSPENDED;
  837. spin_unlock_bh(&agent->lock);
  838. }
  839. }
  840. static struct sbp_target_agent *sbp_target_agent_register(
  841. struct sbp_login_descriptor *login)
  842. {
  843. struct sbp_target_agent *agent;
  844. int ret;
  845. agent = kmalloc(sizeof(*agent), GFP_KERNEL);
  846. if (!agent)
  847. return ERR_PTR(-ENOMEM);
  848. spin_lock_init(&agent->lock);
  849. agent->handler.length = 0x20;
  850. agent->handler.address_callback = tgt_agent_rw;
  851. agent->handler.callback_data = agent;
  852. agent->login = login;
  853. agent->state = AGENT_STATE_RESET;
  854. INIT_WORK(&agent->work, tgt_agent_fetch_work);
  855. agent->orb_pointer = 0;
  856. agent->doorbell = false;
  857. ret = fw_core_add_address_handler(&agent->handler,
  858. &sbp_register_region);
  859. if (ret < 0) {
  860. kfree(agent);
  861. return ERR_PTR(ret);
  862. }
  863. return agent;
  864. }
  865. static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
  866. {
  867. fw_core_remove_address_handler(&agent->handler);
  868. cancel_work_sync(&agent->work);
  869. kfree(agent);
  870. }
  871. /*
  872. * Simple wrapper around fw_run_transaction that retries the transaction several
  873. * times in case of failure, with an increasing backoff between attempts.
  874. */
  875. static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
  876. int generation, int speed, unsigned long long offset,
  877. void *payload, size_t length)
  878. {
  879. int attempt, ret, delay;
  880. for (attempt = 1; attempt <= 5; attempt++) {
  881. ret = fw_run_transaction(card, tcode, destination_id,
  882. generation, speed, offset, payload, length);
  883. switch (ret) {
  884. case RCODE_COMPLETE:
  885. case RCODE_TYPE_ERROR:
  886. case RCODE_ADDRESS_ERROR:
  887. case RCODE_GENERATION:
  888. return ret;
  889. default:
  890. delay = 5 * attempt * attempt;
  891. usleep_range(delay, delay * 2);
  892. }
  893. }
  894. return ret;
  895. }
  896. /*
  897. * Wrapper around sbp_run_transaction that gets the card, destination,
  898. * generation and speed out of the request's session.
  899. */
  900. static int sbp_run_request_transaction(struct sbp_target_request *req,
  901. int tcode, unsigned long long offset, void *payload,
  902. size_t length)
  903. {
  904. struct sbp_login_descriptor *login = req->login;
  905. struct sbp_session *sess = login->sess;
  906. struct fw_card *card;
  907. int node_id, generation, speed, ret;
  908. spin_lock_bh(&sess->lock);
  909. card = fw_card_get(sess->card);
  910. node_id = sess->node_id;
  911. generation = sess->generation;
  912. speed = sess->speed;
  913. spin_unlock_bh(&sess->lock);
  914. ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
  915. offset, payload, length);
  916. fw_card_put(card);
  917. return ret;
  918. }
  919. static int sbp_fetch_command(struct sbp_target_request *req)
  920. {
  921. int ret, cmd_len, copy_len;
  922. cmd_len = scsi_command_size(req->orb.command_block);
  923. req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
  924. if (!req->cmd_buf)
  925. return -ENOMEM;
  926. memcpy(req->cmd_buf, req->orb.command_block,
  927. min_t(int, cmd_len, sizeof(req->orb.command_block)));
  928. if (cmd_len > sizeof(req->orb.command_block)) {
  929. pr_debug("sbp_fetch_command: filling in long command\n");
  930. copy_len = cmd_len - sizeof(req->orb.command_block);
  931. ret = sbp_run_request_transaction(req,
  932. TCODE_READ_BLOCK_REQUEST,
  933. req->orb_pointer + sizeof(req->orb),
  934. req->cmd_buf + sizeof(req->orb.command_block),
  935. copy_len);
  936. if (ret != RCODE_COMPLETE)
  937. return -EIO;
  938. }
  939. return 0;
  940. }
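/* Read the ORB's page table from the initiator if the page-table-present bit is set. */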
  941. static int sbp_fetch_page_table(struct sbp_target_request *req)
  942. {
  943. int pg_tbl_sz, ret;
  944. struct sbp_page_table_entry *pg_tbl;
  945. if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
  946. return 0;
  947. pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
  948. sizeof(struct sbp_page_table_entry);
  949. pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
  950. if (!pg_tbl)
  951. return -ENOMEM;
  952. ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
  953. sbp2_pointer_to_addr(&req->orb.data_descriptor),
  954. pg_tbl, pg_tbl_sz);
  955. if (ret != RCODE_COMPLETE) {
  956. kfree(pg_tbl);
  957. return -EIO;
  958. }
  959. req->pg_tbl = pg_tbl;
  960. return 0;
  961. }
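/* Work out the transfer length and DMA direction from the ORB (and page table, if any). */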
  962. static void sbp_calc_data_length_direction(struct sbp_target_request *req,
  963. u32 *data_len, enum dma_data_direction *data_dir)
  964. {
  965. int data_size, direction, idx;
  966. data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
  967. direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
  968. if (!data_size) {
  969. *data_len = 0;
  970. *data_dir = DMA_NONE;
  971. return;
  972. }
  973. *data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
  974. if (req->pg_tbl) {
  975. *data_len = 0;
  976. for (idx = 0; idx < data_size; idx++) {
  977. *data_len += be16_to_cpu(
  978. req->pg_tbl[idx].segment_length);
  979. }
  980. } else {
  981. *data_len = data_size;
  982. }
  983. }
  984. static void sbp_handle_command(struct sbp_target_request *req)
  985. {
  986. struct sbp_login_descriptor *login = req->login;
  987. struct sbp_session *sess = login->sess;
  988. int ret, unpacked_lun;
  989. u32 data_length;
  990. enum dma_data_direction data_dir;
  991. ret = sbp_fetch_command(req);
  992. if (ret) {
  993. pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
  994. goto err;
  995. }
  996. ret = sbp_fetch_page_table(req);
  997. if (ret) {
  998. pr_debug("sbp_handle_command: fetch page table failed: %d\n",
  999. ret);
  1000. goto err;
  1001. }
  1002. unpacked_lun = req->login->login_lun;
  1003. sbp_calc_data_length_direction(req, &data_length, &data_dir);
  1004. pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
  1005. req->orb_pointer, unpacked_lun, data_length, data_dir);
  1006. /* only used for printk until we do TMRs */
  1007. req->se_cmd.tag = req->orb_pointer;
  1008. if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
  1009. req->sense_buf, unpacked_lun, data_length,
  1010. TCM_SIMPLE_TAG, data_dir, 0))
  1011. goto err;
  1012. return;
  1013. err:
  1014. req->status.status |= cpu_to_be32(
  1015. STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
  1016. STATUS_BLOCK_DEAD(0) |
  1017. STATUS_BLOCK_LEN(1) |
  1018. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
  1019. sbp_send_status(req);
  1020. sbp_free_request(req);
  1021. }
  1022. /*
  1023. * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
  1024. * DMA_FROM_DEVICE = write to initiator (SCSI READ)
  1025. */
  1026. static int sbp_rw_data(struct sbp_target_request *req)
  1027. {
  1028. struct sbp_session *sess = req->login->sess;
  1029. int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
  1030. generation, num_pte, length, tfr_length,
  1031. rcode = RCODE_COMPLETE;
  1032. struct sbp_page_table_entry *pte;
  1033. unsigned long long offset;
  1034. struct fw_card *card;
  1035. struct sg_mapping_iter iter;
  1036. if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
  1037. tcode = TCODE_WRITE_BLOCK_REQUEST;
  1038. sg_miter_flags = SG_MITER_FROM_SG;
  1039. } else {
  1040. tcode = TCODE_READ_BLOCK_REQUEST;
  1041. sg_miter_flags = SG_MITER_TO_SG;
  1042. }
  1043. max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
  1044. speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));
  1045. pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
  1046. if (pg_size) {
  1047. pr_err("sbp_rw_data: page size ignored\n");
  1048. pg_size = 0x100 << pg_size;
  1049. }
  1050. spin_lock_bh(&sess->lock);
  1051. card = fw_card_get(sess->card);
  1052. node_id = sess->node_id;
  1053. generation = sess->generation;
  1054. spin_unlock_bh(&sess->lock);
  1055. if (req->pg_tbl) {
  1056. pte = req->pg_tbl;
  1057. num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
  1058. offset = 0;
  1059. length = 0;
  1060. } else {
  1061. pte = NULL;
  1062. num_pte = 0;
  1063. offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
  1064. length = req->se_cmd.data_length;
  1065. }
  1066. sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
  1067. sg_miter_flags);
  1068. while (length || num_pte) {
  1069. if (!length) {
  1070. offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
  1071. be32_to_cpu(pte->segment_base_lo);
  1072. length = be16_to_cpu(pte->segment_length);
  1073. pte++;
  1074. num_pte--;
  1075. }
  1076. sg_miter_next(&iter);
  1077. tfr_length = min3(length, max_payload, (int)iter.length);
  1078. /* FIXME: take page_size into account */
  1079. rcode = sbp_run_transaction(card, tcode, node_id,
  1080. generation, speed,
  1081. offset, iter.addr, tfr_length);
  1082. if (rcode != RCODE_COMPLETE)
  1083. break;
  1084. length -= tfr_length;
  1085. offset += tfr_length;
  1086. iter.consumed = tfr_length;
  1087. }
  1088. sg_miter_stop(&iter);
  1089. fw_card_put(card);
  1090. if (rcode == RCODE_COMPLETE) {
  1091. WARN_ON(length != 0);
  1092. return 0;
  1093. } else {
  1094. return -EIO;
  1095. }
  1096. }
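/* Write the status block to the login's status FIFO; its length comes from the LEN field already encoded in status.status. */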
  1097. static int sbp_send_status(struct sbp_target_request *req)
  1098. {
  1099. int ret, length;
  1100. struct sbp_login_descriptor *login = req->login;
  1101. length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
  1102. ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
  1103. login->status_fifo_addr, &req->status, length);
  1104. if (ret != RCODE_COMPLETE) {
  1105. pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
  1106. return -EIO;
  1107. }
  1108. pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
  1109. req->orb_pointer);
  1110. return 0;
  1111. }
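/* Repack fixed-format SCSI sense data into the SBP-2 status block layout. */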
  1112. static void sbp_sense_mangle(struct sbp_target_request *req)
  1113. {
  1114. struct se_cmd *se_cmd = &req->se_cmd;
  1115. u8 *sense = req->sense_buf;
  1116. u8 *status = req->status.data;
  1117. WARN_ON(se_cmd->scsi_sense_length < 18);
  1118. switch (sense[0] & 0x7f) { /* sfmt */
  1119. case 0x70: /* current, fixed */
  1120. status[0] = 0 << 6;
  1121. break;
  1122. case 0x71: /* deferred, fixed */
  1123. status[0] = 1 << 6;
  1124. break;
  1125. case 0x72: /* current, descriptor */
  1126. case 0x73: /* deferred, descriptor */
  1127. default:
  1128. /*
  1129. * TODO: SBP-3 specifies what we should do with descriptor
  1130. * format sense data
  1131. */
  1132. pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
  1133. sense[0]);
  1134. req->status.status |= cpu_to_be32(
  1135. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  1136. STATUS_BLOCK_DEAD(0) |
  1137. STATUS_BLOCK_LEN(1) |
  1138. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
  1139. return;
  1140. }
  1141. status[0] |= se_cmd->scsi_status & 0x3f;/* status */
  1142. status[1] =
  1143. (sense[0] & 0x80) | /* valid */
  1144. ((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */
  1145. (sense[2] & 0x0f); /* sense_key */
  1146. status[2] = se_cmd->scsi_asc; /* sense_code */
  1147. status[3] = se_cmd->scsi_ascq; /* sense_qualifier */
  1148. /* information */
  1149. status[4] = sense[3];
  1150. status[5] = sense[4];
  1151. status[6] = sense[5];
  1152. status[7] = sense[6];
  1153. /* CDB-dependent */
  1154. status[8] = sense[8];
  1155. status[9] = sense[9];
  1156. status[10] = sense[10];
  1157. status[11] = sense[11];
  1158. /* fru */
  1159. status[12] = sense[14];
  1160. /* sense_key-dependent */
  1161. status[13] = sense[15];
  1162. status[14] = sense[16];
  1163. status[15] = sense[17];
  1164. req->status.status |= cpu_to_be32(
  1165. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  1166. STATUS_BLOCK_DEAD(0) |
  1167. STATUS_BLOCK_LEN(5) |
  1168. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
  1169. }
  1170. static int sbp_send_sense(struct sbp_target_request *req)
  1171. {
  1172. struct se_cmd *se_cmd = &req->se_cmd;
  1173. if (se_cmd->scsi_sense_length) {
  1174. sbp_sense_mangle(req);
  1175. } else {
  1176. req->status.status |= cpu_to_be32(
  1177. STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
  1178. STATUS_BLOCK_DEAD(0) |
  1179. STATUS_BLOCK_LEN(1) |
  1180. STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
  1181. }
  1182. return sbp_send_status(req);
  1183. }
  1184. static void sbp_free_request(struct sbp_target_request *req)
  1185. {
  1186. kfree(req->pg_tbl);
  1187. kfree(req->cmd_buf);
  1188. kfree(req);
  1189. }
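/*
 * Management agent work item: fetch the management ORB, dispatch on its
 * function, then write the resulting status block to the status FIFO.
 */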
  1190. static void sbp_mgt_agent_process(struct work_struct *work)
  1191. {
  1192. struct sbp_management_agent *agent =
  1193. container_of(work, struct sbp_management_agent, work);
  1194. struct sbp_management_request *req = agent->request;
  1195. int ret;
  1196. int status_data_len = 0;
  1197. /* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}
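
/*
 * Address handler for the target's management agent register. A block
 * write of an ORB pointer marks the agent busy and queues
 * sbp_mgt_agent_process() to fetch and execute the management ORB; a block
 * read returns the last ORB pointer written. Other transaction codes are
 * rejected with RCODE_TYPE_ERROR.
 */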
static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}
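
/*
 * Allocate a management agent for a target port and expose its register by
 * adding an address handler within sbp_register_region; the returned agent
 * is torn down again by sbp_management_agent_unregister().
 */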
static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}
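
/*
 * TCM fabric callbacks. Most of these are fixed values or no-ops for SBP;
 * the interesting ones are write_pending/queue_data_in (which move data over
 * the FireWire bus via sbp_rw_data()) and queue_status (which sends the SBP
 * status block with any sense data).
 */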
static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_name(void)
{
	return "sbp";
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return tpg->tport_tpgt;
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static int sbp_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void sbp_close_session(struct se_session *se_sess)
{
	return;
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}

static int sbp_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}

/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void sbp_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	transport_generic_free_cmd(&req->se_cmd, 0);
	return 1;
}

static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	struct se_lun *lun;
	int count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
		count++;
	rcu_read_unlock();

	return count;
}
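
/*
 * Rebuild the SBP-2 unit directory published in the local config ROM:
 * remove any previously registered descriptor, then (only while the target
 * is enabled and has a TPG) emit the management agent address, unit
 * characteristics, reconnect timeout, one logical_unit_number entry per LUN
 * and a trailing unit unique ID leaf holding the target's GUID.
 */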
static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	struct se_lun *lun;
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 *  MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
		struct se_device *dev;
		int type;
		/*
		 * rcu_dereference_raw protected by se_lun->lun_group symlink
		 * reference to se_device->dev_group.
		 */
		dev = rcu_dereference_raw(lun->lun_se_dev);
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(lun->unpacked_lun & 0xffff);
	}
	rcu_read_unlock();

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}
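
/*
 * Parse a WWN given as exactly 16 hex digits (an EUI-64) into a u64,
 * ignoring a single trailing newline. Returns the number of characters
 * consumed on success, or -1 if the name is malformed.
 */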
static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
{
	const char *cp;
	char c, nibble;
	int pos = 0, err;

	*wwn = 0;
	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (c == '\0') {
			err = 2;
			if (pos != 16)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
		pos++;
	}
	err = 4;
fail:
	printk(KERN_INFO "err %u len %zu pos %u\n",
			err, cp - name, pos);
	return -1;
}

static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}

static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return -EINVAL;
	return 0;
}

static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}
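
/*
 * configfs: create the single TPG allowed under a target port ("tpgt_N").
 * This registers the management agent and sets default attribute values;
 * the target remains disabled until the enable attribute is written.
 */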
static struct se_portal_group *sbp_make_tpg(
		struct se_wwn *wwn,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct sbp_tpg\n");
		return ERR_PTR(-ENOMEM);
	}

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}

static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}

static struct se_wwn *sbp_make_tport(
		struct target_fabric_configfs *tf,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport;
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct sbp_tport\n");
		return ERR_PTR(-ENOMEM);
	}

	tport->guid = guid;
	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

	return &tport->tport_wwn;
}

static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}

static ssize_t sbp_wwn_show_attr_version(
		struct target_fabric_configfs *tf,
		char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

TF_WWN_ATTR_RO(sbp, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_version.attr,
	NULL,
};

static ssize_t sbp_tpg_show_directory_id(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	if (tport->directory_id == -1)
		return sprintf(page, "implicit\n");
	else
		return sprintf(page, "%06x\n", tport->directory_id);
}

static ssize_t sbp_tpg_store_directory_id(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (tport->enable) {
		pr_err("Cannot change the directory_id on an active target.\n");
		return -EBUSY;
	}

	if (strstr(page, "implicit") == page) {
		tport->directory_id = -1;
	} else {
		if (kstrtoul(page, 16, &val) < 0)
			return -EINVAL;
		if (val > 0xffffff)
			return -EINVAL;

		tport->directory_id = val;
	}

	return count;
}

static ssize_t sbp_tpg_show_enable(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->enable);
}
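
/*
 * Enabling the target publishes its unit directory in the config ROM and
 * requires at least one LUN; disabling is refused while sessions still
 * exist on the TPG.
 */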
static ssize_t sbp_tpg_store_enable(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return ret;
	}

	return count;
}

TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_directory_id.attr,
	&sbp_tpg_enable.attr,
	NULL,
};

static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}

static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	if (tport->mgt_orb_timeout == val)
		return count;

	tport->mgt_orb_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}

static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 32767))
		return -EINVAL;

	if (tport->max_reconnect_timeout == val)
		return count;

	tport->max_reconnect_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}

static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	/* XXX: also check against current count? */
	tport->max_logins_per_lun = val;

	return count;
}

TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_mgt_orb_timeout.attr,
	&sbp_tpg_attrib_max_reconnect_timeout.attr,
	&sbp_tpg_attrib_max_logins_per_lun.attr,
	NULL,
};
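
/* Fabric API callbacks and configfs attribute groups for the "sbp" fabric. */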
static const struct target_core_fabric_ops sbp_ops = {
	.module				= THIS_MODULE,
	.name				= "sbp",
	.get_fabric_name		= sbp_get_fabric_name,
	.tpg_get_wwn			= sbp_get_fabric_wwn,
	.tpg_get_tag			= sbp_get_tag,
	.tpg_check_demo_mode		= sbp_check_true,
	.tpg_check_demo_mode_cache	= sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
	.release_cmd			= sbp_release_cmd,
	.shutdown_session		= sbp_shutdown_session,
	.close_session			= sbp_close_session,
	.sess_get_index			= sbp_sess_get_index,
	.write_pending			= sbp_write_pending,
	.write_pending_status		= sbp_write_pending_status,
	.set_default_node_attributes	= sbp_set_default_node_attrs,
	.get_cmd_state			= sbp_get_cmd_state,
	.queue_data_in			= sbp_queue_data_in,
	.queue_status			= sbp_queue_status,
	.queue_tm_rsp			= sbp_queue_tm_rsp,
	.aborted_task			= sbp_aborted_task,
	.check_stop_free		= sbp_check_stop_free,

	.fabric_make_wwn		= sbp_make_tport,
	.fabric_drop_wwn		= sbp_drop_tport,
	.fabric_make_tpg		= sbp_make_tpg,
	.fabric_drop_tpg		= sbp_drop_tpg,
	.fabric_post_link		= sbp_post_link_lun,
	.fabric_pre_unlink		= sbp_pre_unlink_lun,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_init_nodeacl		= sbp_init_nodeacl,

	.tfc_wwn_attrs			= sbp_wwn_attrs,
	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
};

static int __init sbp_init(void)
{
	return target_register_template(&sbp_ops);
}

static void __exit sbp_exit(void)
{
	target_unregister_template(&sbp_ops);
}

MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");

module_init(sbp_init);
module_exit(sbp_exit);