ipmi_si_intf.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_si.c
 *
 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
 * BT).
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <minyard@mvista.com>
 *         source@mvista.com
 *
 * Copyright 2002 MontaVista Software Inc.
 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
 */

/*
 * This file holds the "policy" for the interface to the SMI state
 * machine.  It does the configuration, handles timers and interrupts,
 * and drives the real SMI state machine.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include "ipmi_si.h"
#include <linux/string.h>
#include <linux/ctype.h>

#define PFX "ipmi_si: "

/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC	10000
#define SI_USEC_PER_JIFFY	(1000000/HZ)
#define SI_TIMEOUT_JIFFIES	(SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC	250 /* .25ms when the SM requests a
				       short timeout */
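
/*
 * For example, with HZ = 1000 (a common configuration),
 * SI_USEC_PER_JIFFY is 1000 and SI_TIMEOUT_JIFFIES works out to 10
 * jiffies, matching the 10 ms poll period noted above.
 */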

enum si_intf_state {
	SI_NORMAL,
	SI_GETTING_FLAGS,
	SI_GETTING_EVENTS,
	SI_CLEARING_FLAGS,
	SI_GETTING_MESSAGES,
	SI_CHECKING_ENABLES,
	SI_SETTING_ENABLES
	/* FIXME - add watchdog stuff. */
};

/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG		2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT	2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT	1
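
/*
 * Human-readable names for the interface types, indexed by the
 * interface's si_type (entry 0 stands in for an invalid/unset type).
 */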
static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" };

static int initialized;

/*
 * Indexes into stats[] in smi_info below.
 */
enum si_stat_indexes {
	/*
	 * Number of times the driver requested a timer while an operation
	 * was in progress.
	 */
	SI_STAT_short_timeouts = 0,

	/*
	 * Number of times the driver requested a timer while nothing was in
	 * progress.
	 */
	SI_STAT_long_timeouts,

	/* Number of times the interface was idle while being polled. */
	SI_STAT_idles,

	/* Number of interrupts the driver handled. */
	SI_STAT_interrupts,

	/* Number of times the driver got an ATTN from the hardware. */
	SI_STAT_attentions,

	/* Number of times the driver requested flags from the hardware. */
	SI_STAT_flag_fetches,

	/* Number of times the hardware didn't follow the state machine. */
	SI_STAT_hosed_count,

	/* Number of completed messages. */
	SI_STAT_complete_transactions,

	/* Number of IPMI events received from the hardware. */
	SI_STAT_events,

	/* Number of watchdog pretimeouts. */
	SI_STAT_watchdog_pretimeouts,

	/* Number of asynchronous messages received. */
	SI_STAT_incoming_messages,

	/* This *must* remain last, add new values above this. */
	SI_NUM_STATS
};

struct smi_info {
	int                    intf_num;
	ipmi_smi_t             intf;
	struct si_sm_data      *si_sm;
	const struct si_sm_handlers *handlers;
	spinlock_t             si_lock;
	struct ipmi_smi_msg    *waiting_msg;
	struct ipmi_smi_msg    *curr_msg;
	enum si_intf_state     si_state;

	/*
	 * Used to handle the various types of I/O that can occur with
	 * IPMI
	 */
	struct si_sm_io io;

	/*
	 * Per-OEM handler, called from handle_flags().  Returns 1
	 * when handle_flags() needs to be re-run or 0 indicating it
	 * set si_state itself.
	 */
	int (*oem_data_avail_handler)(struct smi_info *smi_info);

	/*
	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
	 * is set to hold the flags until we are done handling everything
	 * from the flags.
	 */
#define RECEIVE_MSG_AVAIL	0x01
#define EVENT_MSG_BUFFER_FULL	0x02
#define WDT_PRE_TIMEOUT_INT	0x08
#define OEM0_DATA_AVAIL		0x20
#define OEM1_DATA_AVAIL		0x40
#define OEM2_DATA_AVAIL		0x80
#define OEM_DATA_AVAIL		(OEM0_DATA_AVAIL | \
				 OEM1_DATA_AVAIL | \
				 OEM2_DATA_AVAIL)
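
	/*
	 * The flag values above mirror the bit positions in the Get
	 * Message Flags response defined by the IPMI spec; the gaps
	 * (0x04, 0x10) correspond to bits that are reserved there.
	 */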
	unsigned char msg_flags;

	/* Does the BMC have an event buffer? */
	bool has_event_buffer;

	/*
	 * If set to true, this will request events the next time the
	 * state machine is idle.
	 */
	atomic_t req_events;

	/*
	 * If true, run the state machine to completion on every send
	 * call.  Generally used after a panic to make sure stuff goes
	 * out.
	 */
	bool run_to_completion;

	/* The timer for this si. */
	struct timer_list si_timer;

	/* Set if the timer may be started. */
	bool timer_can_start;

	/* Set if the timer is running (timer_pending() isn't enough). */
	bool timer_running;

	/* The time (in jiffies) the last timeout occurred at. */
	unsigned long last_timeout_jiffies;

	/* Are we waiting for the events, pretimeouts, received msgs? */
	atomic_t need_watch;

	/*
	 * The driver will disable interrupts when it gets into a
	 * situation where it cannot handle messages due to lack of
	 * memory.  Once that situation clears up, it will re-enable
	 * interrupts.
	 */
	bool interrupt_disabled;

	/*
	 * Does the BMC support events?
	 */
	bool supports_event_msg_buff;

	/*
	 * Can we disable interrupts via the global enables receive irq
	 * bit?  There are currently two forms of brokenness: some
	 * systems cannot disable the bit (which is technically within
	 * the spec but a bad idea) and some systems have the bit
	 * forced to zero even though interrupts work (which is
	 * clearly outside the spec).  The next bool tells which form
	 * of brokenness is present.
	 */
	bool cannot_disable_irq;

	/*
	 * Some systems are broken and cannot set the irq enable
	 * bit, even if they support interrupts.
	 */
	bool irq_enable_broken;

	/*
	 * Did we get an attention that we did not handle?
	 */
	bool got_attn;

	/* From the get device id response... */
	struct ipmi_device_id device_id;

	/* Default driver model device. */
	struct platform_device *pdev;

	/* Have we added the device group to the device? */
	bool dev_group_added;

	/* Have we added the platform device? */
	bool pdev_registered;

	/* Counters and things for the proc filesystem. */
	atomic_t stats[SI_NUM_STATS];

	struct task_struct *thread;

	struct list_head link;
};
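
/*
 * These helpers paste the stat name onto the SI_STAT_ prefix, so e.g.
 * smi_inc_stat(smi_info, attentions) increments stats[SI_STAT_attentions].
 */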
#define smi_inc_stat(smi, stat) \
	atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
#define smi_get_stat(smi, stat) \
	((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))

#define IPMI_MAX_INTFS 4
static int force_kipmid[IPMI_MAX_INTFS];
static int num_force_kipmid;

static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
static int num_max_busy_us;

static bool unload_when_empty = true;

static int try_smi_init(struct smi_info *smi);
static void shutdown_one_si(struct smi_info *smi_info);
static void cleanup_one_si(struct smi_info *smi_info);
static void cleanup_ipmi_si(void);

#ifdef DEBUG_TIMING
void debug_timestamp(char *msg)
{
	struct timespec64 t;

	getnstimeofday64(&t);
	pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
}
#else
#define debug_timestamp(x)
#endif

static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}

static void deliver_recv_msg(struct smi_info *smi_info,
			     struct ipmi_smi_msg *msg)
{
	/* Deliver the message to the upper layer. */
	if (smi_info->intf)
		ipmi_smi_msg_received(smi_info->intf, msg);
	else
		ipmi_free_smi_msg(msg);
}

static void return_hosed_msg(struct smi_info *smi_info, int cCode)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
		cCode = IPMI_ERR_UNSPECIFIED;
	/* else use it as is */

	/* Make it a response */
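	/*
	 * A response netfn is the request netfn + 1, and the netfn
	 * sits in the top six bits of byte 0 (netfn << 2), so adding
	 * one to the netfn is the same as OR-ing in (1 << 2) = 4.
	 */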
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = cCode;
	msg->rsp_size = 3;

	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}

static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
	int rv;

	if (!smi_info->waiting_msg) {
		smi_info->curr_msg = NULL;
		rv = SI_SM_IDLE;
	} else {
		int err;

		smi_info->curr_msg = smi_info->waiting_msg;
		smi_info->waiting_msg = NULL;
		debug_timestamp("Start2");
		err = atomic_notifier_call_chain(&xaction_notifier_list,
				0, smi_info);
		if (err & NOTIFY_STOP_MASK) {
			rv = SI_SM_CALL_WITHOUT_DELAY;
			goto out;
		}
		err = smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
		if (err)
			return_hosed_msg(smi_info, err);

		rv = SI_SM_CALL_WITHOUT_DELAY;
	}
out:
	return rv;
}

static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
{
	if (!smi_info->timer_can_start)
		return;
	smi_info->last_timeout_jiffies = jiffies;
	mod_timer(&smi_info->si_timer, new_val);
	smi_info->timer_running = true;
}

/*
 * Start a new message and (re)start the timer and thread.
 */
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
			  unsigned int size)
{
	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

	if (smi_info->thread)
		wake_up_process(smi_info->thread);

	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
}

static void start_check_enables(struct smi_info *smi_info)
{
	unsigned char msg[2];
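
	/*
	 * Request byte 0 is (netfn << 2) | LUN (LUN 0 here) and byte 1
	 * is the command; all requests built in this file use this
	 * layout.
	 */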
	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

	start_new_msg(smi_info, msg, 2);
	smi_info->si_state = SI_CHECKING_ENABLES;
}

static void start_clear_flags(struct smi_info *smi_info)
{
	unsigned char msg[3];

	/* Make sure the watchdog pre-timeout flag is not set at startup. */
	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
	msg[2] = WDT_PRE_TIMEOUT_INT;

	start_new_msg(smi_info, msg, 3);
	smi_info->si_state = SI_CLEARING_FLAGS;
}

static void start_getting_msg_queue(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_MESSAGES;
}

static void start_getting_events(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_EVENTS;
}

/*
 * When we have a situation where we run out of memory and cannot
 * allocate messages, we just leave them in the BMC and run the system
 * polled until we can allocate some memory.  Once we have some
 * memory, we will re-enable the interrupt.
 *
 * Note that we cannot just use disable_irq(), since the interrupt may
 * be shared.
 */
static inline bool disable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = true;
		start_check_enables(smi_info);
		return true;
	}
	return false;
}

static inline bool enable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = false;
		start_check_enables(smi_info);
		return true;
	}
	return false;
}
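
/*
 * Note that neither helper above touches the hardware directly; both
 * just flip interrupt_disabled and kick off a SI_CHECKING_ENABLES
 * transaction, and handle_transaction_done() then rewrites the global
 * enables to match the new state.
 */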

/*
 * Allocate a message.  If unable to allocate, start the interrupt
 * disable process and return NULL.  If able to allocate but
 * interrupts are disabled, free the message and return NULL after
 * starting the interrupt enable process.
 */
static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	msg = ipmi_alloc_smi_msg();
	if (!msg) {
		if (!disable_si_irq(smi_info))
			smi_info->si_state = SI_NORMAL;
	} else if (enable_si_irq(smi_info)) {
		ipmi_free_smi_msg(msg);
		msg = NULL;
	}
	return msg;
}

static void handle_flags(struct smi_info *smi_info)
{
retry:
	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		smi_inc_stat(smi_info, watchdog_pretimeouts);

		start_clear_flags(smi_info);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		if (smi_info->intf)
			ipmi_smi_watchdog_pretimeout(smi_info->intf);
	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_msg_queue(smi_info);
	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_events(smi_info);
	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
		   smi_info->oem_data_avail_handler) {
		if (smi_info->oem_data_avail_handler(smi_info))
			goto retry;
	} else
		smi_info->si_state = SI_NORMAL;
}

/*
 * Global enables we care about.
 */
#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
			     IPMI_BMC_EVT_MSG_INTR)

static u8 current_global_enables(struct smi_info *smi_info, u8 base,
				 bool *irq_on)
{
	u8 enables = 0;

	if (smi_info->supports_event_msg_buff)
		enables |= IPMI_BMC_EVT_MSG_BUFF;

	if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
	     smi_info->cannot_disable_irq) &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_RCV_MSG_INTR;

	if (smi_info->supports_event_msg_buff &&
	    smi_info->io.irq && !smi_info->interrupt_disabled &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_EVT_MSG_INTR;

	*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);

	return enables;
}

static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
{
	u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);

	irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;

	if ((bool)irqstate == irq_on)
		return;

	if (irq_on)
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
	else
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
}

static void handle_transaction_done(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	debug_timestamp("Done");
	switch (smi_info->si_state) {
	case SI_NORMAL:
		if (!smi_info->curr_msg)
			break;

		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		deliver_recv_msg(smi_info, msg);
		break;

	case SI_GETTING_FLAGS:
	{
		unsigned char msg[4];
		unsigned int len;

		/* We got the flags from the SMI, now handle them. */
		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			/* Error fetching flags, just give up for now. */
			smi_info->si_state = SI_NORMAL;
		} else if (len < 4) {
			/*
			 * Hmm, no flags.  That's technically illegal, but
			 * don't use uninitialized data.
			 */
			smi_info->si_state = SI_NORMAL;
		} else {
			smi_info->msg_flags = msg[3];
			handle_flags(smi_info);
		}
		break;
	}

	case SI_CLEARING_FLAGS:
	{
		unsigned char msg[3];

		/* We cleared the flags. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
		if (msg[2] != 0) {
			/* Error clearing flags */
			dev_warn(smi_info->io.dev,
				 "Error clearing flags: %2.2x\n", msg[2]);
		}
		smi_info->si_state = SI_NORMAL;
		break;
	}

	case SI_GETTING_EVENTS:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the event flag. */
			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, events);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);
			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_GETTING_MESSAGES:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting message, probably done. */
			msg->done(msg);

			/* Take off the msg flag. */
			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, incoming_messages);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);
			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_CHECKING_ENABLES:
	{
		unsigned char msg[4];
		u8 enables;
		bool irq_on;

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			dev_warn(smi_info->io.dev,
				 "Couldn't get irq info: %x.\n", msg[2]);
			dev_warn(smi_info->io.dev,
				 "Maybe ok, but ipmi might run very slowly.\n");
			smi_info->si_state = SI_NORMAL;
			break;
		}
		enables = current_global_enables(smi_info, 0, &irq_on);
		if (smi_info->io.si_type == SI_BT)
			/* BT has its own interrupt enable bit. */
			check_bt_irq(smi_info, irq_on);
		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
			/* Enables are not correct, fix them. */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_SETTING_ENABLES;
		} else if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}

	case SI_SETTING_ENABLES:
	{
		unsigned char msg[4];

		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0)
			dev_warn(smi_info->io.dev,
				 "Could not set the global enables: 0x%x.\n",
				 msg[2]);

		if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}
	}
}

/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time (in microseconds), interrupts should pass in zero.  Must be
 * called with si_lock held and interrupts disabled.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
{
	enum si_sm_result si_sm_result;

restart:
	/*
	 * There used to be a loop here that waited a little while
	 * (around 25us) before giving up.  That turned out to be
	 * pointless, the minimum delays I was seeing were in the 300us
	 * range, which is far too long to wait in an interrupt.  So
	 * we just run until the state machine tells us something
	 * happened or it needs a delay.
	 */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
	time = 0;
	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
		smi_inc_stat(smi_info, complete_transactions);

		handle_transaction_done(smi_info);
		goto restart;
	} else if (si_sm_result == SI_SM_HOSED) {
		smi_inc_stat(smi_info, hosed_count);

		/*
		 * Do this before return_hosed_msg(), because that
		 * releases the lock.
		 */
		smi_info->si_state = SI_NORMAL;
		if (smi_info->curr_msg != NULL) {
			/*
			 * If we were handling a user message, format
			 * a response to send to the upper layer to
			 * tell it about the error.
			 */
			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
		}
		goto restart;
	}

	/*
	 * We prefer handling attn over new messages.  But don't do
	 * this if there is not yet an upper layer to handle anything.
	 */
	if (likely(smi_info->intf) &&
	    (si_sm_result == SI_SM_ATTN || smi_info->got_attn)) {
		unsigned char msg[2];

		if (smi_info->si_state != SI_NORMAL) {
			/*
			 * We got an ATTN, but we are doing something else.
			 * Handle the ATTN later.
			 */
			smi_info->got_attn = true;
		} else {
			smi_info->got_attn = false;
			smi_inc_stat(smi_info, attentions);

			/*
			 * Got an attn, send down a get message flags to see
			 * what's causing it.  It would be better to handle
			 * this in the upper layer, but due to the way
			 * interrupts work with the SMI, that's not really
			 * possible.
			 */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_GET_MSG_FLAGS_CMD;

			start_new_msg(smi_info, msg, 2);
			smi_info->si_state = SI_GETTING_FLAGS;
			goto restart;
		}
	}

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		smi_inc_stat(smi_info, idles);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)
			goto restart;
	}

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events))) {
		/*
		 * We are idle and the upper layer requested that I fetch
		 * events, so do so.
		 */
		atomic_set(&smi_info->req_events, 0);

		/*
		 * Take this opportunity to check the interrupt and
		 * message enable state for the BMC.  The BMC can be
		 * asynchronously reset, and may thus get interrupts
		 * disabled and messages disabled.
		 */
		if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
			start_check_enables(smi_info);
		} else {
			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
			if (!smi_info->curr_msg)
				goto out;

			start_getting_events(smi_info);
		}
		goto restart;
	}

	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
		/* OK if it fails, the timer will just go off. */
		if (del_timer(&smi_info->si_timer))
			smi_info->timer_running = false;
	}

out:
	return si_sm_result;
}

static void check_start_timer_thread(struct smi_info *smi_info)
{
	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		if (smi_info->thread)
			wake_up_process(smi_info->thread);

		start_next_msg(smi_info);
		smi_event_handler(smi_info, 0);
	}
}

static void flush_messages(void *send_info)
{
	struct smi_info *smi_info = send_info;
	enum si_sm_result result;

	/*
	 * Currently, this function is called only in run-to-completion
	 * mode.  This means we are single-threaded, no need for locks.
	 */
	result = smi_event_handler(smi_info, 0);
	while (result != SI_SM_IDLE) {
		udelay(SI_SHORT_TIMEOUT_USEC);
		result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
	}
}

static void sender(void *send_info,
		   struct ipmi_smi_msg *msg)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;

	debug_timestamp("Enqueue");

	if (smi_info->run_to_completion) {
		/*
		 * If we are running to completion, start it.  Upper
		 * layer will call flush_messages to clear it out.
		 */
		smi_info->waiting_msg = msg;
		return;
	}

	spin_lock_irqsave(&smi_info->si_lock, flags);
	/*
	 * The following two lines don't need to be under the lock for
	 * the lock's sake, but they do need SMP memory barriers to
	 * avoid getting things out of order.  We are already claiming
	 * the lock, anyway, so just do it under the lock to avoid the
	 * ordering problem.
	 */
	BUG_ON(smi_info->waiting_msg);
	smi_info->waiting_msg = msg;
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

static void set_run_to_completion(void *send_info, bool i_run_to_completion)
{
	struct smi_info *smi_info = send_info;

	smi_info->run_to_completion = i_run_to_completion;
	if (i_run_to_completion)
		flush_messages(smi_info);
}

/*
 * Use -1 in the nsec value of the busy waiting timespec to tell that
 * we are spinning in kipmid looking for something and not delaying
 * between checks
 */
static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
{
	ts->tv_nsec = -1;
}
static inline int ipmi_si_is_busy(struct timespec64 *ts)
{
	return ts->tv_nsec != -1;
}
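
/*
 * Returns 1 while the thread should keep busy-waiting (stay runnable
 * and just yield with schedule()), 0 once the per-interface max_busy
 * window has expired and the thread should really sleep.
 */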
static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
					const struct smi_info *smi_info,
					struct timespec64 *busy_until)
{
	unsigned int max_busy_us = 0;

	if (smi_info->intf_num < num_max_busy_us)
		max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
		ipmi_si_set_not_busy(busy_until);
	else if (!ipmi_si_is_busy(busy_until)) {
		getnstimeofday64(busy_until);
		timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
	} else {
		struct timespec64 now;

		getnstimeofday64(&now);
		if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
			ipmi_si_set_not_busy(busy_until);
			return 0;
		}
	}
	return 1;
}

/*
 * A busy-waiting loop for speeding up IPMI operation.
 *
 * Lousy hardware makes this hard.  This is only enabled for systems
 * that are not BT and do not have interrupts.  It starts spinning
 * when an operation is complete or until max_busy tells it to stop
 * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
 * Documentation/IPMI.txt for details.
 */
static int ipmi_thread(void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;
	enum si_sm_result smi_result;
	struct timespec64 busy_until;

	ipmi_si_set_not_busy(&busy_until);
	set_user_nice(current, MAX_NICE);
	while (!kthread_should_stop()) {
		int busy_wait;

		spin_lock_irqsave(&(smi_info->si_lock), flags);
		smi_result = smi_event_handler(smi_info, 0);

		/*
		 * If the driver is doing something, there is a possible
		 * race with the timer.  If the timer handler sees idle,
		 * and the thread here sees something else, the timer
		 * handler won't restart the timer even though it is
		 * required.  So start it here if necessary.
		 */
		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
		if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
			; /* do nothing */
		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
			schedule();
		else if (smi_result == SI_SM_IDLE) {
			if (atomic_read(&smi_info->need_watch)) {
				schedule_timeout_interruptible(100);
			} else {
				/* Wait to be woken up when we are needed. */
				__set_current_state(TASK_INTERRUPTIBLE);
				schedule();
			}
		} else
			schedule_timeout_interruptible(1);
	}
	return 0;
}

static void poll(void *send_info)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags = 0;
	bool run_to_completion = smi_info->run_to_completion;

	/*
	 * Make sure there is some delay in the poll loop so we can
	 * drive time forward and timeout things.
	 */
	udelay(10);
	if (!run_to_completion)
		spin_lock_irqsave(&smi_info->si_lock, flags);
	smi_event_handler(smi_info, 10);
	if (!run_to_completion)
		spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

static void request_events(void *send_info)
{
	struct smi_info *smi_info = send_info;

	if (!smi_info->has_event_buffer)
		return;

	atomic_set(&smi_info->req_events, 1);
}

static void set_need_watch(void *send_info, bool enable)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;

	atomic_set(&smi_info->need_watch, enable);
	spin_lock_irqsave(&smi_info->si_lock, flags);
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

static void smi_timeout(struct timer_list *t)
{
	struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
	enum si_sm_result smi_result;
	unsigned long flags;
	unsigned long jiffies_now;
	long time_diff;
	long timeout;

	spin_lock_irqsave(&(smi_info->si_lock), flags);
	debug_timestamp("Timer");

	jiffies_now = jiffies;
	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
		     * SI_USEC_PER_JIFFY);
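
	/*
	 * time_diff is the elapsed time in microseconds, which is what
	 * smi_event_handler() expects for its time argument.
	 */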
	smi_result = smi_event_handler(smi_info, time_diff);

	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
		smi_inc_stat(smi_info, long_timeouts);
		goto do_mod_timer;
	}

	/*
	 * If the state machine asks for a short delay, then shorten
	 * the timer timeout.
	 */
	if (smi_result == SI_SM_CALL_WITH_DELAY) {
		smi_inc_stat(smi_info, short_timeouts);
		timeout = jiffies + 1;
	} else {
		smi_inc_stat(smi_info, long_timeouts);
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
	}

do_mod_timer:
	if (smi_result != SI_SM_IDLE)
		smi_mod_timer(smi_info, timeout);
	else
		smi_info->timer_running = false;
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}

irqreturn_t ipmi_si_irq_handler(int irq, void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;

	if (smi_info->io.si_type == SI_BT)
		/* We need to clear the IRQ flag for the BT interface. */
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
				     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	smi_inc_stat(smi_info, interrupts);

	debug_timestamp("Interrupt");

	smi_event_handler(smi_info, 0);
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
	return IRQ_HANDLED;
}

static int smi_start_processing(void *send_info,
				ipmi_smi_t intf)
{
	struct smi_info *new_smi = send_info;
	int enable = 0;

	new_smi->intf = intf;

	/* Set up the timer that drives the interface. */
	timer_setup(&new_smi->si_timer, smi_timeout, 0);
	new_smi->timer_can_start = true;
	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);

	/* Try to claim any interrupts. */
	if (new_smi->io.irq_setup) {
		new_smi->io.irq_handler_data = new_smi;
		new_smi->io.irq_setup(&new_smi->io);
	}

	/*
	 * Check if the user forcefully enabled the daemon.
	 */
	if (new_smi->intf_num < num_force_kipmid)
		enable = force_kipmid[new_smi->intf_num];
	/*
	 * The BT interface is efficient enough to not need a thread,
	 * and there is no need for a thread if we have interrupts.
	 */
	else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
		enable = 1;

	if (enable) {
		new_smi->thread = kthread_run(ipmi_thread, new_smi,
					      "kipmi%d", new_smi->intf_num);
		if (IS_ERR(new_smi->thread)) {
			dev_notice(new_smi->io.dev, "Could not start"
				   " kernel thread due to error %ld, only using"
				   " timers to drive the interface\n",
				   PTR_ERR(new_smi->thread));
			new_smi->thread = NULL;
		}
	}

	return 0;
}

static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
	struct smi_info *smi = send_info;

	data->addr_src = smi->io.addr_source;
	data->dev = smi->io.dev;
	data->addr_info = smi->io.addr_info;
	get_device(smi->io.dev);

	return 0;
}

static void set_maintenance_mode(void *send_info, bool enable)
{
	struct smi_info *smi_info = send_info;

	if (!enable)
		atomic_set(&smi_info->req_events, 0);
}

static const struct ipmi_smi_handlers handlers = {
	.owner                  = THIS_MODULE,
	.start_processing       = smi_start_processing,
	.get_smi_info           = get_smi_info,
	.sender                 = sender,
	.request_events         = request_events,
	.set_need_watch         = set_need_watch,
	.set_maintenance_mode   = set_maintenance_mode,
	.set_run_to_completion  = set_run_to_completion,
	.flush_messages         = flush_messages,
	.poll                   = poll,
};

static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */

static const char * const addr_space_to_str[] = { "i/o", "mem" };

module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
		 " disabled (0).  Normally the IPMI driver auto-detects"
		 " this, but the value may be overridden by this parm.");
module_param(unload_when_empty, bool, 0);
MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
		 " specified or found, default is 1.  Setting to 0"
		 " is useful for hot add of devices using hotmod.");
module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
MODULE_PARM_DESC(kipmid_max_busy_us,
		 "Max time (in microseconds) to busy-wait for IPMI data before"
		 " sleeping. 0 (default) means to wait forever. Set to 100-500"
		 " if kipmid is using up a lot of CPU time.");

void ipmi_irq_finish_setup(struct si_sm_io *io)
{
	if (io->si_type == SI_BT)
		/* Enable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG,
			    IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
}

void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
	if (io->si_type == SI_BT)
		/* Disable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}

static void std_irq_cleanup(struct si_sm_io *io)
{
	ipmi_irq_start_cleanup(io);
	free_irq(io->irq, io->irq_handler_data);
}

int ipmi_std_irq_setup(struct si_sm_io *io)
{
	int rv;

	if (!io->irq)
		return 0;

	rv = request_irq(io->irq,
			 ipmi_si_irq_handler,
			 IRQF_SHARED,
			 DEVICE_NAME,
			 io->irq_handler_data);
	if (rv) {
		dev_warn(io->dev, "%s unable to claim interrupt %d,"
			 " running polled\n",
			 DEVICE_NAME, io->irq);
		io->irq = 0;
	} else {
		io->irq_cleanup = std_irq_cleanup;
		ipmi_irq_finish_setup(io);
		dev_info(io->dev, "Using irq %d\n", io->irq);
	}

	return rv;
}

static int wait_for_msg_done(struct smi_info *smi_info)
{
	enum si_sm_result smi_result;

	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
	for (;;) {
		if (smi_result == SI_SM_CALL_WITH_DELAY ||
		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
			schedule_timeout_uninterruptible(1);
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, jiffies_to_usecs(1));
		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, 0);
		} else
			break;
	}
	if (smi_result == SI_SM_HOSED)
		/*
		 * We couldn't get the state machine to run, so whatever's at
		 * the port is probably not an IPMI SMI interface.
		 */
		return -ENODEV;

	return 0;
}

static int try_get_dev_id(struct smi_info *smi_info)
{
	unsigned char msg[2];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/*
	 * Do a Get Device ID command, since it comes back with some
	 * useful info.
	 */
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv)
		goto out;

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	/* Check and record info from the get device id, in case we need it. */
	rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
				     resp + 2, resp_len - 2,
				     &smi_info->device_id);

out:
	kfree(resp);
	return rv;
}

static int get_global_enables(struct smi_info *smi_info, u8 *enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from get global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
	    resp[2] != 0) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from get global enables command: %ld %x %x %x\n",
			 resp_len, resp[0], resp[1], resp[2]);
		rv = -EINVAL;
		goto out;
	} else {
		*enables = resp[3];
	}

out:
	kfree(resp);
	return rv;
}

/*
 * Returns 1 if it gets an error from the command.
 */
static int set_global_enables(struct smi_info *smi_info, u8 enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = enables;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from set global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from set global enables command: %ld %x %x\n",
			 resp_len, resp[0], resp[1]);
		rv = -EINVAL;
		goto out;
	}

	if (resp[2] != 0)
		rv = 1;

out:
	kfree(resp);
	return rv;
}

/*
 * Some BMCs do not support clearing the receive irq bit in the global
 * enables (even if they don't support interrupts on the BMC).  Check
 * for this and handle it properly.
 */
static void check_clr_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
			/* Already clear, should work ok. */
			return;

		enables &= ~IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check clearing the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * An error when clearing the receive irq bit means
		 * clearing the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
	}
}

/*
 * Some BMCs do not support setting the interrupt bits in the global
 * enables even if they support interrupts.  Clearly bad, but we can
 * compensate.
 */
static void check_set_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	if (!smi_info->io.irq)
		return;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		enables |= IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check setting the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * An error when setting the receive irq bit means
		 * setting the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
		smi_info->irq_enable_broken = true;
	}
}
static int try_enable_event_buffer(struct smi_info *smi_info)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn(PFX "Error getting response from get global enables command, the event buffer is not enabled.\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
	    resp[2] != 0) {
		pr_warn(PFX "Invalid return from get global enables command, cannot enable the event buffer.\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
		/* buffer is already enabled, nothing to do. */
		smi_info->supports_event_msg_buff = true;
		goto out;
	}

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn(PFX "Error getting response from set global enables command, the event buffer is not enabled.\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		pr_warn(PFX "Invalid return from set global enables command, cannot enable the event buffer.\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[2] != 0)
		/*
		 * An error when setting the event buffer bit means
		 * that the event buffer is not supported.
		 */
		rv = -ENOENT;
	else
		smi_info->supports_event_msg_buff = true;

out:
	kfree(resp);
	return rv;
}
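
/*
 * Legacy /proc files (type, si_stats and params). These expose the
 * same information as the sysfs attributes further down and are only
 * built when CONFIG_IPMI_PROC_INTERFACE is set.
 */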
#ifdef CONFIG_IPMI_PROC_INTERFACE
static int smi_type_proc_show(struct seq_file *m, void *v)
{
	struct smi_info *smi = m->private;

	seq_printf(m, "%s\n", si_to_str[smi->io.si_type]);

	return 0;
}

static int smi_type_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_type_proc_show, PDE_DATA(inode));
}

static const struct file_operations smi_type_proc_ops = {
	.open = smi_type_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int smi_si_stats_proc_show(struct seq_file *m, void *v)
{
	struct smi_info *smi = m->private;

	seq_printf(m, "interrupts_enabled: %d\n",
		   smi->io.irq && !smi->interrupt_disabled);
	seq_printf(m, "short_timeouts: %u\n",
		   smi_get_stat(smi, short_timeouts));
	seq_printf(m, "long_timeouts: %u\n",
		   smi_get_stat(smi, long_timeouts));
	seq_printf(m, "idles: %u\n",
		   smi_get_stat(smi, idles));
	seq_printf(m, "interrupts: %u\n",
		   smi_get_stat(smi, interrupts));
	seq_printf(m, "attentions: %u\n",
		   smi_get_stat(smi, attentions));
	seq_printf(m, "flag_fetches: %u\n",
		   smi_get_stat(smi, flag_fetches));
	seq_printf(m, "hosed_count: %u\n",
		   smi_get_stat(smi, hosed_count));
	seq_printf(m, "complete_transactions: %u\n",
		   smi_get_stat(smi, complete_transactions));
	seq_printf(m, "events: %u\n",
		   smi_get_stat(smi, events));
	seq_printf(m, "watchdog_pretimeouts: %u\n",
		   smi_get_stat(smi, watchdog_pretimeouts));
	seq_printf(m, "incoming_messages: %u\n",
		   smi_get_stat(smi, incoming_messages));
	return 0;
}

static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_si_stats_proc_show, PDE_DATA(inode));
}

static const struct file_operations smi_si_stats_proc_ops = {
	.open = smi_si_stats_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int smi_params_proc_show(struct seq_file *m, void *v)
{
	struct smi_info *smi = m->private;

	seq_printf(m,
		   "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
		   si_to_str[smi->io.si_type],
		   addr_space_to_str[smi->io.addr_type],
		   smi->io.addr_data,
		   smi->io.regspacing,
		   smi->io.regsize,
		   smi->io.regshift,
		   smi->io.irq,
		   smi->io.slave_addr);

	return 0;
}

static int smi_params_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_params_proc_show, PDE_DATA(inode));
}

static const struct file_operations smi_params_proc_ops = {
	.open = smi_params_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif
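
/*
 * sysfs statistics attributes. IPMI_SI_ATTR(foo) expands to a
 * read-only "foo" device attribute whose show function prints the
 * matching counter from the interface's stats via smi_get_stat().
 */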
#define IPMI_SI_ATTR(name) \
static ssize_t ipmi_##name##_show(struct device *dev,		\
				  struct device_attribute *attr,	\
				  char *buf)			\
{								\
	struct smi_info *smi_info = dev_get_drvdata(dev);	\
								\
	return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
}								\
static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)

static ssize_t ipmi_type_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);

	return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
}
static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);

static ssize_t ipmi_interrupts_enabled_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);
	int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;

	return snprintf(buf, 10, "%d\n", enabled);
}
static DEVICE_ATTR(interrupts_enabled, S_IRUGO,
		   ipmi_interrupts_enabled_show, NULL);

IPMI_SI_ATTR(short_timeouts);
IPMI_SI_ATTR(long_timeouts);
IPMI_SI_ATTR(idles);
IPMI_SI_ATTR(interrupts);
IPMI_SI_ATTR(attentions);
IPMI_SI_ATTR(flag_fetches);
IPMI_SI_ATTR(hosed_count);
IPMI_SI_ATTR(complete_transactions);
IPMI_SI_ATTR(events);
IPMI_SI_ATTR(watchdog_pretimeouts);
IPMI_SI_ATTR(incoming_messages);

static ssize_t ipmi_params_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);

	return snprintf(buf, 200,
			"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
			si_to_str[smi_info->io.si_type],
			addr_space_to_str[smi_info->io.addr_type],
			smi_info->io.addr_data,
			smi_info->io.regspacing,
			smi_info->io.regsize,
			smi_info->io.regshift,
			smi_info->io.irq,
			smi_info->io.slave_addr);
}
static DEVICE_ATTR(params, S_IRUGO, ipmi_params_show, NULL);

static struct attribute *ipmi_si_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_interrupts_enabled.attr,
	&dev_attr_short_timeouts.attr,
	&dev_attr_long_timeouts.attr,
	&dev_attr_idles.attr,
	&dev_attr_interrupts.attr,
	&dev_attr_attentions.attr,
	&dev_attr_flag_fetches.attr,
	&dev_attr_hosed_count.attr,
	&dev_attr_complete_transactions.attr,
	&dev_attr_events.attr,
	&dev_attr_watchdog_pretimeouts.attr,
	&dev_attr_incoming_messages.attr,
	&dev_attr_params.attr,
	NULL
};

static const struct attribute_group ipmi_si_dev_attr_group = {
	.attrs = ipmi_si_dev_attrs,
};

/*
 * oem_data_avail_to_receive_msg_avail
 * @info - smi_info structure with msg_flags set
 *
 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
 * Returns 1 indicating need to re-run handle_flags().
 */
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
{
	smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
			       RECEIVE_MSG_AVAIL);
	return 1;
}

/*
 * setup_dell_poweredge_oem_data_handler
 * @info - smi_info.device_id must be populated
 *
 * Systems that match, but have firmware version < 1.40 may assert
 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
 * as RECEIVE_MSG_AVAIL instead.
 *
 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
 * asserts the OEM[012] bits, and if it did, the driver would have to
 * change to handle that properly, we don't actually check for the
 * firmware version.
 * Device ID = 0x20             BMC on PowerEdge 8G servers
 * Device Revision = 0x80
 * Firmware Revision1 = 0x01    BMC version 1.40
 * Firmware Revision2 = 0x40    BCD encoded
 * IPMI Version = 0x51          IPMI 1.5
 * Manufacturer ID = A2 02 00   Dell IANA
 *
 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
 *
 */
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
#define DELL_IANA_MFR_ID 0x0002a2

static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
{
	struct ipmi_device_id *id = &smi_info->device_id;

	if (id->manufacturer_id == DELL_IANA_MFR_ID) {
		if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
		    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
		    id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		} else if (ipmi_version_major(id) < 1 ||
			   (ipmi_version_major(id) == 1 &&
			    ipmi_version_minor(id) < 5)) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		}
	}
}
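
/*
 * Complete the current message locally with completion code 0xCA
 * ("cannot return number of requested data bytes") instead of sending
 * it to the BMC, so the caller retries with a different buffer size.
 */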
#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
static void return_hosed_msg_badsize(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	/* Make it a response */
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
	msg->rsp_size = 3;

	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}

/*
 * dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be populated
 *
 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
 * not respond to a Get SDR command if the length of the data
 * requested is exactly 0x3A, which leads to command timeouts and no
 * data returned. This intercepts such commands, and causes userspace
 * callers to try again with a different-sized buffer, which succeeds.
 */
#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
					     unsigned long unused,
					     void *in)
{
	struct smi_info *smi_info = in;
	unsigned char *data = smi_info->curr_msg->data;
	unsigned int size = smi_info->curr_msg->data_size;

	if (size >= 8 &&
	    (data[0] >> 2) == STORAGE_NETFN &&
	    data[1] == STORAGE_CMD_GET_SDR &&
	    data[7] == 0x3A) {
		return_hosed_msg_badsize(smi_info);
		return NOTIFY_STOP;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dell_poweredge_bt_xaction_notifier = {
	.notifier_call = dell_poweredge_bt_xaction_handler,
};

/*
 * setup_dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.device_id.start_transaction_pre_hook
 * when we know what function to use there.
 */
static void
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
	struct ipmi_device_id *id = &smi_info->device_id;

	if (id->manufacturer_id == DELL_IANA_MFR_ID &&
	    smi_info->io.si_type == SI_BT)
		register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}

/*
 * setup_oem_data_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.device_id.oem_data_available_handler
 * when we know what function to use there.
 */
static void setup_oem_data_handler(struct smi_info *smi_info)
{
	setup_dell_poweredge_oem_data_handler(smi_info);
}

static void setup_xaction_handlers(struct smi_info *smi_info)
{
	setup_dell_poweredge_bt_xaction_handler(smi_info);
}

static void check_for_broken_irqs(struct smi_info *smi_info)
{
	check_clr_rcv_irq(smi_info);
	check_set_rcv_irq(smi_info);
}

static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
	if (smi_info->thread != NULL) {
		kthread_stop(smi_info->thread);
		smi_info->thread = NULL;
	}

	smi_info->timer_can_start = false;
	if (smi_info->timer_running)
		del_timer_sync(&smi_info->si_timer);
}
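
/*
 * Look for an already-registered interface at the same address. Used
 * below to resolve the case where more than one source (ACPI, SMBIOS,
 * etc.) describes the same BMC.
 */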
static struct smi_info *find_dup_si(struct smi_info *info)
{
	struct smi_info *e;

	list_for_each_entry(e, &smi_infos, link) {
		if (e->io.addr_type != info->io.addr_type)
			continue;
		if (e->io.addr_data == info->io.addr_data) {
			/*
			 * This is a cheap hack, ACPI doesn't have a defined
			 * slave address but SMBIOS does. Pick it up from
			 * any source that has it available.
			 */
			if (info->io.slave_addr && !e->io.slave_addr)
				e->io.slave_addr = info->io.slave_addr;
			return e;
		}
	}

	return NULL;
}
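
/*
 * Register a new system interface described by *io. Duplicates are
 * resolved in favor of ACPI over SMBIOS; anything else at the same
 * address is rejected with -EBUSY. If the driver is already
 * initialized, the interface is started immediately.
 */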
int ipmi_si_add_smi(struct si_sm_io *io)
{
	int rv = 0;
	struct smi_info *new_smi, *dup;

	if (!io->io_setup) {
		if (io->addr_type == IPMI_IO_ADDR_SPACE) {
			io->io_setup = ipmi_si_port_setup;
		} else if (io->addr_type == IPMI_MEM_ADDR_SPACE) {
			io->io_setup = ipmi_si_mem_setup;
		} else {
			return -EINVAL;
		}
	}

	new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
	if (!new_smi)
		return -ENOMEM;
	spin_lock_init(&new_smi->si_lock);

	new_smi->io = *io;

	mutex_lock(&smi_infos_lock);
	dup = find_dup_si(new_smi);
	if (dup) {
		if (new_smi->io.addr_source == SI_ACPI &&
		    dup->io.addr_source == SI_SMBIOS) {
			/* We prefer ACPI over SMBIOS. */
			dev_info(dup->io.dev,
				 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
				 si_to_str[new_smi->io.si_type]);
			cleanup_one_si(dup);
		} else {
			dev_info(new_smi->io.dev,
				 "%s-specified %s state machine: duplicate\n",
				 ipmi_addr_src_to_str(new_smi->io.addr_source),
				 si_to_str[new_smi->io.si_type]);
			rv = -EBUSY;
			kfree(new_smi);
			goto out_err;
		}
	}

	pr_info(PFX "Adding %s-specified %s state machine\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_type]);

	list_add_tail(&new_smi->link, &smi_infos);

	if (initialized) {
		rv = try_smi_init(new_smi);
		if (rv) {
			cleanup_one_si(new_smi);
			mutex_unlock(&smi_infos_lock);
			return rv;
		}
	}
out_err:
	mutex_unlock(&smi_infos_lock);
	return rv;
}

/*
 * Try to start up an interface. Must be called with smi_infos_lock
 * held, primarily to keep smi_num consistent; we only want to do
 * these one at a time.
 */
static int try_smi_init(struct smi_info *new_smi)
{
	int rv = 0;
	int i;
	char *init_name = NULL;

	pr_info(PFX "Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_type],
		addr_space_to_str[new_smi->io.addr_type],
		new_smi->io.addr_data,
		new_smi->io.slave_addr, new_smi->io.irq);

	switch (new_smi->io.si_type) {
	case SI_KCS:
		new_smi->handlers = &kcs_smi_handlers;
		break;

	case SI_SMIC:
		new_smi->handlers = &smic_smi_handlers;
		break;

	case SI_BT:
		new_smi->handlers = &bt_smi_handlers;
		break;

	default:
		/* No support for anything else yet. */
		rv = -EIO;
		goto out_err;
	}

	new_smi->intf_num = smi_num;

	/* Do this early so it's available for logs. */
	if (!new_smi->io.dev) {
		init_name = kasprintf(GFP_KERNEL, "ipmi_si.%d",
				      new_smi->intf_num);

		/*
		 * If we don't already have a device from something
		 * else (like PCI), then register a new one.
		 */
		new_smi->pdev = platform_device_alloc("ipmi_si",
						      new_smi->intf_num);
		if (!new_smi->pdev) {
			pr_err(PFX "Unable to allocate platform device\n");
			rv = -ENOMEM;
			goto out_err;
		}
		new_smi->io.dev = &new_smi->pdev->dev;
		new_smi->io.dev->driver = &ipmi_platform_driver.driver;
		/* Nulled by device_add() */
		new_smi->io.dev->init_name = init_name;
	}

	/* Allocate the state machine's data and initialize it. */
	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
	if (!new_smi->si_sm) {
		rv = -ENOMEM;
		goto out_err;
	}
	new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
							   &new_smi->io);

	/* Now that we know the I/O size, we can set up the I/O. */
	rv = new_smi->io.io_setup(&new_smi->io);
	if (rv) {
		dev_err(new_smi->io.dev, "Could not set up I/O space\n");
		goto out_err;
	}

	/* Do low-level detection first. */
	if (new_smi->handlers->detect(new_smi->si_sm)) {
		if (new_smi->io.addr_source)
			dev_err(new_smi->io.dev,
				"Interface detection failed\n");
		rv = -ENODEV;
		goto out_err;
	}

	/*
	 * Attempt a get device id command. If it fails, we probably
	 * don't have a BMC here.
	 */
	rv = try_get_dev_id(new_smi);
	if (rv) {
		if (new_smi->io.addr_source)
			dev_err(new_smi->io.dev,
				"There appears to be no BMC at this location\n");
		goto out_err;
	}

	setup_oem_data_handler(new_smi);
	setup_xaction_handlers(new_smi);
	check_for_broken_irqs(new_smi);

	new_smi->waiting_msg = NULL;
	new_smi->curr_msg = NULL;
	atomic_set(&new_smi->req_events, 0);
	new_smi->run_to_completion = false;
	for (i = 0; i < SI_NUM_STATS; i++)
		atomic_set(&new_smi->stats[i], 0);

	new_smi->interrupt_disabled = true;
	atomic_set(&new_smi->need_watch, 0);

	rv = try_enable_event_buffer(new_smi);
	if (rv == 0)
		new_smi->has_event_buffer = true;

	/*
	 * Start clearing the flags before we enable interrupts or the
	 * timer to avoid racing with the timer.
	 */
	start_clear_flags(new_smi);

	/*
	 * IRQ is defined to be set when non-zero. req_events will
	 * cause a global flags check that will enable interrupts.
	 */
	if (new_smi->io.irq) {
		new_smi->interrupt_disabled = false;
		atomic_set(&new_smi->req_events, 1);
	}

	if (new_smi->pdev && !new_smi->pdev_registered) {
		rv = platform_device_add(new_smi->pdev);
		if (rv) {
			dev_err(new_smi->io.dev,
				"Unable to register system interface device: %d\n",
				rv);
			goto out_err;
		}
		new_smi->pdev_registered = true;
	}

	dev_set_drvdata(new_smi->io.dev, new_smi);
	rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to add device attributes: error %d\n",
			rv);
		goto out_err;
	}
	new_smi->dev_group_added = true;

	rv = ipmi_register_smi(&handlers,
			       new_smi,
			       new_smi->io.dev,
			       new_smi->io.slave_addr);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to register device: error %d\n",
			rv);
		goto out_err;
	}

#ifdef CONFIG_IPMI_PROC_INTERFACE
	rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
				     &smi_type_proc_ops,
				     new_smi);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to create proc entry: %d\n", rv);
		goto out_err;
	}

	rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
				     &smi_si_stats_proc_ops,
				     new_smi);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to create proc entry: %d\n", rv);
		goto out_err;
	}

	rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
				     &smi_params_proc_ops,
				     new_smi);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to create proc entry: %d\n", rv);
		goto out_err;
	}
#endif

	/* Don't increment till we know we have succeeded. */
	smi_num++;

	dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
		 si_to_str[new_smi->io.si_type]);

	WARN_ON(new_smi->io.dev->init_name != NULL);
	kfree(init_name);

	return 0;

out_err:
	shutdown_one_si(new_smi);

	kfree(init_name);

	return rv;
}
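
/*
 * Module init: register any user-specified (hardcoded) interfaces,
 * then scan the platform, PCI and PARISC sources. Interfaces with an
 * IRQ are started first; interfaces without one are only tried as a
 * fallback when nothing with an IRQ could be registered.
 */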
static int init_ipmi_si(void)
{
	struct smi_info *e;
	enum ipmi_addr_src type = SI_INVALID;

	if (initialized)
		return 0;

	pr_info("IPMI System Interface driver.\n");

	/* If the user gave us a device, they presumably want us to use it */
	if (!ipmi_si_hardcode_find_bmc())
		goto do_scan;

	ipmi_si_platform_init();

	ipmi_si_pci_init();

	ipmi_si_parisc_init();

	/*
	 * We prefer devices with interrupts, but in the case of a machine
	 * with multiple BMCs we assume that there will be several instances
	 * of a given type so if we succeed in registering a type then also
	 * try to register everything else of the same type.
	 */
do_scan:
	mutex_lock(&smi_infos_lock);
	list_for_each_entry(e, &smi_infos, link) {
		/*
		 * Try to register a device if it has an IRQ and we either
		 * haven't successfully registered a device yet or this
		 * device has the same type as one we successfully registered.
		 */
		if (e->io.irq && (!type || e->io.addr_source == type)) {
			if (!try_smi_init(e)) {
				type = e->io.addr_source;
			}
		}
	}

	/* type will only have been set if we successfully registered an si */
	if (type)
		goto skip_fallback_noirq;

	/* Fall back to the preferred device */
	list_for_each_entry(e, &smi_infos, link) {
		if (!e->io.irq && (!type || e->io.addr_source == type)) {
			if (!try_smi_init(e)) {
				type = e->io.addr_source;
			}
		}
	}

skip_fallback_noirq:
	initialized = 1;
	mutex_unlock(&smi_infos_lock);

	if (type)
		return 0;

	mutex_lock(&smi_infos_lock);
	if (unload_when_empty && list_empty(&smi_infos)) {
		mutex_unlock(&smi_infos_lock);
		cleanup_ipmi_si();
		pr_warn(PFX "Unable to find any System Interface(s)\n");
		return -ENODEV;
	} else {
		mutex_unlock(&smi_infos_lock);
		return 0;
	}
}
module_init(init_ipmi_si);
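
/*
 * Tear an interface down: unregister it from the IPMI core, remove
 * the sysfs group, free the IRQ, stop the timer and thread, then poll
 * the state machine until any in-flight transaction completes with
 * interrupts disabled in the BMC.
 */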
static void shutdown_one_si(struct smi_info *smi_info)
{
	int rv = 0;

	if (smi_info->intf) {
		ipmi_smi_t intf = smi_info->intf;

		smi_info->intf = NULL;
		rv = ipmi_unregister_smi(intf);
		if (rv) {
			pr_err(PFX "Unable to unregister device: errno=%d\n",
			       rv);
		}
	}

	if (smi_info->dev_group_added) {
		device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
		smi_info->dev_group_added = false;
	}
	if (smi_info->io.dev)
		dev_set_drvdata(smi_info->io.dev, NULL);

	/*
	 * Make sure that interrupts, the timer and the thread are
	 * stopped and will not run again.
	 */
	smi_info->interrupt_disabled = true;
	if (smi_info->io.irq_cleanup) {
		smi_info->io.irq_cleanup(&smi_info->io);
		smi_info->io.irq_cleanup = NULL;
	}
	stop_timer_and_thread(smi_info);

	/*
	 * Wait until we know that we are out of any interrupt handlers
	 * that might have been running before we freed the interrupt.
	 */
	synchronize_sched();

	/*
	 * Timeouts are stopped, now make sure the interrupts are off
	 * in the BMC. Note that timers and CPU interrupts are off,
	 * so no need for locks.
	 */
	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
		poll(smi_info);
		schedule_timeout_uninterruptible(1);
	}
	if (smi_info->handlers)
		disable_si_irq(smi_info);
	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
		poll(smi_info);
		schedule_timeout_uninterruptible(1);
	}
	if (smi_info->handlers)
		smi_info->handlers->cleanup(smi_info->si_sm);

	if (smi_info->io.addr_source_cleanup) {
		smi_info->io.addr_source_cleanup(&smi_info->io);
		smi_info->io.addr_source_cleanup = NULL;
	}
	if (smi_info->io.io_cleanup) {
		smi_info->io.io_cleanup(&smi_info->io);
		smi_info->io.io_cleanup = NULL;
	}

	kfree(smi_info->si_sm);
	smi_info->si_sm = NULL;
}
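
/*
 * Full removal: take the interface off the smi_infos list, shut it
 * down, release its platform device and free the smi_info itself.
 * All callers hold smi_infos_lock.
 */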
static void cleanup_one_si(struct smi_info *smi_info)
{
	if (!smi_info)
		return;

	list_del(&smi_info->link);

	shutdown_one_si(smi_info);

	if (smi_info->pdev) {
		if (smi_info->pdev_registered)
			platform_device_unregister(smi_info->pdev);
		else
			platform_device_put(smi_info->pdev);
	}

	kfree(smi_info);
}
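
/*
 * External removal entry points: drop the interface bound to a given
 * device, or the one matching an address space / SI type / address
 * triple.
 */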
int ipmi_si_remove_by_dev(struct device *dev)
{
	struct smi_info *e;
	int rv = -ENOENT;

	mutex_lock(&smi_infos_lock);
	list_for_each_entry(e, &smi_infos, link) {
		if (e->io.dev == dev) {
			cleanup_one_si(e);
			rv = 0;
			break;
		}
	}
	mutex_unlock(&smi_infos_lock);

	return rv;
}

void ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
			    unsigned long addr)
{
	/* remove */
	struct smi_info *e, *tmp_e;

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
		if (e->io.addr_type != addr_space)
			continue;
		if (e->io.si_type != si_type)
			continue;
		if (e->io.addr_data == addr)
			cleanup_one_si(e);
	}
	mutex_unlock(&smi_infos_lock);
}

static void cleanup_ipmi_si(void)
{
	struct smi_info *e, *tmp_e;

	if (!initialized)
		return;

	ipmi_si_pci_shutdown();

	ipmi_si_parisc_shutdown();

	ipmi_si_platform_shutdown();

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
		cleanup_one_si(e);
	mutex_unlock(&smi_infos_lock);
}
module_exit(cleanup_ipmi_si);

MODULE_ALIAS("platform:dmi-ipmi-si");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
		   " system interfaces.");