// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

#include "common.h"

#define MSG_ID_SHIFT		0
#define MSG_ID_MASK		0xff
#define MSG_TYPE_SHIFT		8
#define MSG_TYPE_MASK		0x3
#define MSG_PROTOCOL_ID_SHIFT	10
#define MSG_PROTOCOL_ID_MASK	0xff
#define MSG_TOKEN_ID_SHIFT	18
#define MSG_TOKEN_ID_MASK	0x3ff
#define MSG_XTRACT_TOKEN(header)	\
	(((header) >> MSG_TOKEN_ID_SHIFT) & MSG_TOKEN_ID_MASK)
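
/*
 * The shifts/masks above describe the 32-bit message header layout used in
 * the shared memory area (derived directly from the definitions above):
 *
 *	[31:28]	reserved
 *	[27:18]	token (sequence id)
 *	[17:10]	protocol id
 *	[9:8]	message type
 *	[7:0]	message id
 */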

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	SCMI_ERR_MAX
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);

/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	/* protect transfer allocation */
	spinlock_t xfer_lock;
};

/**
 * struct scmi_desc - Description of SoC integration
 *
 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
 * @max_msg: Maximum number of messages that can be pending
 *	simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct scmi_desc {
	int max_rx_timeout_ms;
	int max_msg;
	int max_msg_size;
};

/**
 * struct scmi_chan_info - Structure representing a SCMI channel information
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox channel
 * @payload: Transmit/Receive mailbox channel payload area
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *	channel
 * @handle: Pointer to SCMI entity handle
 */
struct scmi_chan_info {
	struct mbox_client cl;
	struct mbox_chan *chan;
	void __iomem *payload;
	struct device *dev;
	struct scmi_handle *handle;
};

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @handle: Instance of SCMI handle to send to clients
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @minfo: Message info
 * @tx_idr: IDR object to map protocol id to channel info pointer
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info minfo;
	struct idr tx_idr;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};

#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

/*
 * SCMI specification requires all parameters, message headers, return
 * arguments or any protocol data to be expressed in little endian
 * format only.
 */
struct scmi_shared_mem {
	__le32 reserved;
	__le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR	BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE	BIT(0)
	__le32 reserved1[2];
	__le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED	BIT(0)
	__le32 length;
	__le32 msg_header;
	u8 msg_payload[0];
};

static const int scmi_linux_errmap[] = {
	/* better than switch case as long as the enum values are contiguous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	/*
	 * SCMI_ERR_MAX evaluates to -9 (one past SCMI_ERR_PROTOCOL), so a
	 * range check against it would wrongly reject the last two error
	 * codes; bound the lookup by the size of the map instead.
	 */
	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}
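
/*
 * For example, a firmware status of SCMI_ERR_PARAMS (-2) is translated to
 * -EINVAL, while any status outside the table (e.g. -42) falls back to -EIO.
 */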

/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Command ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}

static void scmi_fetch_response(struct scmi_xfer *xfer,
				struct scmi_shared_mem __iomem *mem)
{
	xfer->hdr.status = ioread32(mem->msg_payload);
	/* Skip the length of header and status in payload area i.e. 8 bytes */
	xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);

	/* Take a copy to the rx buffer */
	memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
}
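
/*
 * Response layout in the shared memory, as consumed above: mem->length
 * counts the 4-byte msg_header plus the payload; the payload itself starts
 * with a 4-byte status word followed by the return values. Hence the
 * "length - 8" for the size of the return values alone, and the "+ 4"
 * offset into msg_payload when copying them out.
 */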

/**
 * scmi_rx_callback() - mailbox client callback for receive messages
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * Processes one received message to the appropriate transfer information
 * and signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void scmi_rx_callback(struct mbox_client *cl, void *m)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->minfo;
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];

	scmi_dump_header_dbg(dev, &xfer->hdr);
	/* Is the message of valid length? */
	if (xfer->rx.len > info->desc->max_msg_size) {
		dev_err(dev, "unable to handle %zu xfer(max %d)\n",
			xfer->rx.len, info->desc->max_msg_size);
		return;
	}

	scmi_fetch_response(xfer, mem);
	complete(&xfer->done);
}

/**
 * pack_scmi_header() - packs and returns 32-bit header
 *
 * @hdr: pointer to header containing all the information on message id,
 *	protocol id and sequence id.
 *
 * Return: 32-bit packed message header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
	return ((hdr->id & MSG_ID_MASK) << MSG_ID_SHIFT) |
	   ((hdr->seq & MSG_TOKEN_ID_MASK) << MSG_TOKEN_ID_SHIFT) |
	   ((hdr->protocol_id & MSG_PROTOCOL_ID_MASK) << MSG_PROTOCOL_ID_SHIFT);
}
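
/*
 * Worked example: a message with id 0x7, protocol id 0x13 and token 0x21
 * packs as (0x7 << 0) | (0x13 << 10) | (0x21 << 18) = 0x00844c07.
 */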

/**
 * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * This function prepares the shared memory which contains the header and the
 * payload.
 */
static void scmi_tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_xfer *t = m;
	struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
	struct scmi_shared_mem __iomem *mem = cinfo->payload;

	/* Mark channel busy + clear error */
	iowrite32(0x0, &mem->channel_status);
	iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
		  &mem->flags);
	iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
	iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
	if (t->tx.buf)
		memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
}
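
/*
 * Shared memory handshake, as implemented above and in the polling path:
 * the agent clears channel_status (channel busy) before the mailbox
 * doorbell is rung; the platform sets SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE
 * (and possibly ..._ERROR) once the response is in place, which is exactly
 * what scmi_xfer_poll_done() below tests for.
 */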

/**
 * scmi_one_xfer_get() - Allocate one message
 *
 * @handle: SCMI entity handle
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCMI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: Pointer to the allocated message on success, else a
 *	corresponding ERR_PTR.
 */
static struct scmi_xfer *scmi_one_xfer_get(const struct scmi_handle *handle)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->minfo;

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);

	return xfer;
}

/**
 * scmi_one_xfer_put() - Release a message
 *
 * @handle: SCMI entity handle
 * @xfer: message that was reserved by scmi_one_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
void scmi_one_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	unsigned long flags;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->minfo;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

static bool
scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct scmi_shared_mem __iomem *mem = cinfo->payload;
	u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

	if (xfer->hdr.seq != xfer_id)
		return false;

	return ioread32(&mem->channel_status) &
		(SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
		 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}

#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	ktime_t __cur = ktime_get();

	return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
}

/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: 0 on success; -ETIMEDOUT if there was no response; else the
 *	corresponding transmit error.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	ret = mbox_send_message(cinfo->chan, xfer);
	if (ret < 0) {
		dev_dbg(dev, "mbox send fail %d\n", ret);
		return ret;
	}

	/* mbox_send_message returns non-negative value on success, so reset */
	ret = 0;

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			scmi_fetch_response(xfer, cinfo->payload);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(cinfo->chan, ret);

	return ret;
}

/**
 * scmi_one_xfer_init() - Allocate and initialise one message
 *
 * @handle: SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_one_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
int scmi_one_xfer_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_one_xfer_get(handle);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: Handle to SCMI entity information
 * @protocol: Protocol identifier for the message
 * @version: Holds returned version of protocol
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_one_xfer_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_one_xfer_put(handle, t);
	return ret;
}

void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
				     u8 *prot_imp)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released;
 *	if a NULL handle was passed, it returns -EINVAL
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}
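
/*
 * Typical (purely illustrative) client usage from a protocol driver,
 * keeping get and put balanced as required above:
 *
 *	handle = scmi_handle_get(&sdev->dev);
 *	if (!handle)
 *		return -EPROBE_DEFER;
 *	...
 *	scmi_handle_put(handle);
 */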

static const struct scmi_desc scmi_generic_desc = {
	.max_rx_timeout_ms = 30,	/* we may increase this if required */
	.max_msg = 20,		/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = 128,
};

/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_generic_desc },
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);

static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;
	struct scmi_xfers_info *info = &sinfo->minfo;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(desc->max_msg >= (MSG_TOKEN_ID_MASK + 1))) {
		dev_err(dev, "Maximum message of %d exceeds supported %d\n",
			desc->max_msg, MSG_TOKEN_ID_MASK + 1);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	bitmap_zero(info->xfer_alloc_table, desc->max_msg);

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, desc->max_msg_size, sizeof(u8),
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		/* Request is consumed before the response lands, so the
		 * transmit and receive buffers can share one allocation.
		 */
		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

static int scmi_mailbox_check(struct device_node *np)
{
	struct of_phandle_args arg;

	return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
}

static int scmi_mbox_free_channel(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct idr *idr = data;

	if (!IS_ERR_OR_NULL(cinfo->chan)) {
		mbox_free_channel(cinfo->chan);
		cinfo->chan = NULL;
	}

	idr_remove(idr, id);

	return 0;
}

static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (!ret) {
		/* Safe to free channels since no more users */
		ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
		idr_destroy(&info->tx_idr);
	}

	return ret;
}

static inline int
scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret;
	struct resource res;
	resource_size_t size;
	struct device_node *shmem, *np = dev->of_node;
	struct scmi_chan_info *cinfo;
	struct mbox_client *cl;

	/* Devices with no dedicated mailbox re-use the base protocol one */
	if (scmi_mailbox_check(np)) {
		cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	cl = &cinfo->cl;
	cl->dev = dev;
	cl->rx_callback = scmi_rx_callback;
	cl->tx_prepare = scmi_tx_prepare;
	cl->tx_block = false;
	cl->knows_txdone = true;

	shmem = of_parse_phandle(np, "shmem", 0);
	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret) {
		dev_err(dev, "failed to get SCMI Tx payload mem resource\n");
		return ret;
	}

	size = resource_size(&res);
	cinfo->payload = devm_ioremap(info->dev, res.start, size);
	if (!cinfo->payload) {
		dev_err(dev, "failed to ioremap SCMI Tx payload\n");
		return -EADDRNOTAVAIL;
	}

	/* Transmit channel is first entry i.e. index 0 */
	cinfo->chan = mbox_request_channel(cl, 0);
	if (IS_ERR(cinfo->chan)) {
		ret = PTR_ERR(cinfo->chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to request SCMI Tx mailbox\n");
		return ret;
	}

idr_alloc:
	ret = idr_alloc(&info->tx_idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id)
{
	struct scmi_device *sdev;

	sdev = scmi_device_create(np, info->dev, prot_id);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return;
	}

	if (scmi_mbox_chan_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		/* Must not touch sdev after it has been destroyed */
		return;
	}

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	/* Only mailbox method supported, check for the presence of one */
	if (scmi_mailbox_check(np)) {
		dev_err(dev, "no mailbox found in %pOF\n", np);
		return -EINVAL;
	}

	desc = of_match_device(scmi_of_match, dev)->data;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	ret = scmi_mbox_chan_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		prot_id &= MSG_PROTOCOL_ID_MASK;

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_device(child, info, prot_id);
	}

	return 0;
}
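
/*
 * An illustrative devicetree fragment matching what scmi_probe() expects:
 * the top-level node carries the mailbox and shared memory references, and
 * each child's "reg" holds the SCMI protocol id it exposes (labels and
 * values here are examples only):
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi";
 *			mboxes = <&mailbox 0>;
 *			shmem = <&scmi_shmem>;
 *			#address-cells = <1>;
 *			#size-cells = <0>;
 *
 *			scmi_clk: protocol@14 {
 *				reg = <0x14>;
 *			};
 *		};
 *	};
 */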

static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .of_match_table = scmi_of_match,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};

module_platform_driver(scmi_driver);

MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");