  1. /* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
  2. *
  3. * Redistribution and use in source and binary forms, with or without
  4. * modification, are permitted provided that the following conditions are met:
  5. * * Redistributions of source code must retain the above copyright
  6. * notice, this list of conditions and the following disclaimer.
  7. * * Redistributions in binary form must reproduce the above copyright
  8. * notice, this list of conditions and the following disclaimer in the
  9. * documentation and/or other materials provided with the distribution.
  10. * * Neither the name of Freescale Semiconductor nor the
  11. * names of its contributors may be used to endorse or promote products
  12. * derived from this software without specific prior written permission.
  13. *
  14. * ALTERNATIVELY, this software may be distributed under the terms of the
  15. * GNU General Public License ("GPL") as published by the Free Software
  16. * Foundation, either version 2 of that License or (at your option) any
  17. * later version.
  18. *
  19. * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
  20. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  21. * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  22. * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
  23. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  24. * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  25. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  26. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  28. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. */
#include "bman_priv.h"

/* Per-portal IRQ name template; expanded with the portal's CPU number */
#define IRQNAME		"BMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "BMan portal %d" */

/*
 * Portal register assists.
 * The ARM/ARM64 and PowerPC portal memory maps differ, so the register
 * offsets are selected per architecture. The BM_REG_* offsets are into the
 * cache-inhibited region, the BM_CL_* offsets into the cache-enabled region.
 */
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH	0x3000
#define BM_REG_RCR_CI_CINH	0x3100
#define BM_REG_RCR_ITR		0x3200
#define BM_REG_CFG		0x3300
#define BM_REG_SCN(n)		(0x3400 + ((n) << 6))
#define BM_REG_ISR		0x3e00
#define BM_REG_IER		0x3e40
#define BM_REG_ISDR		0x3e80
#define BM_REG_IIR		0x3ec0
/* Cache-enabled register offsets */
#define BM_CL_CR		0x0000
#define BM_CL_RR0		0x0100
#define BM_CL_RR1		0x0140
#define BM_CL_RCR		0x1000
#define BM_CL_RCR_PI_CENA	0x3000
#define BM_CL_RCR_CI_CENA	0x3100
#else
/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH	0x0000
#define BM_REG_RCR_CI_CINH	0x0004
#define BM_REG_RCR_ITR		0x0008
#define BM_REG_CFG		0x0100
#define BM_REG_SCN(n)		(0x0200 + ((n) << 2))
#define BM_REG_ISR		0x0e00
#define BM_REG_IER		0x0e04
#define BM_REG_ISDR		0x0e08
#define BM_REG_IIR		0x0e0c
/* Cache-enabled register offsets */
#define BM_CL_CR		0x0000
#define BM_CL_RR0		0x0100
#define BM_CL_RR1		0x0140
#define BM_CL_RCR		0x1000
#define BM_CL_RCR_PI_CENA	0x3000
#define BM_CL_RCR_CI_CENA	0x3100
#endif
/*
 * Portal modes.
 * Enum types;
 * pmode == production mode
 * cmode == consumption mode,
 * Enum values use 3 letter codes. First letter matches the portal mode,
 * remaining two letters indicate;
 * ci == cache-inhibited portal register
 * ce == cache-enabled portal register
 * vb == in-band valid-bit (cache-enabled)
 */
enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
	bm_rcr_pce = 1,		/* PI index, cache-enabled */
	bm_rcr_pvb = 2		/* valid-bit */
};
enum bm_rcr_cmode {		/* s/w-only */
	bm_rcr_cci,		/* CI index, cache-inhibited */
	bm_rcr_cce		/* CI index, cache-enabled */
};
/* --- Portal structures --- */

/* Number of entries in the Release Command Ring (must be a power of 2) */
#define BM_RCR_SIZE	8

/* Release Command */
struct bm_rcr_entry {
	union {
		struct {
			u8 _ncw_verb; /* writes to this are non-coherent */
			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
			u8 __reserved1[62];
		};
		struct bm_buffer bufs[8];
	};
};
#define BM_RCR_VERB_VBIT		0x80
#define BM_RCR_VERB_CMD_MASK		0x70	/* one of two values; */
#define BM_RCR_VERB_CMD_BPID_SINGLE	0x20
#define BM_RCR_VERB_CMD_BPID_MULTI	0x30
#define BM_RCR_VERB_BUFCOUNT_MASK	0x0f	/* values 1..8 */

/* Software state tracking one portal's Release Command Ring */
struct bm_rcr {
	struct bm_rcr_entry *ring, *cursor;	/* ring base / producer cursor */
	u8 ci, available, ithresh, vbit;	/* consumer idx, free slots,
						 * irq threshold, current
						 * valid-bit */
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;				/* a release is in flight */
	enum bm_rcr_pmode pmode;
	enum bm_rcr_cmode cmode;
#endif
};
/* MC (Management Command) command */
struct bm_mc_command {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 bpid; /* used by acquire command */
	u8 __reserved[62];
};
#define BM_MCC_VERB_VBIT		0x80
#define BM_MCC_VERB_CMD_MASK		0x70	/* where the verb contains; */
#define BM_MCC_VERB_CMD_ACQUIRE		0x10
#define BM_MCC_VERB_CMD_QUERY		0x40
#define BM_MCC_VERB_ACQUIRE_BUFCOUNT	0x0f	/* values 1..8 go here */

/* MC result, Acquire and Query Response */
union bm_mc_result {
	struct {
		u8 verb;
		u8 bpid;
		u8 __reserved[62];
	};
	struct bm_buffer bufs[8];
};
#define BM_MCR_VERB_VBIT		0x80
#define BM_MCR_VERB_CMD_MASK		BM_MCC_VERB_CMD_MASK
#define BM_MCR_VERB_CMD_ACQUIRE		BM_MCC_VERB_CMD_ACQUIRE
#define BM_MCR_VERB_CMD_QUERY		BM_MCC_VERB_CMD_QUERY
#define BM_MCR_VERB_CMD_ERR_INVALID	0x60
#define BM_MCR_VERB_CMD_ERR_ECC		0x70
#define BM_MCR_VERB_ACQUIRE_BUFCOUNT	BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
#define BM_MCR_TIMEOUT			10000	/* us */

/* Software state for one portal's management-command interface */
struct bm_mc {
	struct bm_mc_command *cr;	/* command register (cache-enabled) */
	union bm_mc_result *rr;		/* base of the RR0/RR1 response pair */
	u8 rridx, vbit;			/* active response index, valid-bit */
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can only be _mc_start()ed */
		mc_idle,
		/* Can only be _mc_commit()ed or _mc_abort()ed */
		mc_user,
		/* Can only be _mc_retry()ed */
		mc_hw
	} state;
#endif
};
/* The two mapped views of a portal's registers */
struct bm_addr {
	void *ce;		/* cache-enabled */
	__be32 *ce_be;		/* Same as above but for direct access */
	void __iomem *ci;	/* cache-inhibited */
};

/* Low-level portal: mapped addresses plus RCR and MC state */
struct bm_portal {
	struct bm_addr addr;
	struct bm_rcr rcr;
	struct bm_mc mc;
} ____cacheline_aligned;
  171. /* Cache-inhibited register access. */
  172. static inline u32 bm_in(struct bm_portal *p, u32 offset)
  173. {
  174. return ioread32be(p->addr.ci + offset);
  175. }
  176. static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
  177. {
  178. iowrite32be(val, p->addr.ci + offset);
  179. }
  180. /* Cache Enabled Portal Access */
  181. static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
  182. {
  183. dpaa_invalidate(p->addr.ce + offset);
  184. }
  185. static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
  186. {
  187. dpaa_touch_ro(p->addr.ce + offset);
  188. }
  189. static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
  190. {
  191. return be32_to_cpu(*(p->addr.ce_be + (offset/4)));
  192. }
/* High-level, CPU-affine portal object */
struct bman_portal {
	struct bm_portal p;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	/* probing time config params for cpu-affine portals */
	const struct bm_portal_config *config;
	char irqname[MAX_IRQNAME];
};

/* CPUs that currently have an initialised affine portal */
static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
/* One portal per CPU */
static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
/*
 * Return this CPU's portal; disables preemption until put_affine_portal()
 * (get_cpu_var/put_cpu_var semantics).
 */
static inline struct bman_portal *get_affine_portal(void)
{
	return &get_cpu_var(bman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(bman_affine_portal);
}
/*
 * This object type refers to a pool, it isn't *the* pool. There may be
 * more than one such object per BMan buffer pool, eg. if different users of the
 * pool are operating via different portals.
 */
struct bman_pool {
	/* index of the buffer pool to encapsulate (0-63) */
	u32 bpid;
	/* Used for hash-table admin when using depletion notifications. */
	struct bman_portal *portal;
	struct bman_pool *next;
};
  224. static u32 poll_portal_slow(struct bman_portal *p, u32 is);
  225. static irqreturn_t portal_isr(int irq, void *ptr)
  226. {
  227. struct bman_portal *p = ptr;
  228. struct bm_portal *portal = &p->p;
  229. u32 clear = p->irq_sources;
  230. u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;
  231. if (unlikely(!is))
  232. return IRQ_NONE;
  233. clear |= poll_portal_slow(p, is);
  234. bm_out(portal, BM_REG_ISR, clear);
  235. return IRQ_HANDLED;
  236. }
  237. /* --- RCR API --- */
  238. #define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry))
  239. #define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
  240. /* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
  241. static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
  242. {
  243. uintptr_t addr = (uintptr_t)p;
  244. addr &= ~RCR_CARRY;
  245. return (struct bm_rcr_entry *)addr;
  246. }
#ifdef CONFIG_FSL_DPAA_CHECKING
/* Bit-wise logic to convert a ring pointer to a ring index (0..SIZE-1) */
static int rcr_ptr2idx(struct bm_rcr_entry *e)
{
	return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
}
#endif
  254. /* Increment the 'cursor' ring pointer, taking 'vbit' into account */
  255. static inline void rcr_inc(struct bm_rcr *rcr)
  256. {
  257. /* increment to the next RCR pointer and handle overflow and 'vbit' */
  258. struct bm_rcr_entry *partial = rcr->cursor + 1;
  259. rcr->cursor = rcr_carryclear(partial);
  260. if (partial != rcr->cursor)
  261. rcr->vbit ^= BM_RCR_VERB_VBIT;
  262. }
  263. static int bm_rcr_get_avail(struct bm_portal *portal)
  264. {
  265. struct bm_rcr *rcr = &portal->rcr;
  266. return rcr->available;
  267. }
  268. static int bm_rcr_get_fill(struct bm_portal *portal)
  269. {
  270. struct bm_rcr *rcr = &portal->rcr;
  271. return BM_RCR_SIZE - 1 - rcr->available;
  272. }
  273. static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
  274. {
  275. struct bm_rcr *rcr = &portal->rcr;
  276. rcr->ithresh = ithresh;
  277. bm_out(portal, BM_REG_RCR_ITR, ithresh);
  278. }
/* Warm the cache-enabled CI register ahead of the next CI update read. */
static void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
	/* only referenced by the DPAA_ASSERT below */
	__maybe_unused struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
}
/*
 * Re-read the hardware consumer index (cache-enabled) and credit the
 * newly-consumed entries back to 'available'. Returns how many entries
 * the hardware consumed since the last update.
 */
static u8 bm_rcr_cce_update(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;
	u8 diff, old_ci = rcr->ci;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
	/* invalidate so the next read fetches a fresh snapshot */
	bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
	diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
	rcr->available += diff;
	return diff;
}
  296. static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
  297. {
  298. struct bm_rcr *rcr = &portal->rcr;
  299. DPAA_ASSERT(!rcr->busy);
  300. if (!rcr->available)
  301. return NULL;
  302. #ifdef CONFIG_FSL_DPAA_CHECKING
  303. rcr->busy = 1;
  304. #endif
  305. dpaa_zero(rcr->cursor);
  306. return rcr->cursor;
  307. }
/*
 * Hand the entry claimed by bm_rcr_start() to hardware. In pvb mode the
 * valid-bit written into the verb is what tells hardware the entry is live,
 * so the payload must be globally visible first (dma_wmb) and the cacheline
 * flushed afterwards.
 */
static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_rcr *rcr = &portal->rcr;
	struct bm_rcr_entry *rcursor;

	DPAA_ASSERT(rcr->busy);
	DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
	DPAA_ASSERT(rcr->available >= 1);
	/* order payload writes before the verb/valid-bit write */
	dma_wmb();
	rcursor = rcr->cursor;
	rcursor->_ncw_verb = myverb | rcr->vbit;
	dpaa_flush(rcursor);
	rcr_inc(rcr);
	rcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
#endif
}
/*
 * Initialise software RCR state from the hardware's current producer and
 * consumer indices, then program the production mode into BCSP_CFG::RPM.
 * Always returns 0.
 */
static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
		       enum bm_rcr_cmode cmode)
{
	struct bm_rcr *rcr = &portal->rcr;
	u32 cfg;
	u8 pi;

	rcr->ring = portal->addr.ce + BM_CL_RCR;
	rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	rcr->cursor = rcr->ring + pi;
	/* bit BM_RCR_SIZE of the PI register carries the current valid-bit */
	rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
		BM_RCR_VERB_VBIT : 0;
	rcr->available = BM_RCR_SIZE - 1
		- dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
	rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
	rcr->pmode = pmode;
	rcr->cmode = cmode;
#endif
	cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
		| (pmode & 0x3); /* BCSP_CFG::RPM */
	bm_out(portal, BM_REG_CFG, cfg);
	return 0;
}
/*
 * Sanity-check RCR teardown: with CONFIG_FSL_DPAA_CHECKING, warn if any
 * entries were left uncommitted or unconsumed. No-op otherwise.
 */
static void bm_rcr_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_rcr *rcr = &portal->rcr;
	int i;

	DPAA_ASSERT(!rcr->busy);

	i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr_ptr2idx(rcr->cursor))
		pr_crit("losing uncommitted RCR entries\n");

	i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr->ci)
		pr_crit("missing existing RCR completions\n");

	if (rcr->ci != rcr_ptr2idx(rcr->cursor))
		pr_crit("RCR destroyed unquiesced\n");
#endif
}
/* --- Management command API --- */

/*
 * Initialise MC state: derive the active response index (and valid-bit)
 * from the valid-bit currently in the command register's verb.
 * Always returns 0.
 */
static int bm_mc_init(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + BM_CL_CR;
	mc->rr = portal->addr.ce + BM_CL_RR0;
	mc->rridx = (mc->cr->_ncw_verb & BM_MCC_VERB_VBIT) ?
		0 : 1;
	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return 0;
}
/*
 * MC teardown check: with CONFIG_FSL_DPAA_CHECKING, assert/warn if a
 * management command is still in flight. No-op otherwise.
 */
static void bm_mc_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
	if (mc->state != mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}
  389. static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
  390. {
  391. struct bm_mc *mc = &portal->mc;
  392. DPAA_ASSERT(mc->state == mc_idle);
  393. #ifdef CONFIG_FSL_DPAA_CHECKING
  394. mc->state = mc_user;
  395. #endif
  396. dpaa_zero(mc->cr);
  397. return mc->cr;
  398. }
/*
 * Submit the command prepared by bm_mc_start(). The verb write (with the
 * valid-bit) is what launches the command, so the payload is ordered
 * before it with dma_wmb() and the cacheline flushed afterwards; the
 * expected response line is primed for polling.
 */
static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_hw;
#endif
}
/*
 * Poll for the response to a committed command. Returns NULL while the
 * command is still pending; on completion, returns the response and flips
 * to the other response register/valid-bit for the next command.
 */
static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!rr->verb) {
		/* re-prime the cacheline for the next poll */
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return rr;
}
  433. static inline int bm_mc_result_timeout(struct bm_portal *portal,
  434. union bm_mc_result **mcr)
  435. {
  436. int timeout = BM_MCR_TIMEOUT;
  437. do {
  438. *mcr = bm_mc_result(portal);
  439. if (*mcr)
  440. break;
  441. udelay(1);
  442. } while (--timeout);
  443. return timeout;
  444. }
  445. /* Disable all BSCN interrupts for the portal */
  446. static void bm_isr_bscn_disable(struct bm_portal *portal)
  447. {
  448. bm_out(portal, BM_REG_SCN(0), 0);
  449. bm_out(portal, BM_REG_SCN(1), 0);
  450. }
/*
 * Bring up one portal: map state, initialise RCR and MC, quiesce and then
 * enable interrupt delivery, and bind the IRQ to the portal's CPU.
 * Returns 0 on success, -EIO on any failure (resources unwound via the
 * goto chain).
 */
static int bman_create_portal(struct bman_portal *portal,
			      const struct bm_portal_config *c)
{
	struct bm_portal *p;
	int ret;

	p = &portal->p;
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference...
	 */
	p->addr.ce = c->addr_virt_ce;
	p->addr.ce_be = c->addr_virt_ce;
	p->addr.ci = c->addr_virt_ci;
	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
		dev_err(c->dev, "RCR initialisation failed\n");
		goto fail_rcr;
	}
	if (bm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/*
	 * Default to all BPIDs disabled, we enable as required at
	 * run-time.
	 */
	bm_isr_bscn_disable(p);

	/* Write-to-clear any stale interrupt status bits */
	bm_out(p, BM_REG_ISDR, 0xffffffff);
	portal->irq_sources = 0;
	bm_out(p, BM_REG_IER, 0);
	bm_out(p, BM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}
	if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu))
		goto fail_affinity;

	/* Need RCR to be empty before continuing */
	ret = bm_rcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "RCR unclean\n");
		goto fail_rcr_empty;
	}
	/* Success */
	portal->config = c;

	/* Un-disable status bits and drop the inhibit on IRQ delivery */
	bm_out(p, BM_REG_ISDR, 0);
	bm_out(p, BM_REG_IIR, 0);

	return 0;

fail_rcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	bm_mc_finish(p);
fail_mc:
	bm_rcr_finish(p);
fail_rcr:
	return -EIO;
}
  511. struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
  512. {
  513. struct bman_portal *portal;
  514. int err;
  515. portal = &per_cpu(bman_affine_portal, c->cpu);
  516. err = bman_create_portal(portal, c);
  517. if (err)
  518. return NULL;
  519. spin_lock(&affine_mask_lock);
  520. cpumask_set_cpu(c->cpu, &affine_mask);
  521. spin_unlock(&affine_mask_lock);
  522. return portal;
  523. }
  524. static u32 poll_portal_slow(struct bman_portal *p, u32 is)
  525. {
  526. u32 ret = is;
  527. if (is & BM_PIRQ_RCRI) {
  528. bm_rcr_cce_update(&p->p);
  529. bm_rcr_set_ithresh(&p->p, 0);
  530. bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
  531. is &= ~BM_PIRQ_RCRI;
  532. }
  533. /* There should be no status register bits left undefined */
  534. DPAA_ASSERT(!is);
  535. return ret;
  536. }
  537. int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
  538. {
  539. unsigned long irqflags;
  540. local_irq_save(irqflags);
  541. p->irq_sources |= bits & BM_PIRQ_VISIBLE;
  542. bm_out(&p->p, BM_REG_IER, p->irq_sources);
  543. local_irq_restore(irqflags);
  544. return 0;
  545. }
  546. static int bm_shutdown_pool(u32 bpid)
  547. {
  548. struct bm_mc_command *bm_cmd;
  549. union bm_mc_result *bm_res;
  550. while (1) {
  551. struct bman_portal *p = get_affine_portal();
  552. /* Acquire buffers until empty */
  553. bm_cmd = bm_mc_start(&p->p);
  554. bm_cmd->bpid = bpid;
  555. bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
  556. if (!bm_mc_result_timeout(&p->p, &bm_res)) {
  557. put_affine_portal();
  558. pr_crit("BMan Acquire Command timedout\n");
  559. return -ETIMEDOUT;
  560. }
  561. if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
  562. put_affine_portal();
  563. /* Pool is empty */
  564. return 0;
  565. }
  566. put_affine_portal();
  567. }
  568. return 0;
  569. }
/* genalloc pool holding the free BPID space */
struct gen_pool *bm_bpalloc;

/*
 * Allocate 'count' contiguous BPIDs from the allocator. On success, stores
 * the first id (with the genalloc offset stripped) in *result and returns 0;
 * returns -ENOMEM when the allocator is exhausted.
 */
static int bm_alloc_bpid_range(u32 *result, u32 count)
{
	unsigned long addr;

	addr = gen_pool_alloc(bm_bpalloc, count);
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;

	return 0;
}
  580. static int bm_release_bpid(u32 bpid)
  581. {
  582. int ret;
  583. ret = bm_shutdown_pool(bpid);
  584. if (ret) {
  585. pr_debug("BPID %d leaked\n", bpid);
  586. return ret;
  587. }
  588. gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
  589. return 0;
  590. }
  591. struct bman_pool *bman_new_pool(void)
  592. {
  593. struct bman_pool *pool = NULL;
  594. u32 bpid;
  595. if (bm_alloc_bpid_range(&bpid, 1))
  596. return NULL;
  597. pool = kmalloc(sizeof(*pool), GFP_KERNEL);
  598. if (!pool)
  599. goto err;
  600. pool->bpid = bpid;
  601. return pool;
  602. err:
  603. bm_release_bpid(bpid);
  604. kfree(pool);
  605. return NULL;
  606. }
  607. EXPORT_SYMBOL(bman_new_pool);
/* Release the pool object's BPID and free the object itself. */
void bman_free_pool(struct bman_pool *pool)
{
	bm_release_bpid(pool->bpid);
	kfree(pool);
}
EXPORT_SYMBOL(bman_free_pool);
/* Accessor: the hardware buffer-pool id this object encapsulates. */
int bman_get_bpid(const struct bman_pool *pool)
{
	return pool->bpid;
}
EXPORT_SYMBOL(bman_get_bpid);
  619. static void update_rcr_ci(struct bman_portal *p, int avail)
  620. {
  621. if (avail)
  622. bm_rcr_cce_prefetch(&p->p);
  623. else
  624. bm_rcr_cce_update(&p->p);
  625. }
/*
 * Release 'num' buffers (1..8) to the given pool via the RCR. Spins for up
 * to ~1ms waiting for a free ring entry. Returns 0 on success, -ETIMEDOUT
 * if no RCR entry became available.
 */
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p;
	struct bm_rcr_entry *r;
	unsigned long irqflags;
	int avail, timeout = 1000; /* 1ms */
	int i = num - 1;

	DPAA_ASSERT(num > 0 && num <= 8);

	do {
		p = get_affine_portal();
		local_irq_save(irqflags);
		avail = bm_rcr_get_avail(&p->p);
		if (avail < 2)
			update_rcr_ci(p, avail);
		r = bm_rcr_start(&p->p);
		local_irq_restore(irqflags);
		put_affine_portal();
		if (likely(r))
			break;

		udelay(1);
	} while (--timeout);

	if (unlikely(!timeout))
		return -ETIMEDOUT;

	/* re-acquire the portal to fill and commit the claimed entry */
	p = get_affine_portal();
	local_irq_save(irqflags);
	/*
	 * we can copy all but the first entry, as this can trigger badness
	 * with the valid-bit
	 */
	bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
	bm_buffer_set_bpid(r->bufs, pool->bpid);
	if (i)
		memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));

	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
			  (num & BM_RCR_VERB_BUFCOUNT_MASK));

	local_irq_restore(irqflags);
	put_affine_portal();
	return 0;
}
EXPORT_SYMBOL(bman_release);
/*
 * Acquire up to 'num' buffers (1..8) from the pool via a management
 * command. On success returns 'num' and (when 'bufs' is non-NULL) copies
 * the acquired buffers out. Returns -ETIMEDOUT if the command did not
 * complete, or -ENOMEM if the hardware handed back fewer than requested.
 */
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p = get_affine_portal();
	struct bm_mc_command *mcc;
	union bm_mc_result *mcr;
	int ret;

	DPAA_ASSERT(num > 0 && num <= 8);

	mcc = bm_mc_start(&p->p);
	mcc->bpid = pool->bpid;
	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
		     (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
	if (!bm_mc_result_timeout(&p->p, &mcr)) {
		put_affine_portal();
		pr_crit("BMan Acquire Timeout\n");
		return -ETIMEDOUT;
	}
	/* the response verb carries how many buffers were actually acquired */
	ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
	if (bufs)
		memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));

	put_affine_portal();
	if (ret != num)
		ret = -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(bman_acquire);
/* Accessor: the probe-time configuration this portal was created with. */
const struct bm_portal_config *
bman_get_bm_portal_config(const struct bman_portal *portal)
{
	return portal->config;
}