/* vpe-mt.c */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
  7. * Copyright (C) 2013 Imagination Technologies Ltd.
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/device.h>
  11. #include <linux/fs.h>
  12. #include <linux/slab.h>
  13. #include <linux/export.h>
  14. #include <asm/mipsregs.h>
  15. #include <asm/mipsmtregs.h>
  16. #include <asm/mips_mt.h>
  17. #include <asm/vpe.h>
/* Dynamically allocated major number for the VPE character device */
static int major;
/* The number of TCs and VPEs physically available on the core */
static int hw_tcs, hw_vpes;
/*
 * We are prepared so configure and start the VPE...
 *
 * Binds the first TC of @v to VPE1, points its restart PC at v->__start,
 * activates the VPE and calls the registered start notifiers.
 *
 * Must run on the master VPE (VPECONF0.MVP set); returns -1 otherwise.
 * Returns -ENOEXEC when @v has no TC or its TC is already active,
 * 0 on success.
 */
int vpe_run(struct vpe *v)
{
	unsigned long flags, val, dmt_flag;
	struct vpe_notifications *notifier;
	unsigned int vpeflags;
	struct tc *t;

	/* check we are the Master VPE */
	local_irq_save(flags);
	val = read_c0_vpeconf0();
	if (!(val & VPECONF0_MVP)) {
		pr_warn("VPE loader: only Master VPE's are able to config MT\n");
		local_irq_restore(flags);
		return -1;
	}

	/* quiesce MT: disable threading and the other VPEs while we poke
	 * configuration registers; previous state is restored on exit */
	dmt_flag = dmt();
	vpeflags = dvpe();

	if (list_empty(&v->tc)) {
		evpe(vpeflags);
		emt(dmt_flag);
		local_irq_restore(flags);
		pr_warn("VPE loader: No TC's associated with VPE %d\n",
			v->minor);
		return -ENOEXEC;
	}

	t = list_first_entry(&v->tc, struct tc, tc);

	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	settc(t->index);

	/* should check it is halted, and not activated */
	if ((read_tc_c0_tcstatus() & TCSTATUS_A) ||
	    !(read_tc_c0_tchalt() & TCHALT_H)) {
		evpe(vpeflags);
		emt(dmt_flag);
		local_irq_restore(flags);
		pr_warn("VPE loader: TC %d is already active!\n",
			t->index);
		return -ENOEXEC;
	}

	/*
	 * Write the address we want it to start running from in the TCPC
	 * register.
	 */
	write_tc_c0_tcrestart((unsigned long)v->__start);
	write_tc_c0_tccontext((unsigned long)0);

	/*
	 * Mark the TC as activated, not interrupt exempt and not dynamically
	 * allocatable
	 */
	val = read_tc_c0_tcstatus();
	val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
	write_tc_c0_tcstatus(val);

	/* release the TC from its halted state */
	write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);

	/*
	 * The sde-kit passes 'memsize' to __start in $a3, so set something
	 * here...  Or set $a3 to zero and define DFLT_STACK_SIZE and
	 * DFLT_HEAP_SIZE when you compile your program
	 */
	mttgpr(6, v->ntcs);
	mttgpr(7, physical_memsize);

	/* set up VPE1 */
	/*
	 * bind the TC to VPE 1 as late as possible so we only have the final
	 * VPE registers to set up, and so an EJTAG probe can trigger on it
	 */
	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);

	/* deactivate VPE1 while its remaining state is programmed */
	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));

	back_to_back_c0_hazard();

	/* Set up the XTC bit in vpeconf0 to point at our tc */
	write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
			      | (t->index << VPECONF0_XTC_SHIFT));

	back_to_back_c0_hazard();

	/* enable this VPE */
	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);

	/* clear out any left overs from a previous program */
	write_vpe_c0_status(0);
	write_vpe_c0_cause(0);

	/* take system out of configuration state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	/*
	 * SMTC/SMVP kernels manage VPE enable independently,
	 * but uniprocessor kernels need to turn it on, even
	 * if that wasn't the pre-dvpe() state.
	 */
#ifdef CONFIG_SMP
	evpe(vpeflags);
#else
	evpe(EVPE_ENABLE);
#endif
	emt(dmt_flag);
	local_irq_restore(flags);

	/* tell registered listeners the program is now running */
	list_for_each_entry(notifier, &v->notify, list)
		notifier->start(VPE_MODULE_MINOR);

	return 0;
}
/*
 * Take @tc out of service: mark it not activated, not dynamically
 * allocatable and interrupt exempt, then park it halted.
 *
 * IRQs, multi-threading and the other VPEs are disabled around the
 * register updates and restored afterwards.
 */
void cleanup_tc(struct tc *tc)
{
	unsigned long flags;
	unsigned int mtflags, vpflags;
	int tmp;

	local_irq_save(flags);
	mtflags = dmt();
	vpflags = dvpe();

	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	settc(tc->index);
	tmp = read_tc_c0_tcstatus();

	/* mark not allocated and not dynamically allocatable */
	tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
	tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
	write_tc_c0_tcstatus(tmp);

	/* halt the TC and wait for the write to take effect */
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();

	/* leave configuration state and restore previous MT/IRQ state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);
	evpe(vpflags);
	emt(mtflags);
	local_irq_restore(flags);
}
  139. /* module wrapper entry points */
  140. /* give me a vpe */
  141. void *vpe_alloc(void)
  142. {
  143. int i;
  144. struct vpe *v;
  145. /* find a vpe */
  146. for (i = 1; i < MAX_VPES; i++) {
  147. v = get_vpe(i);
  148. if (v != NULL) {
  149. v->state = VPE_STATE_INUSE;
  150. return v;
  151. }
  152. }
  153. return NULL;
  154. }
  155. EXPORT_SYMBOL(vpe_alloc);
  156. /* start running from here */
  157. int vpe_start(void *vpe, unsigned long start)
  158. {
  159. struct vpe *v = vpe;
  160. v->__start = start;
  161. return vpe_run(v);
  162. }
  163. EXPORT_SYMBOL(vpe_start);
  164. /* halt it for now */
  165. int vpe_stop(void *vpe)
  166. {
  167. struct vpe *v = vpe;
  168. struct tc *t;
  169. unsigned int evpe_flags;
  170. evpe_flags = dvpe();
  171. t = list_entry(v->tc.next, struct tc, tc);
  172. if (t != NULL) {
  173. settc(t->index);
  174. write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
  175. }
  176. evpe(evpe_flags);
  177. return 0;
  178. }
  179. EXPORT_SYMBOL(vpe_stop);
  180. /* I've done with it thank you */
  181. int vpe_free(void *vpe)
  182. {
  183. struct vpe *v = vpe;
  184. struct tc *t;
  185. unsigned int evpe_flags;
  186. t = list_entry(v->tc.next, struct tc, tc);
  187. if (t == NULL)
  188. return -ENOEXEC;
  189. evpe_flags = dvpe();
  190. /* Put MVPE's into 'configuration state' */
  191. set_c0_mvpcontrol(MVPCONTROL_VPC);
  192. settc(t->index);
  193. write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
  194. /* halt the TC */
  195. write_tc_c0_tchalt(TCHALT_H);
  196. mips_ihb();
  197. /* mark the TC unallocated */
  198. write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
  199. v->state = VPE_STATE_UNUSED;
  200. clear_c0_mvpcontrol(MVPCONTROL_VPC);
  201. evpe(evpe_flags);
  202. return 0;
  203. }
  204. EXPORT_SYMBOL(vpe_free);
/*
 * sysfs write handler ("kill" attribute): forcibly terminate the program
 * on the AP/SP VPE.  Order matters: notify listeners first, then free the
 * loaded program image, halt the TC, deactivate the VPE and release it.
 * Returns @len (the whole write is always consumed).
 */
static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct vpe *vpe = get_vpe(aprp_cpu_index());
	struct vpe_notifications *notifier;

	/* give registered listeners a chance to react before teardown */
	list_for_each_entry(notifier, &vpe->notify, list)
		notifier->stop(aprp_cpu_index());

	release_progmem(vpe->load_addr);
	cleanup_tc(get_tc(aprp_cpu_index()));
	vpe_stop(vpe);
	vpe_free(vpe);

	return len;
}
static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
/* sysfs read handler: report the TC count configured for the AP/SP VPE */
static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
			 char *buf)
{
	struct vpe *vpe = get_vpe(aprp_cpu_index());

	return sprintf(buf, "%d\n", vpe->ntcs);
}
/*
 * sysfs write handler: set the number of TCs the loaded program may use.
 * Accepts any base via kstrtoul; rejects 0 and values beyond the TCs
 * reserved for AP/SP.  Returns @len on success or a negative errno.
 */
static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct vpe *vpe = get_vpe(aprp_cpu_index());
	unsigned long new;
	int ret;

	ret = kstrtoul(buf, 0, &new);
	if (ret < 0)
		return ret;

	/*
	 * NOTE(review): (hw_tcs - aprp_cpu_index()) is int and gets promoted
	 * to unsigned long for this comparison; if it could ever be negative
	 * the bound check would pass spuriously -- confirm that
	 * hw_tcs >= aprp_cpu_index() always holds here.
	 */
	if (new == 0 || new > (hw_tcs - aprp_cpu_index()))
		return -EINVAL;

	vpe->ntcs = new;

	return len;
}
static DEVICE_ATTR_RW(ntcs);
/* sysfs attributes exposed on the vpe device: "kill" and "ntcs" */
static struct attribute *vpe_attrs[] = {
	&dev_attr_kill.attr,
	&dev_attr_ntcs.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vpe);
static void vpe_device_release(struct device *cd)
{
	/*
	 * NOTE(review): the only device using this class, vpe_device below,
	 * is statically allocated, yet this release callback kfree()s it.
	 * kfree() on memory not obtained from kmalloc() is invalid -- confirm
	 * this path can never actually run (i.e. the last reference to
	 * vpe_device is never dropped) before relying on it.
	 */
	kfree(cd);
}

/* "vpe" device class; publishes the kill/ntcs attributes declared above */
static struct class vpe_class = {
	.name = "vpe",
	.owner = THIS_MODULE,
	.dev_release = vpe_device_release,
	.dev_groups = vpe_groups,
};

/* The single character device ("vpe1") used to load and control programs */
static struct device vpe_device;
/*
 * Loader initialisation: register the character device, class and sysfs
 * device, then walk every TC reserved for AP/SP (index >=
 * aprp_cpu_index()), allocating struct tc/struct vpe bookkeeping and
 * parking each hardware TC halted, deactivated and interrupt exempt,
 * ready for vpe_run().  Returns 0 on success or a negative errno.
 */
int __init vpe_module_init(void)
{
	unsigned int mtflags, vpflags;
	unsigned long flags, val;
	struct vpe *v = NULL;
	struct tc *t;
	int tc, err;

	if (!cpu_has_mipsmt) {
		pr_warn("VPE loader: not a MIPS MT capable processor\n");
		return -ENODEV;
	}

	if (vpelimit == 0) {
		pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n"
			"Pass maxvpes=<n> argument as kernel argument\n");
		return -ENODEV;
	}

	if (aprp_cpu_index() == 0) {
		pr_warn("No TCs reserved for AP/SP, not initialize VPE loader\n"
			"Pass maxtcs=<n> argument as kernel argument\n");
		return -ENODEV;
	}

	major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
	if (major < 0) {
		pr_warn("VPE loader: unable to register character device\n");
		return major;
	}

	err = class_register(&vpe_class);
	if (err) {
		pr_err("vpe_class registration failed\n");
		goto out_chrdev;
	}

	device_initialize(&vpe_device);
	vpe_device.class = &vpe_class,
	vpe_device.parent = NULL,
	dev_set_name(&vpe_device, "vpe1");
	vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
	err = device_add(&vpe_device);
	if (err) {
		pr_err("Adding vpe_device failed\n");
		goto out_class;
	}

	/* quiesce MT and enter configuration state for the hardware probe */
	local_irq_save(flags);
	mtflags = dmt();
	vpflags = dvpe();

	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	/* read the physical TC/VPE counts from MVPConf0 */
	val = read_c0_mvpconf0();
	hw_tcs = (val & MVPCONF0_PTC) + 1;
	hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;

	for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
		/*
		 * Must re-enable multithreading temporarily or in case we
		 * reschedule send IPIs or similar we might hang.
		 */
		clear_c0_mvpcontrol(MVPCONTROL_VPC);
		evpe(vpflags);
		emt(mtflags);
		local_irq_restore(flags);

		t = alloc_tc(tc);
		if (!t) {
			err = -ENOMEM;
			goto out_dev;
		}

		/* back into configuration state for the register writes */
		local_irq_save(flags);
		mtflags = dmt();
		vpflags = dvpe();
		set_c0_mvpcontrol(MVPCONTROL_VPC);

		/* VPE's */
		/* NOTE(review): always true here given the loop bound -- dead check */
		if (tc < hw_tcs) {
			settc(tc);

			v = alloc_vpe(tc);
			if (v == NULL) {
				/*
				 * NOTE(review): this path falls through to
				 * out_reenable and returns 0 without setting
				 * err -- confirm a partial init is intended.
				 */
				pr_warn("VPE: unable to allocate VPE\n");
				goto out_reenable;
			}

			v->ntcs = hw_tcs - aprp_cpu_index();

			/* add the tc to the list of this vpe's tc's. */
			list_add(&t->tc, &v->tc);

			/* deactivate all but vpe0 */
			if (tc >= aprp_cpu_index()) {
				unsigned long tmp = read_vpe_c0_vpeconf0();

				tmp &= ~VPECONF0_VPA;

				/* master VPE */
				tmp |= VPECONF0_MVP;
				write_vpe_c0_vpeconf0(tmp);
			}

			/* disable multi-threading with TC's */
			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() &
						~VPECONTROL_TE);

			if (tc >= vpelimit) {
				/*
				 * Set config to be the same as vpe0,
				 * particularly kseg0 coherency alg
				 */
				write_vpe_c0_config(read_c0_config());
			}
		}

		/* TC's */
		t->pvpe = v;	/* set the parent vpe */

		if (tc >= aprp_cpu_index()) {
			unsigned long tmp;

			settc(tc);

			/* Any TC that is bound to VPE0 gets left as is - in
			 * case we are running SMTC on VPE0. A TC that is bound
			 * to any other VPE gets bound to VPE0, ideally I'd like
			 * to make it homeless but it doesn't appear to let me
			 * bind a TC to a non-existent VPE. Which is perfectly
			 * reasonable.
			 *
			 * The (un)bound state is visible to an EJTAG probe so
			 * may notify GDB...
			 */
			tmp = read_tc_c0_tcbind();
			if (tmp & TCBIND_CURVPE) {
				/* tc is bound >vpe0 */
				write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);

				t->pvpe = get_vpe(0);	/* set the parent vpe */
			}

			/* halt the TC */
			write_tc_c0_tchalt(TCHALT_H);
			mips_ihb();

			tmp = read_tc_c0_tcstatus();

			/* mark not activated and not dynamically allocatable */
			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
			tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
			write_tc_c0_tcstatus(tmp);
		}
	}

out_reenable:
	/* release config state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	evpe(vpflags);
	emt(mtflags);
	local_irq_restore(flags);

	return 0;

out_dev:
	device_del(&vpe_device);

out_class:
	class_unregister(&vpe_class);

out_chrdev:
	unregister_chrdev(major, VPE_MODULE_NAME);

	return err;
}
  400. void __exit vpe_module_exit(void)
  401. {
  402. struct vpe *v, *n;
  403. device_del(&vpe_device);
  404. class_unregister(&vpe_class);
  405. unregister_chrdev(major, VPE_MODULE_NAME);
  406. /* No locking needed here */
  407. list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
  408. if (v->state != VPE_STATE_UNUSED)
  409. release_vpe(v);
  410. }
  411. }