context.c

/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
	return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}

/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
	int i;

	ctx->afu = afu;
	ctx->master = master;
	ctx->pid = NULL; /* Set in start work ioctl */
	mutex_init(&ctx->mapping_lock);
	ctx->mapping = NULL;
	ctx->tidr = 0;
	ctx->assign_tidr = false;
	if (cxl_is_power8()) {
		spin_lock_init(&ctx->sste_lock);

		/*
		 * Allocate the segment table before we put it in the IDR so that we
		 * can always access it when dereferenced from the IDR. For the same
		 * reason, the segment table is only destroyed after the context is
		 * removed from the IDR. Access to this in the IOCTL is protected by
		 * Linux filesystem semantics (can't IOCTL until open is complete).
		 */
		i = cxl_alloc_sst(ctx);
		if (i)
			return i;
	}

	INIT_WORK(&ctx->fault_work, cxl_handle_fault);

	init_waitqueue_head(&ctx->wq);
	spin_lock_init(&ctx->lock);

	ctx->irq_bitmap = NULL;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
	ctx->pending_afu_err = false;

	INIT_LIST_HEAD(&ctx->irq_names);
	INIT_LIST_HEAD(&ctx->extra_irq_contexts);

	/*
	 * When we have to destroy all contexts in cxl_context_detach_all() we
	 * end up with afu_release_irqs() called from inside an
	 * idr_for_each_entry(). Hence we need to make sure that anything
	 * dereferenced from this IDR is ok before we allocate the IDR here.
	 * Clearing the IRQ ranges here ensures that.
	 */
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		ctx->irqs.range[i] = 0;

	mutex_init(&ctx->status_mutex);

	ctx->status = OPENED;

	/*
	 * Allocating the IDR entry publishes this context: make sure
	 * everything reachable through it has been set up first.
	 */
	mutex_lock(&afu->contexts_lock);
	idr_preload(GFP_KERNEL);
	i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe,
		      ctx->afu->num_procs, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&afu->contexts_lock);
	if (i < 0)
		return i;

	ctx->pe = i;
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		ctx->elem = &ctx->afu->native->spa[i];
		ctx->external_pe = ctx->pe;
	} else {
		ctx->external_pe = -1; /* assigned when attaching */
	}
	ctx->pe_inserted = false;

	/*
	 * Take a ref on the afu so that it stays alive at least until
	 * this context is reclaimed inside reclaim_ctx.
	 */
	cxl_afu_get(afu);
	return 0;
}
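
/*
 * A minimal sketch of the expected call sequence (the real callers live in
 * the driver's file-ops and kernel-API paths; the flow below is only
 * illustrative of the contract this file exports, not a verbatim caller):
 *
 *	ctx = cxl_context_alloc();
 *	if (!ctx)
 *		return -ENOMEM;
 *	rc = cxl_context_init(ctx, afu, false);
 *	if (rc) {
 *		kfree(ctx);
 *		return rc;
 *	}
 *	cxl_context_set_mapping(ctx, inode->i_mapping);
 *	...
 *	cxl_context_detach(ctx);
 *	cxl_context_free(ctx);
 */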

void cxl_context_set_mapping(struct cxl_context *ctx,
			struct address_space *mapping)
{
	mutex_lock(&ctx->mapping_lock);
	ctx->mapping = mapping;
	mutex_unlock(&ctx->mapping_lock);
}
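
/*
 * The mapping recorded above is what lets cxl_context_detach_all() further
 * down forcibly shoot down any live problem-state mmaps with
 * unmap_mapping_range() when the card is being torn down.
 */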

static vm_fault_t cxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cxl_context *ctx = vma->vm_file->private_data;
	u64 area, offset;
	vm_fault_t ret;

	offset = vmf->pgoff << PAGE_SHIFT;

	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
			__func__, ctx->pe, vmf->address, offset);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		area = ctx->afu->psn_phys;
		if (offset >= ctx->afu->adapter->ps_size)
			return VM_FAULT_SIGBUS;
	} else {
		area = ctx->psn_phys;
		if (offset >= ctx->psn_size)
			return VM_FAULT_SIGBUS;
	}

	mutex_lock(&ctx->status_mutex);

	if (ctx->status != STARTED) {
		mutex_unlock(&ctx->status_mutex);
		pr_devel("%s: Context not started, failing problem state access\n", __func__);
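		/*
		 * If the context hit an MMIO error, back the mapping with a
		 * single page filled with 0xFF instead of raising SIGBUS, so
		 * reads behave like MMIO from a dead or absent card (which
		 * returns all ones).
		 */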
		if (ctx->mmio_err_ff) {
			if (!ctx->ff_page) {
				ctx->ff_page = alloc_page(GFP_USER);
				if (!ctx->ff_page)
					return VM_FAULT_OOM;
				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
			}
			get_page(ctx->ff_page);
			vmf->page = ctx->ff_page;
			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
			return 0;
		}
		return VM_FAULT_SIGBUS;
	}

	ret = vmf_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);

	mutex_unlock(&ctx->status_mutex);

	return ret;
}

static const struct vm_operations_struct cxl_mmap_vmops = {
	.fault = cxl_mmap_fault,
};

/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 start = vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		if (start + len > ctx->afu->adapter->ps_size)
			return -EINVAL;

		if (cxl_is_power9()) {
			/*
			 * Make sure there is a valid problem state
			 * area space for this AFU.
			 */
			if (ctx->master && !ctx->afu->psa) {
				pr_devel("AFU doesn't support mmio space\n");
				return -EINVAL;
			}

			/* Can't mmap until the AFU is enabled */
			if (!ctx->afu->enabled)
				return -EBUSY;
		}
	} else {
		if (start + len > ctx->psn_size)
			return -EINVAL;

		/* Make sure there is a valid per process space for this AFU */
		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
			pr_devel("AFU doesn't support mmio space\n");
			return -EINVAL;
		}

		/* Can't mmap until the AFU is enabled */
		if (!ctx->afu->enabled)
			return -EBUSY;
	}

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &cxl_mmap_vmops;
	return 0;
}
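
/*
 * A rough userspace sketch of how this mapping is reached (device path and
 * ioctl are from the cxl user API in uapi/misc/cxl.h; start-work fields are
 * elided for brevity):
 *
 *	int fd = open("/dev/cxl/afu0.0s", O_RDWR);
 *	struct cxl_ioctl_start_work work = { 0 };
 *	ioctl(fd, CXL_IOCTL_START_WORK, &work);	// context -> STARTED
 *	void *psa = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);	// routed to cxl_context_iomap()
 */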

/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
	enum cxl_context_status status;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != STARTED)
		return -EBUSY;

	/* Only warn if we detached while the link was OK.
	 * If detach fails when hw is down, we don't care.
	 */
	WARN_ON(cxl_ops->detach_process(ctx) &&
		cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));

	flush_work(&ctx->fault_work); /* Only needed for dedicated process */

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	if (cxl_ops->irq_wait)
		cxl_ops->irq_wait(ctx);

	/* release the reference to the group leader and mm handling pid */
	put_pid(ctx->pid);

	cxl_ctx_put();

	/* Decrease the attached context count on the adapter */
	cxl_adapter_context_put(ctx->afu->adapter);

	/* Decrease the mm count on the context */
	cxl_context_mm_count_put(ctx);
	if (ctx->mm)
		mm_context_remove_copro(ctx->mm);
	ctx->mm = NULL;

	return 0;
}
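
/*
 * Note the -EBUSY contract above: a context that never reached STARTED
 * returns -EBUSY, which cxl_context_detach() below uses to skip releasing
 * IRQs and waking waiters for contexts that were never attached.
 */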

/*
 * Detach the given context from the AFU. This doesn't actually
 * free the context but it should stop the context running in hardware
 * (i.e. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
	int rc;

	rc = __detach_context(ctx);
	if (rc)
		return;

	afu_release_irqs(ctx, ctx);
	wake_up_all(&ctx->wq);
}

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		/*
		 * Anything done in here needs to be set up before the IDR is
		 * created and torn down after the IDR is removed.
		 */
		cxl_context_detach(ctx);

		/*
		 * We are force detaching - remove any active PSA mappings so
		 * userspace cannot interfere with the card if it comes back.
		 * Easiest way to exercise this is to unbind and rebind the
		 * driver via sysfs while it is in use.
		 */
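		/*
		 * unmap_mapping_range(mapping, 0, 0, 1): holebegin 0 with
		 * holelen 0 means "unmap everything to the end", and
		 * even_cows = 1 tears down private COW copies as well.
		 */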
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}
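
/*
 * RCU callback scheduled by cxl_context_free(); runs once any concurrent
 * RCU readers that may still hold a pointer obtained from the contexts
 * IDR have finished.
 */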
static void reclaim_ctx(struct rcu_head *rcu)
{
	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

	if (cxl_is_power8())
		free_page((u64)ctx->sstp);
	if (ctx->ff_page)
		__free_page(ctx->ff_page);
	ctx->sstp = NULL;

	kfree(ctx->irq_bitmap);

	/* Drop ref to the afu device taken during cxl_context_init */
	cxl_afu_put(ctx->afu);

	kfree(ctx);
}

void cxl_context_free(struct cxl_context *ctx)
{
	if (ctx->kernelapi && ctx->mapping)
		cxl_release_mapping(ctx);
	mutex_lock(&ctx->afu->contexts_lock);
	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
	mutex_unlock(&ctx->afu->contexts_lock);
	call_rcu(&ctx->rcu, reclaim_ctx);
}

void cxl_context_mm_count_get(struct cxl_context *ctx)
{
	if (ctx->mm)
		atomic_inc(&ctx->mm->mm_count);
}
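
/*
 * Note: the atomic_inc() above open-codes mmgrab() from linux/sched/mm.h
 * (a reference on the mm_struct itself, not its address space); it is
 * paired with the mmdrop() below.
 */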

void cxl_context_mm_count_put(struct cxl_context *ctx)
{
	if (ctx->mm)
		mmdrop(ctx->mm);
}