context.c

/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/sched/mm.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"

/*
 * Allocates space for a CXL context.
 */
struct cxl_context *cxl_context_alloc(void)
{
	return kzalloc(sizeof(struct cxl_context), GFP_KERNEL);
}

/*
 * Initialises a CXL context.
 */
int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
{
	int i;

	spin_lock_init(&ctx->sste_lock);
	ctx->afu = afu;
	ctx->master = master;
	ctx->pid = NULL; /* Set in start work ioctl */
	mutex_init(&ctx->mapping_lock);
	ctx->mapping = NULL;

	/*
	 * Allocate the segment table before we put it in the IDR so that we
	 * can always access it when dereferenced from the IDR. For the same
	 * reason, the segment table is only destroyed after the context is
	 * removed from the IDR. Access to this in the IOCTL is protected by
	 * Linux filesystem semantics (can't IOCTL until open is complete).
	 */
	i = cxl_alloc_sst(ctx);
	if (i)
		return i;

	INIT_WORK(&ctx->fault_work, cxl_handle_fault);

	init_waitqueue_head(&ctx->wq);
	spin_lock_init(&ctx->lock);

	ctx->irq_bitmap = NULL;
	ctx->pending_irq = false;
	ctx->pending_fault = false;
	ctx->pending_afu_err = false;

	INIT_LIST_HEAD(&ctx->irq_names);
	INIT_LIST_HEAD(&ctx->extra_irq_contexts);

	/*
	 * When we have to destroy all contexts in cxl_context_detach_all() we
	 * end up with afu_release_irqs() called from inside an
	 * idr_for_each_entry(). Hence we need to make sure that anything
	 * dereferenced from this IDR is ok before we allocate the IDR here.
	 * This clears out the IRQ ranges to ensure this.
	 */
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		ctx->irqs.range[i] = 0;

	mutex_init(&ctx->status_mutex);

	ctx->status = OPENED;

	/*
	 * Allocating the IDR entry: everything reachable from this context
	 * must already be set up, since other threads can find it via the
	 * IDR as soon as this succeeds.
	 */
	mutex_lock(&afu->contexts_lock);
	idr_preload(GFP_KERNEL);
	i = idr_alloc(&ctx->afu->contexts_idr, ctx, ctx->afu->adapter->min_pe,
		      ctx->afu->num_procs, GFP_NOWAIT);
	idr_preload_end();
	mutex_unlock(&afu->contexts_lock);
	if (i < 0)
		return i;

	ctx->pe = i;
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		ctx->elem = &ctx->afu->native->spa[i];
		ctx->external_pe = ctx->pe;
	} else {
		ctx->external_pe = -1; /* assigned when attaching */
	}
	ctx->pe_inserted = false;

	/*
	 * Take a reference on the AFU so that it stays alive at least until
	 * this context is reclaimed inside reclaim_ctx().
	 */
	cxl_afu_get(afu);
	return 0;
}
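
/*
 * Illustrative only, not part of the driver: a minimal sketch of the
 * context lifecycle this file assumes (the real callers live elsewhere
 * in the cxl driver, e.g. file.c and api.c; error handling elided):
 *
 *	struct cxl_context *ctx = cxl_context_alloc();	// may return NULL
 *	rc = cxl_context_init(ctx, afu, false);
 *	...start work, mmap the problem state area, etc...
 *	cxl_context_detach(ctx);
 *	cxl_context_free(ctx);
 */

/*
 * ctx->mapping is set separately from init since the backing
 * address_space may not exist yet; mapping_lock keeps this coherent
 * with the force-unmap in cxl_context_detach_all().
 */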
void cxl_context_set_mapping(struct cxl_context *ctx,
			struct address_space *mapping)
{
	mutex_lock(&ctx->mapping_lock);
	ctx->mapping = mapping;
	mutex_unlock(&ctx->mapping_lock);
}
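
/*
 * Fault handler for mmaps of the problem state area. Translates the
 * fault offset into the context's problem state space (or the whole
 * AFU's in dedicated mode) and inserts the corresponding pfn. If the
 * context is not STARTED, either back the page with 0xff bytes (when
 * mmio_err_ff is set) so userspace reads all-ones, or SIGBUS.
 */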
static int cxl_mmap_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cxl_context *ctx = vma->vm_file->private_data;
	u64 area, offset;

	offset = vmf->pgoff << PAGE_SHIFT;

	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
			__func__, ctx->pe, vmf->address, offset);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		area = ctx->afu->psn_phys;
		if (offset >= ctx->afu->adapter->ps_size)
			return VM_FAULT_SIGBUS;
	} else {
		area = ctx->psn_phys;
		if (offset >= ctx->psn_size)
			return VM_FAULT_SIGBUS;
	}

	mutex_lock(&ctx->status_mutex);

	if (ctx->status != STARTED) {
		mutex_unlock(&ctx->status_mutex);
		pr_devel("%s: Context not started, failing problem state access\n", __func__);
		if (ctx->mmio_err_ff) {
			if (!ctx->ff_page) {
				ctx->ff_page = alloc_page(GFP_USER);
				if (!ctx->ff_page)
					return VM_FAULT_OOM;
				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
			}
			get_page(ctx->ff_page);
			vmf->page = ctx->ff_page;
			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
			return 0;
		}
		return VM_FAULT_SIGBUS;
	}

	vm_insert_pfn(vma, vmf->address, (area + offset) >> PAGE_SHIFT);

	mutex_unlock(&ctx->status_mutex);

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct cxl_mmap_vmops = {
	.fault = cxl_mmap_fault,
};

/*
 * Map a per-context mmio space into the given vma.
 */
int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
{
	u64 start = vma->vm_pgoff << PAGE_SHIFT;
	u64 len = vma->vm_end - vma->vm_start;

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		if (start + len > ctx->afu->adapter->ps_size)
			return -EINVAL;
	} else {
		if (start + len > ctx->psn_size)
			return -EINVAL;
	}

	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
		/* make sure there is a valid per process space for this AFU */
		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
			pr_devel("AFU doesn't support mmio space\n");
			return -EINVAL;
		}

		/* Can't mmap until the AFU is enabled */
		if (!ctx->afu->enabled)
			return -EBUSY;
	}

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe, ctx->master);

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &cxl_mmap_vmops;
	return 0;
}
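
/*
 * Illustrative only: userspace typically reaches cxl_context_iomap()
 * through the cxl character device, along these lines (see
 * Documentation/powerpc/cxl.txt for the actual ABI):
 *
 *	fd = open("/dev/cxl/afu0.0s", O_RDWR);
 *	ioctl(fd, CXL_IOCTL_START_WORK, &work);
 *	ps = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */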

/*
 * Detach a context from the hardware. This disables interrupts and doesn't
 * return until all outstanding interrupts for this context have completed. The
 * hardware should no longer access *ctx after this has returned.
 */
int __detach_context(struct cxl_context *ctx)
{
	enum cxl_context_status status;

	mutex_lock(&ctx->status_mutex);
	status = ctx->status;
	ctx->status = CLOSED;
	mutex_unlock(&ctx->status_mutex);
	if (status != STARTED)
		return -EBUSY;

	/* Only warn if we detached while the link was OK.
	 * If detach fails when hw is down, we don't care.
	 */
	WARN_ON(cxl_ops->detach_process(ctx) &&
		cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
	flush_work(&ctx->fault_work); /* Only needed for dedicated process */

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	if (cxl_ops->irq_wait)
		cxl_ops->irq_wait(ctx);

	/* release the reference to the group leader and mm handling pid */
	put_pid(ctx->pid);

	cxl_ctx_put();

	/* Decrease the attached context count on the adapter */
	cxl_adapter_context_put(ctx->afu->adapter);

	/* Decrease the mm count on the context */
	cxl_context_mm_count_put(ctx);
	ctx->mm = NULL;

	return 0;
}

/*
 * Detach the given context from the AFU. This doesn't actually
 * free the context but it should stop the context running in hardware
 * (i.e. prevent this context from generating any further interrupts
 * so that it can be freed).
 */
void cxl_context_detach(struct cxl_context *ctx)
{
	int rc;

	rc = __detach_context(ctx);
	if (rc)
		return;

	afu_release_irqs(ctx, ctx);
	wake_up_all(&ctx->wq);
}

/*
 * Detach all contexts on the given AFU.
 */
void cxl_context_detach_all(struct cxl_afu *afu)
{
	struct cxl_context *ctx;
	int tmp;

	mutex_lock(&afu->contexts_lock);
	idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
		/*
		 * Anything done in here needs to be set up before the IDR is
		 * created and torn down after the IDR is removed.
		 */
		cxl_context_detach(ctx);

		/*
		 * We are force detaching - remove any active PSA mappings so
		 * userspace cannot interfere with the card if it comes back.
		 * Easiest way to exercise this is to unbind and rebind the
		 * driver via sysfs while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}
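
/*
 * RCU callback that does the final free of a context: by the time this
 * runs, the context has been removed from the AFU's IDR and a grace
 * period has elapsed, so concurrent IDR walkers can no longer be
 * dereferencing it.
 */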
static void reclaim_ctx(struct rcu_head *rcu)
{
	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);

	free_page((u64)ctx->sstp);
	if (ctx->ff_page)
		__free_page(ctx->ff_page);
	ctx->sstp = NULL;

	kfree(ctx->irq_bitmap);

	/* Drop ref to the afu device taken during cxl_context_init */
	cxl_afu_put(ctx->afu);

	kfree(ctx);
}
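
/*
 * Remove the context from the AFU's IDR so no new lookups can find it,
 * then hand the actual free off to reclaim_ctx() after an RCU grace
 * period.
 */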
void cxl_context_free(struct cxl_context *ctx)
{
	if (ctx->kernelapi && ctx->mapping)
		cxl_release_mapping(ctx);
	mutex_lock(&ctx->afu->contexts_lock);
	idr_remove(&ctx->afu->contexts_idr, ctx->pe);
	mutex_unlock(&ctx->afu->contexts_lock);
	call_rcu(&ctx->rcu, reclaim_ctx);
}
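
/*
 * Pin the mm_struct itself (mm_count, not mm_users): this keeps the
 * struct around while ctx->mm is held, but does not pin the address
 * space contents.
 */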
void cxl_context_mm_count_get(struct cxl_context *ctx)
{
	if (ctx->mm)
		mmgrab(ctx->mm);
}
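
/* Drop the mm_struct reference taken in cxl_context_mm_count_get() */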
void cxl_context_mm_count_put(struct cxl_context *ctx)
{
	if (ctx->mm)
		mmdrop(ctx->mm);
}