@@ -13,6 +13,7 @@
 #include <linux/random.h>
 #include <linux/workqueue.h>
+#include <linux/scatterlist.h>
 #include <rdma/ib_verbs.h>
 
 #include "smc_pnet.h"
@@ -295,6 +296,46 @@ int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
 	return 0;
 }
 
+/* synchronize buffer usage for cpu access */
+void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
+			    struct smc_buf_desc *buf_slot,
+			    enum dma_data_direction data_direction)
+{
+	struct scatterlist *sg;
+	unsigned int i;
+
+	/* for now there is just one DMA address */
+	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
+		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
+		if (!sg_dma_len(sg))
+			break;
+		ib_dma_sync_single_for_cpu(smcibdev->ibdev,
+					   sg_dma_address(sg),
+					   sg_dma_len(sg),
+					   data_direction);
+	}
+}
+
+/* synchronize buffer usage for device access */
+void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
+			       struct smc_buf_desc *buf_slot,
+			       enum dma_data_direction data_direction)
+{
+	struct scatterlist *sg;
+	unsigned int i;
+
+	/* for now there is just one DMA address */
+	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
+		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
+		if (!sg_dma_len(sg))
+			break;
+		ib_dma_sync_single_for_device(smcibdev->ibdev,
+					      sg_dma_address(sg),
+					      sg_dma_len(sg),
+					      data_direction);
+	}
+}
+
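For context, a minimal caller sketch follows (illustrative only, not part of the patch): before the receive path copies data out of an RMB it would sync the buffer for CPU access, and hand the buffer back to the device afterwards. The names conn->lgr->lnk[SMC_SINGLE_LINK], conn->rmb_desc and rmb_desc->cpu_addr are assumed from the surrounding SMC code and may differ.

/* Illustrative sketch only -- not part of this patch.  Assumes the SMC
 * internal headers and the conn/lgr/rmb_desc layout used elsewhere in SMC.
 */
#include <linux/socket.h>
#include <linux/skbuff.h>	/* memcpy_to_msg() */
#include "smc_core.h"		/* link group / buffer descriptors (assumed) */
#include "smc_ib.h"		/* smc_ib_sync_sg_for_cpu()/_for_device() */

static int smc_rx_copy_sketch(struct smc_connection *conn, struct msghdr *msg,
			      int offset, int len)
{
	struct smc_link *lnk = &conn->lgr->lnk[SMC_SINGLE_LINK];
	int rc;

	/* make the peer's RDMA writes into the RMB visible to the CPU */
	smc_ib_sync_sg_for_cpu(lnk->smcibdev, conn->rmb_desc, DMA_FROM_DEVICE);
	rc = memcpy_to_msg(msg, (u8 *)conn->rmb_desc->cpu_addr + offset, len);
	/* give the buffer back to the device for further RDMA writes */
	smc_ib_sync_sg_for_device(lnk->smcibdev, conn->rmb_desc, DMA_FROM_DEVICE);
	return rc;
}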
 /* Map a new TX or RX buffer SG-table to DMA */
 int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
		       struct smc_buf_desc *buf_slot,