@@ -54,7 +54,7 @@ struct dma_buf_attachment;
  * @release: release this buffer; to be called after the last dma_buf_put.
  * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
  *		      caches and allocate backing storage (if not yet done)
- *		      respectively pin the objet into memory.
+ *		      respectively pin the object into memory.
  * @end_cpu_access: [optional] called after cpu access to flush caches.
  * @kmap_atomic: maps a page from the buffer into kernel address
  *		 space, users may not block until the subsequent unmap call.
@@ -93,10 +93,8 @@ struct dma_buf_ops {
 	/* after final dma_buf_put() */
 	void (*release)(struct dma_buf *);
 
-	int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
-				enum dma_data_direction);
-	void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
-			       enum dma_data_direction);
+	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
+	void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
 	void *(*kmap_atomic)(struct dma_buf *, unsigned long);
 	void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
 	void *(*kmap)(struct dma_buf *, unsigned long);
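
For illustration only (not part of the patch): a minimal sketch of an exporter implementing the two callbacks under the new range-less signatures. The names my_buffer, my_begin_cpu_access, my_end_cpu_access and my_dmabuf_ops are hypothetical; invalidate_kernel_vmap_range()/flush_kernel_vmap_range() are existing kernel helpers, though whether they are the right cache maintenance depends on how the exporter maps its backing storage.

    #include <linux/dma-buf.h>
    #include <linux/highmem.h>

    /* Hypothetical exporter state; only the reworked callbacks are shown. */
    struct my_buffer {
    	void *vaddr;	/* kernel mapping of the backing storage */
    	size_t size;
    };

    static int my_begin_cpu_access(struct dma_buf *dmabuf,
    			       enum dma_data_direction direction)
    {
    	struct my_buffer *buf = dmabuf->priv;

    	/* No start/len anymore: the exporter must make the whole
    	 * buffer coherent for the CPU, not just a sub-range. */
    	invalidate_kernel_vmap_range(buf->vaddr, buf->size);
    	return 0;
    }

    static void my_end_cpu_access(struct dma_buf *dmabuf,
    			      enum dma_data_direction direction)
    {
    	struct my_buffer *buf = dmabuf->priv;

    	flush_kernel_vmap_range(buf->vaddr, buf->size);
    }

    static const struct dma_buf_ops my_dmabuf_ops = {
    	/* mandatory ops (map_dma_buf, unmap_dma_buf, release, ...)
    	 * omitted from this sketch */
    	.begin_cpu_access = my_begin_cpu_access,
    	.end_cpu_access	  = my_end_cpu_access,
    };

The visible consequence of dropping the size_t pair is in the comment above: each begin/end call now covers the entire buffer, so exporters can no longer restrict flushing to a caller-supplied window.
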
@@ -224,9 +222,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
 					enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
 			      enum dma_data_direction);
-int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
-void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
 			    enum dma_data_direction dir);
 void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
 void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
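
On the caller side the conversion is mechanical: drop the start/len pair and bracket CPU access to the whole buffer. A hypothetical importer (importer_read is illustrative, not from the tree) might now read a page like this:

    #include <linux/dma-buf.h>

    static int importer_read(struct dma_buf *dmabuf, unsigned long pgoff)
    {
    	void *vaddr;
    	int ret;

    	/* Was: dma_buf_begin_cpu_access(dmabuf, 0, dmabuf->size, ...) */
    	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
    	if (ret)
    		return ret;

    	vaddr = dma_buf_kmap(dmabuf, pgoff);	/* map one page */
    	if (vaddr) {
    		/* ... read from vaddr ... */
    		dma_buf_kunmap(dmabuf, pgoff, vaddr);
    	}

    	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
    	return 0;
    }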