@@ -38,6 +38,7 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
 * @dev: back pointer to the ion_device
 * @heap: back pointer to the heap the buffer came from
 * @flags: buffer specific flags
+ * @private_flags: internal buffer specific flags
 * @size: size of the buffer
 * @priv_virt: private data to the buffer representable as
 *		a void *
@@ -66,6 +67,7 @@ struct ion_buffer {
	struct ion_device *dev;
	struct ion_heap *heap;
	unsigned long flags;
+	unsigned long private_flags;
	size_t size;
	union {
		void *priv_virt;
@@ -98,7 +100,11 @@ void ion_buffer_destroy(struct ion_buffer *buffer);
 * @map_user map memory to userspace
 *
 * allocate, phys, and map_user return 0 on success, -errno on error.
- * map_dma and map_kernel return pointer on success, ERR_PTR on error.
+ * map_dma and map_kernel return a pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being freed must be truly freed back to the
+ * system, not put in a page pool or otherwise cached.
 */
struct ion_heap_ops {
	int (*allocate)(struct ion_heap *heap,
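For illustration, a heap's free callback could honor the new flag along
these lines. This is only a sketch, not code from this patch: my_heap_free()
and my_pool_add() are hypothetical names, and the sg_table-in-priv_virt
layout is an assumption about a particular heap implementation.

static void my_heap_free(struct ion_buffer *buffer)
{
	/* Hypothetical heap: assumes an sg_table lives in priv_virt. */
	struct sg_table *table = buffer->priv_virt;
	bool shrinker_free = buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		if (shrinker_free)
			/* Called from a shrinker: return the pages to the
			 * system allocator immediately. */
			__free_pages(sg_page(sg), get_order(sg->length));
		else
			/* Normal free: recycling through a pool is fine. */
			my_pool_add(sg_page(sg), get_order(sg->length));
	}
	sg_free_table(table);
	kfree(table);
}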
@@ -122,6 +128,17 @@ struct ion_heap_ops {
 */
#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)

+/**
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
+
/**
 * struct ion_heap - represents a heap in the system
 * @node: rb node to put the heap on the device's tree of heaps
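Presumably the deferred-free drain path is what sets this flag, just
before destroying each buffer, when invoked on behalf of a shrinker. A
hedged sketch of such a shared helper follows; the free_list, free_lock,
free_list_size, and buffer->list names are assumptions based on the
ION_HEAP_FLAG_DEFER_FREE bookkeeping, not fields shown in this header:

static size_t my_freelist_drain(struct ion_heap *heap, size_t size,
				bool skip_pools)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;

	spin_lock(&heap->free_lock);
	while (!list_empty(&heap->free_list) && total_drained < size) {
		buffer = list_first_entry(&heap->free_list,
					  struct ion_buffer, list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		total_drained += buffer->size;
		if (skip_pools)
			/* Tell heap->ops->free() to bypass any page pool. */
			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
		/* Drop the lock around the actual free. */
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);

	return total_drained;
}

With a helper like this, ion_heap_freelist_shrink() below would amount to
my_freelist_drain(heap, size, true), while the plain drain passes false.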
@@ -257,6 +274,29 @@ void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
 */
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);

+/**
+ * ion_heap_freelist_shrink - drain the deferred free
+ *				list, skipping any heap-specific
+ *				pooling or caching mechanisms
+ *
+ * @heap: the heap
+ * @size: amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed. The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ *
+ * Unlike with @ion_heap_freelist_drain, don't put any pages back into
+ * page pools or otherwise cache the pages. Everything must be
+ * genuinely freed back to the system. If you're freeing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
+ * flag.
+ */
+size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+				size_t size);
+
/**
 * ion_heap_freelist_size - returns the size of the freelist in bytes
* @heap: the heap
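For a sense of the intended caller, here is a sketch of how a heap
shrinker might wire this up through the kernel's shrinker API. Only
ion_heap_freelist_shrink(), ion_heap_freelist_size(), and
ION_HEAP_FLAG_DEFER_FREE come from this header; the struct shrinker
member embedded in struct ion_heap is assumed for the example:

static unsigned long my_heap_shrink_count(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	/* Assumes struct ion_heap embeds a struct shrinker member. */
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);

	/* Report reclaimable pages sitting on the deferred freelist. */
	return ion_heap_freelist_size(heap) / PAGE_SIZE;
}

static unsigned long my_heap_shrink_scan(struct shrinker *shrinker,
					 struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	size_t freed = 0;

	/* Shrink the freelist; ION_PRIV_FLAG_SHRINKER_FREE ensures the
	 * pages bypass any pools and really reach the system. */
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		freed = ion_heap_freelist_shrink(heap,
						 sc->nr_to_scan * PAGE_SIZE);

	return freed / PAGE_SIZE;
}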
|