@@ -37,41 +37,59 @@
 #define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
 #define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

-/*
- * ashmem_area - anonymous shared memory area
- * Lifecycle: From our parent file's open() until its release()
- * Locking: Protected by `ashmem_mutex'
- * Big Note: Mappings do NOT pin this structure; it dies on close()
+/**
+ * struct ashmem_area - The anonymous shared memory area
+ * @name: The optional name in /proc/pid/maps
+ * @unpinned_list: The list of all ashmem areas
+ * @file: The shmem-based backing file
+ * @size: The size of the mapping, in bytes
+ * @prot_mask: The allowed protection bits, as vm_flags
+ *
+ * The lifecycle of this structure is from our parent file's open() until
+ * its release(). It is protected by 'ashmem_mutex'.
+ *
+ * Warning: Mappings do NOT pin this structure; it dies on close()
  */
 struct ashmem_area {
-	char name[ASHMEM_FULL_NAME_LEN];	/* optional name in /proc/pid/maps */
-	struct list_head unpinned_list;		/* list of all ashmem areas */
-	struct file *file;			/* the shmem-based backing file */
-	size_t size;				/* size of the mapping, in bytes */
-	unsigned long prot_mask;		/* allowed prot bits, as vm_flags */
+	char name[ASHMEM_FULL_NAME_LEN];
+	struct list_head unpinned_list;
+	struct file *file;
+	size_t size;
+	unsigned long prot_mask;
 };
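
The name field is sized for the fixed prefix plus the user-visible name; on
open, the driver seeds it with the prefix, and the ASHMEM_SET_NAME ioctl fills
in the rest. A minimal sketch of that pattern (simplified from ashmem_open(),
which is not part of this diff):

	/* Sketch: the prefix is copied in first; the user-supplied name
	 * (up to ASHMEM_NAME_LEN bytes) lands right after it.
	 */
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);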

-/*
- * ashmem_range - represents an interval of unpinned (evictable) pages
- * Lifecycle: From unpin to pin
- * Locking: Protected by `ashmem_mutex'
+/**
+ * struct ashmem_range - A range of unpinned/evictable pages
+ * @lru: The entry in the LRU list
+ * @unpinned: The entry in its area's unpinned list
+ * @asma: The associated anonymous shared memory area
+ * @pgstart: The starting page (inclusive)
+ * @pgend: The ending page (inclusive)
+ * @purged: The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ *
+ * The lifecycle of this structure is from unpin to pin.
+ * It is protected by 'ashmem_mutex'.
  */
 struct ashmem_range {
-	struct list_head lru;		/* entry in LRU list */
-	struct list_head unpinned;	/* entry in its area's unpinned list */
-	struct ashmem_area *asma;	/* associated area */
-	size_t pgstart;			/* starting page, inclusive */
-	size_t pgend;			/* ending page, inclusive */
-	unsigned int purged;		/* ASHMEM_NOT or ASHMEM_WAS_PURGED */
+	struct list_head lru;
+	struct list_head unpinned;
+	struct ashmem_area *asma;
+	size_t pgstart;
+	size_t pgend;
+	unsigned int purged;
 };

 /* LRU list of unpinned pages, protected by ashmem_mutex */
 static LIST_HEAD(ashmem_lru_list);

-/* Count of pages on our LRU list, protected by ashmem_mutex */
+/**
+ * unsigned long lru_count - The count of pages on our LRU list.
+ *
+ * This is protected by ashmem_mutex.
+ */
 static unsigned long lru_count;

-/*
+/**
  * ashmem_mutex - protects the list of and each individual ashmem_area
  *
  * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem
@@ -105,28 +123,43 @@ static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
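
For context, PROT_MASK bounds the protections an ashmem mapping may ever ask
for. The enforcement lives in the driver's set-prot-mask path elsewhere in
ashmem.c; the sketch below is a simplified illustration of that pattern (the
variable names and error handling here are assumed, not taken from this diff):
userspace may only clear bits from asma->prot_mask, never set new ones.

	/* Sketch: tightening an area's protection mask (simplified). */
	int ret = 0;

	mutex_lock(&ashmem_mutex);
	if ((asma->prot_mask & prot) != prot)
		ret = -EINVAL;		/* request would add bits: reject */
	else
		asma->prot_mask = prot;	/* the mask only ever shrinks */
	mutex_unlock(&ashmem_mutex);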

+/**
+ * lru_add() - Adds a range of memory to the LRU list
+ * @range: The memory range being added
+ *
+ * The range is first added to the end (tail) of the LRU list.
+ * After this, the size of the range is added to @lru_count.
+ */
 static inline void lru_add(struct ashmem_range *range)
 {
 	list_add_tail(&range->lru, &ashmem_lru_list);
 	lru_count += range_size(range);
 }

+/**
+ * lru_del() - Removes a range of memory from the LRU list
+ * @range: The memory range being removed
+ *
+ * The range is first deleted from the LRU list.
+ * After this, the size of the range is removed from @lru_count.
+ */
 static inline void lru_del(struct ashmem_range *range)
 {
 	list_del(&range->lru);
 	lru_count -= range_size(range);
 }
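
Both helpers lean on range_size(), which is defined earlier in ashmem.c and
not touched by this diff; given the inclusive pgstart/pgend convention
documented above, it is roughly:

	/* Sketch of range_size(): both endpoints are inclusive, hence +1. */
	static inline size_t range_size(struct ashmem_range *range)
	{
		return range->pgend - range->pgstart + 1;
	}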

-/*
- * range_alloc - allocate and initialize a new ashmem_range structure
+/**
+ * range_alloc() - Allocates and initializes a new ashmem_range structure
+ * @asma: The associated ashmem_area
+ * @prev_range: The previous ashmem_range in the sorted asma->unpinned list
+ * @purged: Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ * @start: The starting page (inclusive)
+ * @end: The ending page (inclusive)
  *
- * 'asma' - associated ashmem_area
- * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
- * 'purged' - initial purge value (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
- * 'start' - starting page, inclusive
- * 'end' - ending page, inclusive
+ * The caller must hold ashmem_mutex.
  *
- * Caller must hold ashmem_mutex.
+ * Return: 0 if successful, or -ENOMEM on allocation failure
  */
 static int range_alloc(struct ashmem_area *asma,
 		       struct ashmem_range *prev_range, unsigned int purged,
@@ -151,6 +184,10 @@ static int range_alloc(struct ashmem_area *asma,
 	return 0;
 }
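
For reference, range_alloc() is called from the unpin path; a simplified
sketch of a typical call site (the surrounding ashmem_unpin() logic is
assumed here, not shown in this diff):

	/* Sketch: record pages [pgstart, pgend] of asma as unpinned. */
	ret = range_alloc(asma, range, purged, pgstart, pgend);
	if (ret)
		return ret;	/* -ENOMEM: the range could not be tracked */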

+/**
+ * range_del() - Deletes and deallocates an ashmem_range structure
+ * @range: The previously allocated ashmem_range being deleted
+ */
 static void range_del(struct ashmem_range *range)
 {
 	list_del(&range->unpinned);