@@ -33,6 +33,8 @@
  * when freed).
  */
 
+#define pr_fmt(fmt) "[TTM] " fmt
+
 #include <linux/dma-mapping.h>
 #include <linux/list.h>
 #include <linux/seq_file.h> /* for seq_printf */
@@ -221,18 +223,13 @@ static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
 		m->options.small = val;
 	else if (attr == &ttm_page_pool_alloc_size) {
 		if (val > NUM_PAGES_TO_ALLOC*8) {
-			printk(KERN_ERR TTM_PFX
-			       "Setting allocation size to %lu "
-			       "is not allowed. Recommended size is "
-			       "%lu\n",
+			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
 			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 			return size;
 		} else if (val > NUM_PAGES_TO_ALLOC) {
-			printk(KERN_WARNING TTM_PFX
-			       "Setting allocation size to "
-			       "larger than %lu is not recommended.\n",
-			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
+				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
 		}
 		m->options.alloc_size = val;
 	}
@@ -313,15 +310,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool,
 	if (pool->type & IS_UC) {
 		r = set_pages_array_uc(pages, cpages);
 		if (r)
-			pr_err(TTM_PFX
-			       "%s: Failed to set %d pages to uc!\n",
+			pr_err("%s: Failed to set %d pages to uc!\n",
 			       pool->dev_name, cpages);
 	}
 	if (pool->type & IS_WC) {
 		r = set_pages_array_wc(pages, cpages);
 		if (r)
-			pr_err(TTM_PFX
-			       "%s: Failed to set %d pages to wc!\n",
+			pr_err("%s: Failed to set %d pages to wc!\n",
 			       pool->dev_name, cpages);
 	}
 	return r;
@@ -387,8 +382,8 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
 	/* Don't set WB on WB page pool. */
 	if (npages && !(pool->type & IS_CACHED) &&
 	    set_pages_array_wb(pages, npages))
-		pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
-			pool->dev_name, npages);
+		pr_err("%s: Failed to set %d pages to wb!\n",
+		       pool->dev_name, npages);
 
 	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
 		list_del(&d_page->page_list);
@@ -400,8 +395,8 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
 {
 	/* Don't set WB on WB page pool. */
 	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
-		pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
-			pool->dev_name, 1);
+		pr_err("%s: Failed to set %d pages to wb!\n",
+		       pool->dev_name, 1);
 
 	list_del(&d_page->page_list);
 	__ttm_dma_free_page(pool, d_page);
@@ -430,17 +425,16 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
 #if 0
 	if (nr_free > 1) {
 		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
-			pool->dev_name, pool->name, current->pid,
-			npages_to_free, nr_free);
+			 pool->dev_name, pool->name, current->pid,
+			 npages_to_free, nr_free);
 	}
 #endif
 	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
 			GFP_KERNEL);
 
 	if (!pages_to_free) {
-		pr_err(TTM_PFX
-		       "%s: Failed to allocate memory for pool free operation.\n",
-		       pool->dev_name);
+		pr_err("%s: Failed to allocate memory for pool free operation\n",
+		       pool->dev_name);
 		return 0;
 	}
 	INIT_LIST_HEAD(&d_pages);
@@ -723,23 +717,21 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
 	if (!caching_array) {
-		pr_err(TTM_PFX
-		       "%s: Unable to allocate table for new pages.",
-		       pool->dev_name);
+		pr_err("%s: Unable to allocate table for new pages\n",
+		       pool->dev_name);
 		return -ENOMEM;
 	}
 
 	if (count > 1) {
 		pr_debug("%s: (%s:%d) Getting %d pages\n",
-			pool->dev_name, pool->name, current->pid,
-			count);
+			 pool->dev_name, pool->name, current->pid, count);
 	}
 
 	for (i = 0, cpages = 0; i < count; ++i) {
 		dma_p = __ttm_dma_alloc_page(pool);
 		if (!dma_p) {
-			pr_err(TTM_PFX "%s: Unable to get page %u.\n",
-				pool->dev_name, i);
+			pr_err("%s: Unable to get page %u\n",
+			       pool->dev_name, i);
 
 			/* store already allocated pages in the pool after
 			 * setting the caching state */
@@ -821,8 +813,8 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
 		struct dma_page *d_page;
 		unsigned cpages = 0;
 
-		pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n",
-			pool->dev_name, pool->name, r);
+		pr_err("%s: Failed to fill %s pool (r:%d)!\n",
+		       pool->dev_name, pool->name, r);
 
 		list_for_each_entry(d_page, &d_pages, page_list) {
 			cpages++;
@@ -1038,8 +1030,8 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
 		nr_free = shrink_pages;
 		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
 		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
-			p->pool->dev_name, p->pool->name, current->pid, nr_free,
-			shrink_pages);
+			 p->pool->dev_name, p->pool->name, current->pid,
+			 nr_free, shrink_pages);
 	}
 	mutex_unlock(&_manager->lock);
 	/* return estimated number of unused pages in pool */
@@ -1064,7 +1056,7 @@ int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 
 	WARN_ON(_manager);
 
-	printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n");
+	pr_info("Initializing DMA pool allocator\n");
 
 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 	if (!_manager)
@@ -1097,7 +1089,7 @@ void ttm_dma_page_alloc_fini(void)
 {
 	struct device_pools *p, *t;
 
-	printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n");
+	pr_info("Finalizing DMA pool allocator\n");
 	ttm_dma_pool_mm_shrink_fini(_manager);
 
 	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
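
Note (not part of the patch): the conversion above relies on the pr_fmt hook
in <linux/printk.h> -- pr_err()/pr_warn()/pr_info() pass their format string
through pr_fmt(), so defining pr_fmt before the first #include prefixes every
message in this file with "[TTM] " without repeating TTM_PFX at each call
site. A minimal standalone sketch of the pattern (userspace C with simplified
stand-in macros, not the kernel's exact definitions):

/* Define pr_fmt before any header that might define a default. */
#define pr_fmt(fmt) "[TTM] " fmt

#include <stdio.h>

/* Simplified stand-ins for the kernel's pr_<level> macros; the real ones
 * expand to printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) and friends. */
#define pr_err(fmt, ...)  fprintf(stderr, "err: " pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) printf("info: " pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("Initializing DMA pool allocator\n");
	/* prints: info: [TTM] Initializing DMA pool allocator */
	pr_err("%s: Failed to set %d pages to wb!\n", "some-device", 4);
	/* prints: err: [TTM] some-device: Failed to set 4 pages to wb! */
	return 0;
}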