@@ -41,6 +41,13 @@
 
 #include "mlx5_core.h"
 
+struct mlx5_db_pgdir {
+	struct list_head list;
+	unsigned long	*bitmap;
+	__be32		*db_page;
+	dma_addr_t	db_dma;
+};
+
 /* Handling for queue buffers -- we allocate a bunch of memory and
  * register it in a memory region at HCA virtual address 0.
  */
|
@@ -102,17 +109,28 @@ EXPORT_SYMBOL_GPL(mlx5_buf_free);
 static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
 						 int node)
 {
+	u32 db_per_page = PAGE_SIZE / cache_line_size();
 	struct mlx5_db_pgdir *pgdir;
 
 	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
 	if (!pgdir)
 		return NULL;
 
-	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
+	pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
+				sizeof(unsigned long),
+				GFP_KERNEL);
+
+	if (!pgdir->bitmap) {
+		kfree(pgdir);
+		return NULL;
+	}
+
+	bitmap_fill(pgdir->bitmap, db_per_page);
 
 	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
 						       &pgdir->db_dma, node);
 	if (!pgdir->db_page) {
+		kfree(pgdir->bitmap);
 		kfree(pgdir);
 		return NULL;
 	}
|
@@ -123,18 +141,19 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
 static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
 				    struct mlx5_db *db)
 {
+	u32 db_per_page = PAGE_SIZE / cache_line_size();
 	int offset;
 	int i;
 
-	i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
-	if (i >= MLX5_DB_PER_PAGE)
+	i = find_first_bit(pgdir->bitmap, db_per_page);
+	if (i >= db_per_page)
 		return -ENOMEM;
 
 	__clear_bit(i, pgdir->bitmap);
 
 	db->u.pgdir = pgdir;
 	db->index = i;
-	offset = db->index * L1_CACHE_BYTES;
+	offset = db->index * cache_line_size();
 	db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
 	db->dma = pgdir->db_dma + offset;
 
|
@@ -181,14 +200,16 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc);
 
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
 {
+	u32 db_per_page = PAGE_SIZE / cache_line_size();
 	mutex_lock(&dev->priv.pgdir_mutex);
 
 	__set_bit(db->index, db->u.pgdir->bitmap);
 
-	if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
+	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
 		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
 				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
 		list_del(&db->u.pgdir->list);
+		kfree(db->u.pgdir->bitmap);
 		kfree(db->u.pgdir);
 	}
 