@@ -260,12 +260,22 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
 		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
 		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
 			rdev->stats.pbl.max = rdev->stats.pbl.cur;
+		kref_get(&rdev->pbl_kref);
 	} else
 		rdev->stats.pbl.fail++;
 	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }
 
+static void destroy_pblpool(struct kref *kref)
+{
+	struct c4iw_rdev *rdev;
+
+	rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
+	gen_pool_destroy(rdev->pbl_pool);
+	complete(&rdev->pbl_compl);
+}
+
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	pr_debug("addr 0x%x size %d\n", addr, size);
@@ -273,6 +283,7 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+	kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 int c4iw_pblpool_create(struct c4iw_rdev *rdev)
@@ -310,7 +321,7 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
 
 void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
 {
-	gen_pool_destroy(rdev->pbl_pool);
+	kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 /*
@@ -331,12 +342,22 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
 		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
 		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
 			rdev->stats.rqt.max = rdev->stats.rqt.cur;
+		kref_get(&rdev->rqt_kref);
 	} else
 		rdev->stats.rqt.fail++;
 	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }
 
+static void destroy_rqtpool(struct kref *kref)
+{
+	struct c4iw_rdev *rdev;
+
+	rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
+	gen_pool_destroy(rdev->rqt_pool);
+	complete(&rdev->rqt_compl);
+}
+
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	pr_debug("addr 0x%x size %d\n", addr, size << 6);
@@ -344,6 +365,7 @@ void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+	kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
@@ -380,7 +402,7 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
 
 void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
 {
-	gen_pool_destroy(rdev->rqt_pool);
+	kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 /*
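
Note (not part of the patch above): the hunks reference pbl_kref/pbl_compl and
rqt_kref/rqt_compl fields of struct c4iw_rdev that are declared and initialized
outside this file. The sketch below only illustrates the lifetime pattern the
patch relies on (a kref per pool plus a completion): the pool holds one
reference from creation, every allocation takes another, and teardown drops the
creation reference and then waits until the last free has released the pool.
The helper names and their placement are hypothetical, not part of the patch.

#include <linux/kref.h>
#include <linux/completion.h>
#include "iw_cxgb4.h"	/* assumed to declare the new fields on struct c4iw_rdev */

/* Hypothetical helper: called from the pool create paths so each pool starts
 * out holding one reference, later dropped by c4iw_pblpool_destroy() /
 * c4iw_rqtpool_destroy().
 */
static void c4iw_pool_refs_init(struct c4iw_rdev *rdev)
{
	kref_init(&rdev->pbl_kref);		/* refcount starts at 1 */
	init_completion(&rdev->pbl_compl);	/* completed by destroy_pblpool() */
	kref_init(&rdev->rqt_kref);
	init_completion(&rdev->rqt_compl);	/* completed by destroy_rqtpool() */
}

/* Hypothetical helper: called during device teardown after
 * c4iw_pblpool_destroy() / c4iw_rqtpool_destroy() have dropped the creation
 * references.  Blocks until the last outstanding allocation has been freed
 * and destroy_pblpool() / destroy_rqtpool() have run gen_pool_destroy().
 */
static void c4iw_pool_refs_wait(struct c4iw_rdev *rdev)
{
	wait_for_completion(&rdev->pbl_compl);
	wait_for_completion(&rdev->rqt_compl);
}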