@@ -626,24 +626,36 @@ void ubi_refill_pools(struct ubi_device *ubi)
  */
 int ubi_wl_get_peb(struct ubi_device *ubi)
 {
-	int ret;
+	int ret, retried = 0;
 	struct ubi_fm_pool *pool = &ubi->fm_pool;
 	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
 
-	if (!pool->size || !wl_pool->size || pool->used == pool->size ||
-	    wl_pool->used == wl_pool->size)
+again:
+	spin_lock(&ubi->wl_lock);
+	/* We also check the WL pool here because at this point we can
+	 * refill the WL pool synchronously. */
+	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
+		spin_unlock(&ubi->wl_lock);
 		ubi_update_fastmap(ubi);
-
-	/* we got not a single free PEB */
-	if (!pool->size)
-		ret = -ENOSPC;
-	else {
 		spin_lock(&ubi->wl_lock);
-		ret = pool->pebs[pool->used++];
-		prot_queue_add(ubi, ubi->lookuptbl[ret]);
+	}
+
+	if (pool->used == pool->size) {
 		spin_unlock(&ubi->wl_lock);
+		if (retried) {
+			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
+			ret = -ENOSPC;
+			goto out;
+		}
+		retried = 1;
+		goto again;
+	}
 
+	ubi_assert(pool->used < pool->size);
+	ret = pool->pebs[pool->used++];
+	prot_queue_add(ubi, ubi->lookuptbl[ret]);
+	spin_unlock(&ubi->wl_lock);
+out:
 	return ret;
 }
 
@@ -656,7 +668,7 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
 	int pnum;
 
-	if (pool->used == pool->size || !pool->size) {
+	if (pool->used == pool->size) {
 		/* We cannot update the fastmap here because this
 		 * function is called in atomic context.
 		 * Let's fail here and refill/update it as soon as possible. */