@@ -2026,7 +2026,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
 	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
 	if (!dma_segment_array) {
 		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
-		return -ENOMEM;
+		goto cleanup_ht;
 	}
 
 	for (i = 0; i < segment_count; ++i) {
@@ -2037,15 +2037,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
 					   GFP_KERNEL);
 		if (!hba->hash_tbl_segments[i]) {
 			printk(KERN_ERR PFX "hash segment alloc failed\n");
-			while (--i >= 0) {
-				dma_free_coherent(&hba->pcidev->dev,
-						  BNX2FC_HASH_TBL_CHUNK_SIZE,
-						  hba->hash_tbl_segments[i],
-						  dma_segment_array[i]);
-				hba->hash_tbl_segments[i] = NULL;
-			}
-			kfree(dma_segment_array);
-			return -ENOMEM;
+			goto cleanup_dma;
 		}
 		memset(hba->hash_tbl_segments[i], 0,
 		       BNX2FC_HASH_TBL_CHUNK_SIZE);
@@ -2057,8 +2049,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
 					       GFP_KERNEL);
 	if (!hba->hash_tbl_pbl) {
 		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
-		kfree(dma_segment_array);
-		return -ENOMEM;
+		goto cleanup_dma;
 	}
 	memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
 
@@ -2083,6 +2074,22 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
 	}
 	kfree(dma_segment_array);
 	return 0;
+
+cleanup_dma:
+	for (i = 0; i < segment_count; ++i) {
+		if (hba->hash_tbl_segments[i])
+			dma_free_coherent(&hba->pcidev->dev,
+					  BNX2FC_HASH_TBL_CHUNK_SIZE,
+					  hba->hash_tbl_segments[i],
+					  dma_segment_array[i]);
+	}
+
+	kfree(dma_segment_array);
+
+cleanup_ht:
+	kfree(hba->hash_tbl_segments);
+	hba->hash_tbl_segments = NULL;
+	return -ENOMEM;
 }
 
 /**
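
The hunks above replace the open-coded unwind (the `while (--i >= 0)` loop and the duplicated `kfree(dma_segment_array); return -ENOMEM;`) with two centralized labels: `cleanup_dma` frees whichever segments were successfully allocated plus the DMA address array, and `cleanup_ht` releases the segment pointer table itself. Below is a minimal standalone sketch of that pattern for illustration only; it is not part of the patch, uses plain libc (malloc/calloc standing in for dma_alloc_coherent()), and all names (alloc_segments, NSEG, SEG_SIZE) are made up for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NSEG     4
#define SEG_SIZE 4096

/* Allocate a zeroed pointer table plus NSEG buffers; on any failure,
 * jump to a label that frees only what was set up before it. */
static int alloc_segments(void ***segments)
{
	void **segs;
	int i;

	segs = calloc(NSEG, sizeof(*segs));	/* plays the role of hash_tbl_segments */
	if (!segs)
		goto cleanup_ht;

	for (i = 0; i < NSEG; ++i) {
		segs[i] = malloc(SEG_SIZE);	/* plays the role of dma_alloc_coherent() */
		if (!segs[i])
			goto cleanup_segs;
		memset(segs[i], 0, SEG_SIZE);
	}

	*segments = segs;
	return 0;

cleanup_segs:
	/* calloc() zeroed the table, so slots never allocated are NULL
	 * and free(NULL) is a no-op; the whole range can be walked. */
	for (i = 0; i < NSEG; ++i)
		free(segs[i]);
	free(segs);
cleanup_ht:
	*segments = NULL;
	return -1;
}

int main(void)
{
	void **segs;

	if (alloc_segments(&segs) == 0) {
		puts("allocated");
		/* normal teardown mirrors the error path */
		for (int i = 0; i < NSEG; ++i)
			free(segs[i]);
		free(segs);
	}
	return 0;
}

The point of the pattern, and of the patch, is that each allocation site only decides where to jump; the labels own the teardown, so nothing is freed twice and nothing allocated later is touched.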