@@ -234,9 +234,8 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
 /* performant mode helper functions */
 static void calc_bucket_map(int *bucket, int num_buckets,
 	int nsgs, int min_blocks, u32 *bucket_map);
-static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
-static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h);
-static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h);
+static void hpsa_free_performant_mode(struct ctlr_info *h);
+static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
 static inline u32 next_command(struct ctlr_info *h, u8 q);
 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
 	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
@@ -1630,6 +1629,7 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
 		 * since it didn't get added to scsi mid layer
 		 */
 		fixup_botched_add(h, added[i]);
+		added[i] = NULL;
 	}

free_and_out:
@@ -1753,7 +1753,7 @@ static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
 	h->cmd_sg_list = NULL;
 }

-static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
+static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
 {
 	int i;

@@ -6435,9 +6435,11 @@ static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
 	if (h->msix_vector) {
 		if (h->pdev->msix_enabled)
 			pci_disable_msix(h->pdev);
+		h->msix_vector = 0;
 	} else if (h->msi_vector) {
 		if (h->pdev->msi_enabled)
 			pci_disable_msi(h->pdev);
+		h->msi_vector = 0;
 	}
 }

@@ -6576,10 +6578,14 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,

 static void hpsa_free_cfgtables(struct ctlr_info *h)
 {
-	if (h->transtable)
+	if (h->transtable) {
 		iounmap(h->transtable);
-	if (h->cfgtable)
+		h->transtable = NULL;
+	}
+	if (h->cfgtable) {
 		iounmap(h->cfgtable);
+		h->cfgtable = NULL;
+	}
 }

 /* Find and map CISS config table and transfer table
@@ -6796,6 +6802,7 @@ static void hpsa_free_pci_init(struct ctlr_info *h)
 {
 	hpsa_free_cfgtables(h);			/* pci_init 4 */
 	iounmap(h->vaddr);			/* pci_init 3 */
+	h->vaddr = NULL;
 	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
 	pci_release_regions(h->pdev);		/* pci_init 2 */
 	pci_disable_device(h->pdev);		/* pci_init 1 */
@@ -6866,6 +6873,7 @@ clean4:	/* cfgtables, vaddr, intmode+region, pci */
 	hpsa_free_cfgtables(h);
clean3:	/* vaddr, intmode+region, pci */
 	iounmap(h->vaddr);
+	h->vaddr = NULL;
clean2:	/* intmode+region, pci */
 	hpsa_disable_interrupt_mode(h);
 	pci_release_regions(h->pdev);
@@ -6955,16 +6963,23 @@ out_disable:
 static void hpsa_free_cmd_pool(struct ctlr_info *h)
 {
 	kfree(h->cmd_pool_bits);
-	if (h->cmd_pool)
+	h->cmd_pool_bits = NULL;
+	if (h->cmd_pool) {
 		pci_free_consistent(h->pdev,
 				h->nr_cmds * sizeof(struct CommandList),
 				h->cmd_pool,
 				h->cmd_pool_dhandle);
-	if (h->errinfo_pool)
+		h->cmd_pool = NULL;
+		h->cmd_pool_dhandle = 0;
+	}
+	if (h->errinfo_pool) {
 		pci_free_consistent(h->pdev,
 				h->nr_cmds * sizeof(struct ErrorInfo),
 				h->errinfo_pool,
 				h->errinfo_pool_dhandle);
+		h->errinfo_pool = NULL;
+		h->errinfo_pool_dhandle = 0;
+	}
 }

 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
@@ -7012,12 +7027,14 @@ static void hpsa_free_irqs(struct ctlr_info *h)
 		i = h->intr_mode;
 		irq_set_affinity_hint(h->intr[i], NULL);
 		free_irq(h->intr[i], &h->q[i]);
+		h->q[i] = 0;
 		return;
 	}

 	for (i = 0; i < h->msix_vector; i++) {
 		irq_set_affinity_hint(h->intr[i], NULL);
 		free_irq(h->intr[i], &h->q[i]);
+		h->q[i] = 0;
 	}
 	for (; i < MAX_REPLY_QUEUES; i++)
 		h->q[i] = 0;
@@ -7070,6 +7087,7 @@ static int hpsa_request_irqs(struct ctlr_info *h,
 				intxhandler, IRQF_SHARED, h->devname,
 				&h->q[h->intr_mode]);
 		}
+		irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
 	}
 	if (rc) {
 		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
@@ -7114,23 +7132,17 @@ static void hpsa_free_reply_queues(struct ctlr_info *h)
 		h->reply_queue[i].head = NULL;
 		h->reply_queue[i].busaddr = 0;
 	}
+	h->reply_queue_size = 0;
 }

 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
 {
-	hpsa_free_irqs(h);
-	hpsa_free_sg_chain_blocks(h);
-	hpsa_free_cmd_pool(h);
-	kfree(h->blockFetchTable);		/* perf 2 */
-	hpsa_free_reply_queues(h);		/* perf 1 */
-	hpsa_free_ioaccel1_cmd_and_bft(h);	/* perf 1 */
-	hpsa_free_ioaccel2_cmd_and_bft(h);	/* perf 1 */
-	hpsa_free_cfgtables(h);			/* pci_init 4 */
-	iounmap(h->vaddr);			/* pci_init 3 */
-	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
-	pci_disable_device(h->pdev);
-	pci_release_regions(h->pdev);		/* pci_init 2 */
-	kfree(h);
+	hpsa_free_performant_mode(h);		/* init_one 7 */
+	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
+	hpsa_free_cmd_pool(h);			/* init_one 5 */
+	hpsa_free_irqs(h);			/* init_one 4 */
+	hpsa_free_pci_init(h);			/* init_one 3 */
+	kfree(h);				/* init_one 1 */
 }

 /* Called when controller lockup detected. */
@@ -7403,10 +7415,13 @@ reinit_after_soft_reset:
 	 */
 	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
 	h = kzalloc(sizeof(*h), GFP_KERNEL);
-	if (!h)
+	if (!h) {
+		dev_err(&pdev->dev, "Failed to allocate controller head\n");
 		return -ENOMEM;
+	}

 	h->pdev = pdev;
+
 	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
 	INIT_LIST_HEAD(&h->offline_device_list);
 	spin_lock_init(&h->lock);
@@ -7424,20 +7439,21 @@ reinit_after_soft_reset:
 	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
 	if (!h->resubmit_wq) {
 		rc = -ENOMEM;
-		goto clean1;
+		goto clean1;	/* aer/h */
 	}

 	/* Allocate and clear per-cpu variable lockup_detected */
 	h->lockup_detected = alloc_percpu(u32);
 	if (!h->lockup_detected) {
+		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
 		rc = -ENOMEM;
-		goto clean1;
+		goto clean1;	/* wq/aer/h */
 	}
 	set_lockup_detected_for_all_cpus(h, 0);

 	rc = hpsa_pci_init(h);
-	if (rc != 0)
-		goto clean1;
+	if (rc)
+		goto clean2;	/* lockup, wq/aer/h */

 	sprintf(h->devname, HPSA "%d", number_of_controllers);
 	h->ctlr = number_of_controllers;
@@ -7453,23 +7469,25 @@ reinit_after_soft_reset:
 			dac = 0;
 		} else {
 			dev_err(&pdev->dev, "no suitable DMA available\n");
-			goto clean2;
+			goto clean3;	/* pci, lockup, wq/aer/h */
 		}
 	}

 	/* make sure the board interrupts are off */
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);

-	if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
-		goto clean2;
+	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
+	if (rc)
+		goto clean3;	/* pci, lockup, wq/aer/h */
 	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
 	       h->devname, pdev->device,
 	       h->intr[h->intr_mode], dac ? "" : " not");
 	rc = hpsa_alloc_cmd_pool(h);
 	if (rc)
-		goto clean2_and_free_irqs;
-	if (hpsa_allocate_sg_chain_blocks(h))
-		goto clean4;
+		goto clean4;	/* irq, pci, lockup, wq/aer/h */
+	rc = hpsa_alloc_sg_chain_blocks(h);
+	if (rc)
+		goto clean5;	/* cmd, irq, pci, lockup, wq/aer/h */
 	init_waitqueue_head(&h->scan_wait_queue);
 	init_waitqueue_head(&h->abort_cmd_wait_queue);
 	h->scan_finished = 1; /* no scan currently in progress */
@@ -7479,9 +7497,12 @@ reinit_after_soft_reset:
 	h->hba_mode_enabled = 0;
 	h->scsi_host = NULL;
 	spin_lock_init(&h->devlock);
-	hpsa_put_ctlr_into_performant_mode(h);
+	rc = hpsa_put_ctlr_into_performant_mode(h);
+	if (rc)
+		goto clean6;	/* sg, cmd, irq, pci, lockup, wq/aer/h */

-	/* At this point, the controller is ready to take commands.
+	/*
+	 * At this point, the controller is ready to take commands.
 	 * Now, if reset_devices and the hard reset didn't work, try
 	 * the soft reset and see if that works.
 	 */
@@ -7536,17 +7557,17 @@ reinit_after_soft_reset:
 		goto reinit_after_soft_reset;
 	}

-		/* Enable Accelerated IO path at driver layer */
-		h->acciopath_status = 1;
+	/* Enable Accelerated IO path at driver layer */
+	h->acciopath_status = 1;


 	/* Turn the interrupts on so we can service requests */
 	h->access.set_intr_mask(h, HPSA_INTR_ON);

 	hpsa_hba_inquiry(h);
-	rc = hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
+	rc = hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
 	if (rc)
-		goto clean4;
+		goto clean7;

 	/* Monitor the controller for firmware lockups */
 	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
@@ -7558,22 +7579,32 @@ reinit_after_soft_reset:
 					h->heartbeat_sample_interval);
 	return 0;

-clean4:
+clean7:	/* perf, sg, cmd, irq, pci, lockup, wq/aer/h */
+	kfree(h->hba_inquiry_data);
+	hpsa_free_performant_mode(h);
+	h->access.set_intr_mask(h, HPSA_INTR_OFF);
+clean6:	/* sg, cmd, irq, pci, lockup, wq/aer/h */
 	hpsa_free_sg_chain_blocks(h);
+clean5:	/* cmd, irq, pci, lockup, wq/aer/h */
 	hpsa_free_cmd_pool(h);
-	hpsa_free_ioaccel1_cmd_and_bft(h);
-	hpsa_free_ioaccel2_cmd_and_bft(h);
-clean2_and_free_irqs:
+clean4:	/* irq, pci, lockup, wq/aer/h */
 	hpsa_free_irqs(h);
-clean2:
+clean3:	/* pci, lockup, wq/aer/h */
 	hpsa_free_pci_init(h);
-clean1:
-	if (h->resubmit_wq)
+clean2:	/* lockup, wq/aer/h */
+	if (h->lockup_detected) {
+		free_percpu(h->lockup_detected);
+		h->lockup_detected = NULL;
+	}
+clean1:	/* wq/aer/h */
+	if (h->resubmit_wq) {
 		destroy_workqueue(h->resubmit_wq);
-	if (h->rescan_ctlr_wq)
+		h->resubmit_wq = NULL;
+	}
+	if (h->rescan_ctlr_wq) {
 		destroy_workqueue(h->rescan_ctlr_wq);
-	if (h->lockup_detected)
-		free_percpu(h->lockup_detected);
+		h->rescan_ctlr_wq = NULL;
+	}
 	kfree(h);
 	return rc;
 }
@@ -7621,7 +7652,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
 	 */
 	hpsa_flush_cache(h);
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
-	hpsa_free_irqs(h);
+	hpsa_free_irqs(h);			/* init_one 4 */
 	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
 }

@@ -7629,8 +7660,10 @@ static void hpsa_free_device_info(struct ctlr_info *h)
 {
 	int i;

-	for (i = 0; i < h->ndevices; i++)
+	for (i = 0; i < h->ndevices; i++) {
 		kfree(h->dev[i]);
+		h->dev[i] = NULL;
+	}
 }

 static void hpsa_remove_one(struct pci_dev *pdev)
@@ -7652,26 +7685,29 @@ static void hpsa_remove_one(struct pci_dev *pdev)
 	cancel_delayed_work_sync(&h->rescan_ctlr_work);
 	destroy_workqueue(h->rescan_ctlr_wq);
 	destroy_workqueue(h->resubmit_wq);
-	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */

-	/* includes hpsa_free_irqs */
+	/* includes hpsa_free_irqs - init_one 4 */
 	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
 	hpsa_shutdown(pdev);

-	hpsa_free_device_info(h);
-	hpsa_free_sg_chain_blocks(h);
-	kfree(h->blockFetchTable);		/* perf 2 */
-	hpsa_free_reply_queues(h);		/* perf 1 */
-	hpsa_free_ioaccel1_cmd_and_bft(h);	/* perf 1 */
-	hpsa_free_ioaccel2_cmd_and_bft(h);	/* perf 1 */
-	hpsa_free_cmd_pool(h);			/* init_one 5 */
-	kfree(h->hba_inquiry_data);
+	hpsa_free_device_info(h);		/* scan */
+
+	hpsa_unregister_scsi(h);		/* init_one "8" */
+	kfree(h->hba_inquiry_data);		/* init_one "8" */
+	h->hba_inquiry_data = NULL;		/* init_one "8" */
+	hpsa_free_performant_mode(h);		/* init_one 7 */
+	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
+	hpsa_free_cmd_pool(h);			/* init_one 5 */
+
+	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */

 	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
-	hpsa_free_pci_init(h);
+	hpsa_free_pci_init(h);			/* init_one 3 */

-	free_percpu(h->lockup_detected);
-	kfree(h);
+	free_percpu(h->lockup_detected);	/* init_one 2 */
+	h->lockup_detected = NULL;		/* init_one 2 */
+	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */
+	kfree(h);				/* init_one 1 */
 }

 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
@@ -7729,7 +7765,10 @@ static void calc_bucket_map(int bucket[], int num_buckets,
 	}
 }

-/* return -ENODEV or other reason on error, 0 on success */
+/*
+ * return -ENODEV on err, 0 on success (or no action)
+ * allocates numerous items that must be freed later
+ */
 static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 {
 	int i;
@@ -7914,12 +7953,16 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 /* Free ioaccel1 mode command blocks and block fetch table */
 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
 {
-	if (h->ioaccel_cmd_pool)
+	if (h->ioaccel_cmd_pool) {
 		pci_free_consistent(h->pdev,
 			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
 			h->ioaccel_cmd_pool,
 			h->ioaccel_cmd_pool_dhandle);
+		h->ioaccel_cmd_pool = NULL;
+		h->ioaccel_cmd_pool_dhandle = 0;
+	}
 	kfree(h->ioaccel1_blockFetchTable);
+	h->ioaccel1_blockFetchTable = NULL;
 }

 /* Allocate ioaccel1 mode command blocks and block fetch table */
@@ -7963,12 +8006,16 @@ static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
 {
 	hpsa_free_ioaccel2_sg_chain_blocks(h);

-	if (h->ioaccel2_cmd_pool)
+	if (h->ioaccel2_cmd_pool) {
 		pci_free_consistent(h->pdev,
 			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
 			h->ioaccel2_cmd_pool,
 			h->ioaccel2_cmd_pool_dhandle);
+		h->ioaccel2_cmd_pool = NULL;
+		h->ioaccel2_cmd_pool_dhandle = 0;
+	}
 	kfree(h->ioaccel2_blockFetchTable);
+	h->ioaccel2_blockFetchTable = NULL;
 }

 /* Allocate ioaccel2 mode command blocks and block fetch table */
@@ -8013,33 +8060,46 @@ clean_up:
 	return rc;
 }

-static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
+static void hpsa_free_performant_mode(struct ctlr_info *h)
+{
+	kfree(h->blockFetchTable);
+	h->blockFetchTable = NULL;
+	hpsa_free_reply_queues(h);
+	hpsa_free_ioaccel1_cmd_and_bft(h);
+	hpsa_free_ioaccel2_cmd_and_bft(h);
+}
+
+/* return -ENODEV on error, 0 on success (or no action)
+ * allocates numerous items that must be freed later
+ */
+static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 {
 	u32 trans_support;
 	unsigned long transMethod = CFGTBL_Trans_Performant |
 					CFGTBL_Trans_use_short_tags;
-	int i;
+	int i, rc;

 	if (hpsa_simple_mode)
-		return;
+		return 0;

 	trans_support = readl(&(h->cfgtable->TransportSupport));
 	if (!(trans_support & PERFORMANT_MODE))
-		return;
+		return 0;

 	/* Check for I/O accelerator mode support */
 	if (trans_support & CFGTBL_Trans_io_accel1) {
 		transMethod |= CFGTBL_Trans_io_accel1 |
 				CFGTBL_Trans_enable_directed_msix;
-		if (hpsa_alloc_ioaccel1_cmd_and_bft(h))
-			goto clean_up;
-	} else {
-		if (trans_support & CFGTBL_Trans_io_accel2) {
-			transMethod |= CFGTBL_Trans_io_accel2 |
+		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
+		if (rc)
+			return rc;
+	} else if (trans_support & CFGTBL_Trans_io_accel2) {
+		transMethod |= CFGTBL_Trans_io_accel2 |
 				CFGTBL_Trans_enable_directed_msix;
-		if (hpsa_alloc_ioaccel2_cmd_and_bft(h))
-			goto clean_up;
-		}
+		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
+		if (rc)
+			return rc;
 	}

 	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
@@ -8051,8 +8111,10 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
 						h->reply_queue_size,
 						&(h->reply_queue[i].busaddr));
-		if (!h->reply_queue[i].head)
-			goto clean_up;
+		if (!h->reply_queue[i].head) {
+			rc = -ENOMEM;
+			goto clean1;	/* rq, ioaccel */
+		}
 		h->reply_queue[i].size = h->max_commands;
 		h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
 		h->reply_queue[i].current_entry = 0;
@@ -8061,15 +8123,24 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 	/* Need a block fetch table for performant mode */
 	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
 				sizeof(u32)), GFP_KERNEL);
-	if (!h->blockFetchTable)
-		goto clean_up;
+	if (!h->blockFetchTable) {
+		rc = -ENOMEM;
+		goto clean1;	/* rq, ioaccel */
+	}

-	hpsa_enter_performant_mode(h, trans_support);
-	return;
+	rc = hpsa_enter_performant_mode(h, trans_support);
+	if (rc)
+		goto clean2;	/* bft, rq, ioaccel */
+	return 0;

-clean_up:
-	hpsa_free_reply_queues(h);
+clean2:	/* bft, rq, ioaccel */
 	kfree(h->blockFetchTable);
+	h->blockFetchTable = NULL;
+clean1:	/* rq, ioaccel */
+	hpsa_free_reply_queues(h);
+	hpsa_free_ioaccel1_cmd_and_bft(h);
+	hpsa_free_ioaccel2_cmd_and_bft(h);
+	return rc;
 }

 static int is_accelerated_cmd(struct CommandList *c)