@@ -89,12 +89,13 @@ DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
 static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
 {
 	struct pcie_port *pp = &ks_pcie->pp;
+	struct device *dev = pp->dev;
 	unsigned int retries;
 
 	dw_pcie_setup_rc(pp);
 
 	if (dw_pcie_link_up(pp)) {
-		dev_err(pp->dev, "Link already up\n");
+		dev_err(dev, "Link already up\n");
 		return 0;
 	}
 
@@ -105,7 +106,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
 			return 0;
 	}
 
-	dev_err(pp->dev, "phy link never came up\n");
+	dev_err(dev, "phy link never came up\n");
 	return -ETIMEDOUT;
 }
 
@@ -115,9 +116,10 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
 	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
 	u32 offset = irq - ks_pcie->msi_host_irqs[0];
 	struct pcie_port *pp = &ks_pcie->pp;
+	struct device *dev = pp->dev;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
-	dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq);
+	dev_dbg(dev, "%s, irq %d\n", __func__, irq);
 
 	/*
 	 * The chained irq handler installation would have replaced normal
@@ -142,10 +144,11 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
 	unsigned int irq = irq_desc_get_irq(desc);
 	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
 	struct pcie_port *pp = &ks_pcie->pp;
+	struct device *dev = pp->dev;
 	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
-	dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);
+	dev_dbg(dev, ": Handling legacy irq %d\n", irq);
 
 	/*
 	 * The chained irq handler installation would have replaced normal
@@ -310,6 +313,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
 			 struct platform_device *pdev)
 {
 	struct pcie_port *pp = &ks_pcie->pp;
+	struct device *dev = pp->dev;
 	int ret;
 
 	ret = ks_pcie_get_irq_controller_info(ks_pcie,
@@ -332,12 +336,12 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
 	 */
 	ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
 	if (ks_pcie->error_irq <= 0)
-		dev_info(&pdev->dev, "no error IRQ defined\n");
+		dev_info(dev, "no error IRQ defined\n");
 	else {
 		ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
 				  IRQF_SHARED, "pcie-error-irq", ks_pcie);
 		if (ret < 0) {
-			dev_err(&pdev->dev, "failed to request error IRQ %d\n",
+			dev_err(dev, "failed to request error IRQ %d\n",
 				ks_pcie->error_irq);
 			return ret;
 		}
@@ -347,7 +351,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
 	pp->ops = &keystone_pcie_host_ops;
 	ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
 	if (ret) {
-		dev_err(&pdev->dev, "failed to initialize host\n");
+		dev_err(dev, "failed to initialize host\n");
 		return ret;
 	}
 
@@ -381,12 +385,12 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
 	struct phy *phy;
 	int ret;
 
-	ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
-			       GFP_KERNEL);
+	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
 	if (!ks_pcie)
 		return -ENOMEM;
 
 	pp = &ks_pcie->pp;
+	pp->dev = dev;
 
 	/* initialize SerDes Phy if present */
 	phy = devm_phy_get(dev, "pcie-phy");
@@ -408,7 +412,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
 	devm_iounmap(dev, reg_p);
 	devm_release_mem_region(dev, res->start, resource_size(res));
 
-	pp->dev = dev;
 	ks_pcie->np = dev->of_node;
 	platform_set_drvdata(pdev, ks_pcie);
 	ks_pcie->clk = devm_clk_get(dev, "pcie");