@@ -44,6 +44,7 @@ static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
 static struct gic_pending_regs pending_regs[NR_CPUS];
 static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
 static DEFINE_SPINLOCK(gic_lock);
+static struct irq_domain *gic_irq_domain;
 
 #if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
 cycle_t gic_read_count(void)
@@ -230,24 +231,26 @@ unsigned int gic_get_int(void)
 
 static void gic_mask_irq(struct irq_data *d)
 {
-	GIC_CLR_INTR_MASK(d->irq - gic_irq_base);
+	GIC_CLR_INTR_MASK(d->hwirq);
 }
 
 static void gic_unmask_irq(struct irq_data *d)
 {
-	GIC_SET_INTR_MASK(d->irq - gic_irq_base);
+	GIC_SET_INTR_MASK(d->hwirq);
 }
 
 static void gic_ack_irq(struct irq_data *d)
 {
+	unsigned int irq = d->hwirq;
+
 	/* Clear edge detector */
-	if (gic_irq_flags[d->irq - gic_irq_base] & GIC_TRIG_EDGE)
-		GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), d->irq - gic_irq_base);
+	if (gic_irq_flags[irq] & GIC_TRIG_EDGE)
+		GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
 }
 
 static int gic_set_type(struct irq_data *d, unsigned int type)
 {
-	unsigned int irq = d->irq - gic_irq_base;
+	unsigned int irq = d->hwirq;
 	unsigned long flags;
 	bool is_edge;
 
@@ -302,7 +305,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
 			    bool force)
 {
-	unsigned int irq = (d->irq - gic_irq_base);
+	unsigned int irq = d->hwirq;
 	cpumask_t	tmp = CPU_MASK_NONE;
 	unsigned long	flags;
 	int		i;
@@ -345,6 +348,7 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
 	unsigned int flags)
 {
 	struct gic_shared_intr_map *map_ptr;
+	int i;
 
 	/* Setup Intr to Pin mapping */
 	if (pin & GIC_MAP_TO_NMI_MSK) {
@@ -382,6 +386,8 @@ static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
 	GIC_CLR_INTR_MASK(intr);
 
 	/* Initialise per-cpu Interrupt software masks */
+	for (i = 0; i < NR_CPUS; i++)
+		clear_bit(intr, pcpu_masks[i].pcpu_mask);
 	set_bit(intr, pcpu_masks[cpu].pcpu_mask);
 
 	if ((flags & GIC_FLAG_TRANSPARENT) && (cpu_has_veic == 0))
@@ -433,6 +439,29 @@ static void __init gic_basic_init(int numintrs, int numvpes,
 	vpe_local_setup(numvpes);
 }
 
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
+			      irq_hw_number_t hw)
+{
+	unsigned long flags;
+
+	irq_set_chip_and_handler(virq, &gic_irq_controller, handle_level_irq);
+
+	spin_lock_irqsave(&gic_lock, flags);
+	GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(hw)),
+		 GIC_MAP_TO_PIN_MSK | 0);
+	/* Map to VPE 0 by default */
+	GIC_SH_MAP_TO_VPE_SMASK(hw, 0);
+	set_bit(hw, pcpu_masks[0].pcpu_mask);
+	spin_unlock_irqrestore(&gic_lock, flags);
+
+	return 0;
+}
+
+static struct irq_domain_ops gic_irq_domain_ops = {
+	.map = gic_irq_domain_map,
+	.xlate = irq_domain_xlate_twocell,
+};
+
 void __init gic_init(unsigned long gic_base_addr,
 		     unsigned long gic_addrspace_size,
 		     struct gic_intr_map *intr_map, unsigned int intr_map_size,
@@ -454,7 +483,10 @@ void __init gic_init(unsigned long gic_base_addr,
 		GIC_SH_CONFIG_NUMVPES_SHF;
 	numvpes = numvpes + 1;
 
-	gic_basic_init(numintrs, numvpes, intr_map, intr_map_size);
+	gic_irq_domain = irq_domain_add_simple(NULL, GIC_NUM_INTRS, irqbase,
+					       &gic_irq_domain_ops, NULL);
+	if (!gic_irq_domain)
+		panic("Failed to add GIC IRQ domain");
 
-	gic_platform_init(numintrs, &gic_irq_controller);
+	gic_basic_init(numintrs, numvpes, intr_map, intr_map_size);
 }
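
With the domain registered in gic_init(), consumers look up Linux IRQ numbers through gic_irq_domain instead of adding gic_irq_base by hand. A minimal sketch of such a consumer follows; irq_create_mapping() and request_irq() are standard kernel APIs, while the domain pointer argument, hwirq value, handler, and device name here are illustrative and not part of this patch.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>

/* Hypothetical consumer of the GIC irq domain created in gic_init(). */
static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request_gic_irq(struct irq_domain *domain,
				   irq_hw_number_t hwirq)
{
	unsigned int virq;

	/*
	 * Return the existing virq for this hwirq or create one; creating
	 * a new mapping invokes gic_irq_domain_map() above.
	 */
	virq = irq_create_mapping(domain, hwirq);
	if (!virq)
		return -ENXIO;

	return request_irq(virq, example_handler, 0, "gic-example", NULL);
}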