@@ -41,6 +41,7 @@
 
 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
+#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)
 
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
 
@@ -82,6 +83,7 @@ struct its_node {
 	u64			flags;
 	u32			ite_size;
 	u32			device_ids;
+	int			numa_node;
 };
 
 #define ITS_ITT_ALIGN		SZ_256
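Note that numa_node is deliberately a signed int: of_node_to_nid() reports "no node" as NUMA_NO_NODE (-1), and the paths below test numa_node >= 0 before calling cpumask_of_node(), so the field must be able to hold a negative sentinel.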
@@ -613,11 +615,23 @@ static void its_unmask_irq(struct irq_data *d)
 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 			    bool force)
 {
-	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	unsigned int cpu;
+	const struct cpumask *cpu_mask = cpu_online_mask;
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	struct its_collection *target_col;
 	u32 id = its_get_event_id(d);
 
+	/* LPI cannot be routed to a redistributor on a foreign node */
+	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+		if (its_dev->its->numa_node >= 0) {
+			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+			if (!cpumask_intersects(mask_val, cpu_mask))
+				return -EINVAL;
+		}
+	}
+
+	cpu = cpumask_any_and(mask_val, cpu_mask);
+
 	if (cpu >= nr_cpu_ids)
 		return -EINVAL;
 
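The rule above reads in isolation as: when the workaround is active and the ITS sits on a known node, the user-supplied mask must contain at least one CPU of that node, and the target is then picked from the node mask instead of cpu_online_mask. A minimal sketch of the same logic (pick_node_local_cpu() is a hypothetical name; the cpumask helpers are the real kernel APIs used by the hunk):

	#include <linux/cpumask.h>
	#include <linux/topology.h>

	/* Hypothetical helper restating the affinity rule above. */
	static int pick_node_local_cpu(const struct cpumask *requested, int numa_node)
	{
		const struct cpumask *allowed = cpu_online_mask;
		unsigned int cpu;

		if (numa_node >= 0) {
			/* only CPUs on the ITS's own node may receive the LPI */
			allowed = cpumask_of_node(numa_node);
			if (!cpumask_intersects(requested, allowed))
				return -EINVAL;
		}

		cpu = cpumask_any_and(requested, allowed);
		return cpu >= nr_cpu_ids ? -EINVAL : (int)cpu;
	}

As in the hunk, once the workaround applies the chosen CPU comes from the node mask rather than its intersection with cpu_online_mask; the requested mask is only required to intersect the node mask.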
@@ -1101,6 +1115,16 @@ static void its_cpu_init_collection(void)
 	list_for_each_entry(its, &its_nodes, entry) {
 		u64 target;
 
+		/* avoid cross-node collections and their mapping */
+		if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
+			struct device_node *cpu_node;
+
+			cpu_node = of_get_cpu_node(cpu, NULL);
+			if (its->numa_node != NUMA_NO_NODE &&
+			    its->numa_node != of_node_to_nid(cpu_node))
+				continue;
+		}
+
 		/*
 		 * We now have to bind each collection to its target
 		 * redistributor.
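its_cpu_init_collection() runs once per CPU as it comes up; with the workaround set, a CPU skips any ITS that sits on a foreign node, so collections only ever land on node-local redistributors. The node test goes through the device tree, as in this standalone sketch (cpu_is_local_to_its() is a hypothetical name, not kernel API):

	#include <linux/types.h>
	#include <linux/of.h>
	#include <linux/numa.h>

	/* Hypothetical predicate: may this CPU host a collection for this ITS? */
	static bool cpu_is_local_to_its(unsigned int cpu, int its_numa_node)
	{
		struct device_node *cpu_node = of_get_cpu_node(cpu, NULL);
		bool local;

		local = (its_numa_node == NUMA_NO_NODE ||
			 its_numa_node == of_node_to_nid(cpu_node));
		of_node_put(cpu_node);

		return local;
	}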
@@ -1351,9 +1375,14 @@ static void its_irq_domain_activate(struct irq_domain *domain,
 {
 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
 	u32 event = its_get_event_id(d);
+	const struct cpumask *cpu_mask = cpu_online_mask;
+
+	/* get the cpu_mask of the local node */
+	if (its_dev->its->numa_node >= 0)
+		cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 
 	/* Bind the LPI to the first possible CPU */
-	its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
+	its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);
 
 	/* Map the GIC IRQ and event to the device */
 	its_send_mapvi(its_dev, d->hwirq, event);
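At activate time there is no user-supplied mask to honour, so the initial binding simply moves from "first online CPU" to "first CPU of the ITS's node" whenever a node is known. Note this path keys off numa_node alone rather than the workaround flag, which is harmless on parts without the erratum: binding an LPI near its ITS is a sensible default either way.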
@@ -1443,6 +1472,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
 	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
 }
 
+static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+{
+	struct its_node *its = data;
+
+	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+}
+
 static const struct gic_quirk its_quirks[] = {
 #ifdef CONFIG_CAVIUM_ERRATUM_22375
 	{
@@ -1451,6 +1487,14 @@ static const struct gic_quirk its_quirks[] = {
 		.mask	= 0xffff0fff,
 		.init	= its_enable_quirk_cavium_22375,
 	},
+#endif
+#ifdef CONFIG_CAVIUM_ERRATUM_23144
+	{
+		.desc	= "ITS: Cavium erratum 23144",
+		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
+		.mask	= 0xffff0fff,
+		.init	= its_enable_quirk_cavium_23144,
+	},
 #endif
 	{
 	}
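The new entry is picked up by the existing quirk scan, which compares GITS_IIDR against each struct gic_quirk entry under its mask. A sketch of that walk, modelled on the driver's gic_enable_quirks() helper (the body here is illustrative, not a verbatim copy):

	/* Walk the table and fire the init hook of every matching quirk. */
	static void enable_matching_quirks(u32 iidr, const struct gic_quirk *quirks,
					   void *data)
	{
		for (; quirks->desc; quirks++) {
			if (quirks->iidr != (iidr & quirks->mask))
				continue;
			quirks->init(data);
			pr_info("ITS: enabling workaround for %s\n", quirks->desc);
		}
	}

With .mask = 0xffff0fff, bits [15:12] of GITS_IIDR (the Revision field) are ignored, which is what lets the single value 0xa100034c cover every ThunderX pass 1.x part.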
@@ -1514,6 +1558,7 @@ static int __init its_probe(struct device_node *node,
 	its->base = its_base;
 	its->phys_base = res.start;
 	its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+	its->numa_node = of_node_to_nid(node);
 
 	its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
 	if (!its->cmd_base) {
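Finally, the probe path records the ITS's node straight from its device-tree node. On kernels without NUMA support, or with device trees that carry no proximity information, of_node_to_nid() yields NUMA_NO_NODE, so every check above degrades gracefully to the old node-agnostic behaviour.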