@@ -802,6 +802,8 @@ struct mdesc_mblock {
 };
 static struct mdesc_mblock *mblocks;
 static int num_mblocks;
+static int find_numa_node_for_addr(unsigned long pa,
+				   struct node_mem_mask *pnode_mask);
 
 static unsigned long ra_to_pa(unsigned long addr)
 {
@@ -821,6 +823,9 @@ static unsigned long ra_to_pa(unsigned long addr)
 
 static int find_node(unsigned long addr)
 {
+	static bool search_mdesc = true;
+	static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
+	static int last_index;
 	int i;
 
 	addr = ra_to_pa(addr);
@@ -830,10 +835,27 @@ static int find_node(unsigned long addr)
 		if ((addr & p->mask) == p->val)
 			return i;
 	}
-	/* The following condition has been observed on LDOM guests.*/
-	WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
-		  " rule. Some physical memory will be owned by node 0.");
-	return 0;
+	/* The following condition has been observed on LDOM guests because
+	 * node_masks only contains the best latency mask and value.
+	 * LDOM guest's mdesc can contain a single latency group to
+	 * cover multiple address range. Print warning message only if the
+	 * address cannot be found in node_masks nor mdesc.
+	 */
+	if ((search_mdesc) &&
+	    ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
+		/* find the available node in the mdesc */
+		last_index = find_numa_node_for_addr(addr, &last_mem_mask);
+		numadbg("find_node: latency group for address 0x%lx is %d\n",
+			addr, last_index);
+		if ((last_index < 0) || (last_index >= num_node_masks)) {
+			/* WARN_ONCE() and use default group 0 */
+			WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
+			search_mdesc = false;
+			last_index = 0;
+		}
+	}
+
+	return last_index;
 }
 
 static u64 memblock_nid_range(u64 start, u64 end, int *nid)
@@ -1160,6 +1182,41 @@ int __node_distance(int from, int to)
 	return numa_latency[from][to];
 }
 
+static int find_numa_node_for_addr(unsigned long pa,
+				   struct node_mem_mask *pnode_mask)
+{
+	struct mdesc_handle *md = mdesc_grab();
+	u64 node, arc;
+	int i = 0;
+
+	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
+	if (node == MDESC_NODE_NULL)
+		goto out;
+
+	mdesc_for_each_node_by_name(md, node, "group") {
+		mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
+			u64 target = mdesc_arc_target(md, arc);
+			struct mdesc_mlgroup *m = find_mlgroup(target);
+
+			if (!m)
+				continue;
+			if ((pa & m->mask) == m->match) {
+				if (pnode_mask) {
+					pnode_mask->mask = m->mask;
+					pnode_mask->val = m->match;
+				}
+				mdesc_release(md);
+				return i;
+			}
+		}
+		i++;
+	}
+
+out:
+	mdesc_release(md);
+	return -1;
+}
+
 static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 {
 	int i;