@@ -614,45 +614,68 @@ static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
 	}
 }
 
-static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
+static void find_back_node_value(struct mdesc_handle *hp, u64 node,
+				 char *srch_val,
+				 void (*func)(struct mdesc_handle *, u64, int),
+				 u64 val, int depth)
 {
-	u64 a;
-
-	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
-		u64 t = mdesc_arc_target(hp, a);
-		const char *name;
-		const u64 *id;
+	u64 arc;
 
-		name = mdesc_node_name(hp, t);
-		if (!strcmp(name, "cpu")) {
-			id = mdesc_get_property(hp, t, "id", NULL);
-			if (*id < NR_CPUS)
-				cpu_data(*id).core_id = core_id;
-		} else {
-			u64 j;
+	/* Since we have an estimate of recursion depth, do a sanity check. */
+	if (depth == 0)
+		return;
 
-			mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
-				u64 n = mdesc_arc_target(hp, j);
-				const char *n_name;
+	mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
+		u64 n = mdesc_arc_target(hp, arc);
+		const char *name = mdesc_node_name(hp, n);
 
-				n_name = mdesc_node_name(hp, n);
-				if (strcmp(n_name, "cpu"))
-					continue;
+		if (!strcmp(srch_val, name))
+			(*func)(hp, n, val);
 
-				id = mdesc_get_property(hp, n, "id", NULL);
-				if (*id < NR_CPUS)
-					cpu_data(*id).core_id = core_id;
-			}
-		}
+		find_back_node_value(hp, n, srch_val, func, val, depth-1);
 	}
 }
 
+static void __mark_core_id(struct mdesc_handle *hp, u64 node,
+			   int core_id)
+{
+	const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+	if (*id < num_possible_cpus())
+		cpu_data(*id).core_id = core_id;
+}
+
+static void __mark_sock_id(struct mdesc_handle *hp, u64 node,
+			   int sock_id)
+{
+	const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+	if (*id < num_possible_cpus())
+		cpu_data(*id).sock_id = sock_id;
+}
+
+static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
+			  int core_id)
+{
+	find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
+}
+
+static void mark_sock_ids(struct mdesc_handle *hp, u64 mp,
+			  int sock_id)
+{
+	find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10);
+}
+
 static void set_core_ids(struct mdesc_handle *hp)
 {
 	int idx;
 	u64 mp;
 
 	idx = 1;
+
+	/* Identify unique cores by looking for cpus backpointed to by
+	 * level 1 instruction caches.
+	 */
 	mdesc_for_each_node_by_name(hp, mp, "cache") {
 		const u64 *level;
 		const char *type;
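
The first hunk replaces the open-coded, two-level loop in mark_core_ids() with a generic traversal: find_back_node_value() recursively follows MDESC_ARC_TYPE_BACK arcs from a node, invokes the supplied callback on every target whose name matches srch_val, and stops once the depth budget reaches zero. mark_core_ids() and mark_sock_ids() are then thin wrappers that search for "cpu" nodes with a depth budget of 10. As a rough, self-contained sketch of the same pattern (md_node, walk_back() and mark_core() are hypothetical stand-ins, not the real mdesc API):

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a machine-description node: a name, an id,
 * and an array of "back" pointers toward the nodes that reference it.
 */
struct md_node {
	const char *name;
	int id;
	struct md_node *back[4];
	int nr_back;
};

/* Depth-limited recursive walk over back pointers; calls func() on every
 * reachable node whose name matches srch_val, mirroring the shape of
 * find_back_node_value() above.
 */
static void walk_back(struct md_node *node, const char *srch_val,
		      void (*func)(struct md_node *, int), int val, int depth)
{
	int i;

	if (depth == 0)		/* recursion budget exhausted */
		return;

	for (i = 0; i < node->nr_back; i++) {
		struct md_node *n = node->back[i];

		if (!strcmp(srch_val, n->name))
			func(n, val);

		walk_back(n, srch_val, func, val, depth - 1);
	}
}

static void mark_core(struct md_node *cpu, int core_id)
{
	printf("cpu %d -> core %d\n", cpu->id, core_id);
}

int main(void)
{
	struct md_node cpu0 = { .name = "cpu", .id = 0 };
	struct md_node cpu1 = { .name = "cpu", .id = 1 };
	struct md_node l1i  = { .name = "cache", .back = { &cpu0, &cpu1 }, .nr_back = 2 };

	/* The L1-I cache's back arcs lead to the two cpus sharing it,
	 * so both are assigned core id 1.
	 */
	walk_back(&l1i, "cpu", mark_core, 1, 10);
	return 0;
}
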
@@ -667,11 +690,72 @@ static void set_core_ids(struct mdesc_handle *hp)
 			continue;
 
 		mark_core_ids(hp, mp, idx);
-
+		idx++;
+	}
+}
+
+static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
+{
+	u64 mp;
+	int idx = 1;
+	int fnd = 0;
+
+	/* Identify unique sockets by looking for cpus backpointed to by
+	 * shared level n caches.
+	 */
+	mdesc_for_each_node_by_name(hp, mp, "cache") {
+		const u64 *cur_lvl;
+
+		cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
+		if (*cur_lvl != level)
+			continue;
+
+		mark_sock_ids(hp, mp, idx);
+		idx++;
+		fnd = 1;
+	}
+	return fnd;
+}
+
+static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
+{
+	int idx = 1;
+
+	mdesc_for_each_node_by_name(hp, mp, "socket") {
+		u64 a;
+
+		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+			u64 t = mdesc_arc_target(hp, a);
+			const char *name;
+			const u64 *id;
+
+			name = mdesc_node_name(hp, t);
+			if (strcmp(name, "cpu"))
+				continue;
+
+			id = mdesc_get_property(hp, t, "id", NULL);
+			if (*id < num_possible_cpus())
+				cpu_data(*id).sock_id = idx;
+		}
 		idx++;
 	}
 }
 
+static void set_sock_ids(struct mdesc_handle *hp)
+{
+	u64 mp;
+
+	/* If machine description exposes sockets data use it.
+	 * Otherwise fallback to use shared L3 or L2 caches.
+	 */
+	mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
+	if (mp != MDESC_NODE_NULL)
+		return set_sock_ids_by_socket(hp, mp);
+
+	if (!set_sock_ids_by_cache(hp, 3))
+		set_sock_ids_by_cache(hp, 2);
+}
+
 static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
 {
 	u64 a;
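
The second hunk fills in cpu_data(*id).sock_id for every cpu: set_sock_ids() prefers explicit "socket" nodes reachable from the machine description's "sockets" node (forward arcs to cpu nodes), and falls back to grouping cpus that share a level 3, then a level 2, cache. Purely as an illustration of what the recorded value enables, a hypothetical helper (not part of this patch) could collect socket siblings from it:

#include <linux/cpumask.h>
#include <asm/cpudata.h>

/* Hypothetical helper: gather every cpu that received the same sock_id
 * as 'cpu' into 'mask'.  It relies only on the value set_sock_ids()
 * stores, not on how that value was derived.
 */
static void fill_socket_siblings(int cpu, cpumask_t *mask)
{
	int i;

	cpumask_clear(mask);
	for_each_possible_cpu(i) {
		if (cpu_data(i).sock_id == cpu_data(cpu).sock_id)
			cpumask_set_cpu(i, mask);
	}
}

Callers of such a helper would not need to know whether the id came from a socket node or from a shared cache; the fallback is invisible once sock_id is populated.
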
@@ -707,7 +791,6 @@ static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
 			continue;
 
 		mark_proc_ids(hp, mp, idx);
-
 		idx++;
 	}
 }
@@ -900,6 +983,7 @@ void mdesc_fill_in_cpu_data(cpumask_t *mask)
 
 	set_core_ids(hp);
 	set_proc_ids(hp);
+	set_sock_ids(hp);
 
 	mdesc_release(hp);
 
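
The final hunk calls the new set_sock_ids() from mdesc_fill_in_cpu_data(), right after the existing set_core_ids() and set_proc_ids() passes. A hypothetical debug snippet (again, not part of the patch) could dump the resulting per-cpu ids to check the assignment against a given machine description:

#include <linux/printk.h>
#include <linux/cpumask.h>
#include <asm/cpudata.h>

/* Hypothetical debug aid: print the topology ids recorded for each
 * possible cpu once mdesc_fill_in_cpu_data() has run.
 */
static void dump_cpu_topology_ids(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		pr_info("cpu%d: core_id=%d sock_id=%d proc_id=%d\n",
			cpu, cpu_data(cpu).core_id,
			cpu_data(cpu).sock_id, cpu_data(cpu).proc_id);
}
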