cacheinfo.c

/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>

#include "cacheinfo.h"
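
/*
 * The sysfs layout this file produces mirrors the x86 one; an
 * illustrative tree for cpu0 with a split L1 and a unified L2:
 *
 *	/sys/devices/system/cpu/cpu0/cache/
 *		index0/		L1 data cache: type, level, shared_cpu_map, ...
 *		index1/		L1 instruction cache
 *		index2/		L2 unified cache
 *
 * The optional attributes (size, coherency_line_size, number_of_sets,
 * ways_of_associativity) appear only when the corresponding device-tree
 * properties are present; see cacheinfo_create_index_opt_attrs().
 */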

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu.  This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties.  According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0
#define CACHE_TYPE_INSTRUCTION 1
#define CACHE_TYPE_DATA        2

static const struct cache_type_info cache_type_info[] = {
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
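
/*
 * Illustrative device-tree fragment (hypothetical values) showing the
 * properties the templates above select, here for a 32K, 8-way L1
 * data cache with 128-byte lines (32768 / 32 sets / 128 = 8 ways):
 *
 *	d-cache-size		= <0x8000>;
 *	d-cache-line-size	= <128>;
 *	d-cache-sets		= <32>;
 */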

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system.  There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object.  A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %s(%s) refers to cache for %s(%s)\n",
			  iter->ofnode->full_name,
			  cache_type_string(iter),
			  cache->ofnode->full_name,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %s\n", cache->level,
		 cache_type_string(cache), cache->ofnode->full_name);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %s(%s)\n",
			  cpu, next->ofnode->full_name,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;
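
	/* The geometry satisfies size = nr_sets * ways * line_size,
	 * so ways = (size / nr_sets) / line_size. */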
	*ret = (size / nr_sets) / line_size;
	return 0;

err:
	return -ENODEV;
}

/* helper for dealing with split caches: given either half of a split
 * cache, return the first-linked sibling (the data cache, which is
 * linked before its instruction sibling); a unified cache returns
 * itself */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}
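
/* "cache-unified" is a boolean/empty OF property: its mere presence
 * marks the node as a unified cache, so only the pointer returned by
 * of_get_property() matters here, not the property's value. */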
static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node,
						  int level)
{
	struct cache *cache;

	pr_debug("creating L%d ucache for %s\n", level, node->full_name);

	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);

	return cache;
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %s\n", level,
		 node->full_name);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}

static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);
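
	/* Walk towards larger caches: of_find_next_cache_node()
	 * follows the node's "l2-cache" (or "next-level-cache")
	 * phandle to the next cache in the hierarchy. */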
	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}
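
/* sysfs ->show dispatch for index directories; the attributes are all
 * read-only (0444), so no corresponding ->store is provided. */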
static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	int len;
	int n = 0;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;
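
	/* reserve room for the trailing newline and NUL terminator */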
	len = PAGE_SIZE - 2;
	if (len > 1) {
		n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs.  This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_name;
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_name = cache->ofnode->full_name;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value.  Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%s(%s) (rc = %zd)\n",
				 attr->attr.name, cache_name,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %s(%s)\n",
				 attr->attr.name, cache_name, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		/* Once kobject_init_and_add() has run, cleanup must go
		 * through kobject_put(); cache_index_release() then
		 * frees index_dir.  A bare kfree() here would bypass
		 * the kobject's teardown. */
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}
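
/* Public entry point: instantiate the cpu's cache chain and populate
 * sysfs when the cpu comes online (at boot or, per the cache_list
 * comment above, at hotplug time). */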
void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */
#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
	defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %s(%s)\n",
			  cpu, cache->ofnode->full_name,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */