@@ -467,7 +467,7 @@ static int nest_imc_event_init(struct perf_event *event)
 	 * Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
 	 * Get the base memory addresss for this cpu.
 	 */
-	chip_id = topology_physical_package_id(event->cpu);
+	chip_id = cpu_to_chip_id(event->cpu);
 	pcni = pmu->mem_info;
 	do {
 		if (pcni->id == chip_id) {
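
The nest hunk above still wants a chip id, since pmu->mem_info is a per-chip table describing the HOMER reserve region, so its lookup key moves to cpu_to_chip_id(). The two hunks below instead need a NUMA node id, because that is what alloc_pages_node() takes. A minimal sketch of the distinction follows; it is illustrative only, imc_show_ids() is a hypothetical helper, and it assumes a powerpc kernel where cpu_to_chip_id() is available:

#include <linux/printk.h>
#include <linux/smp.h>		/* pulls in cpu_to_chip_id() on powerpc */
#include <linux/topology.h>	/* cpu_to_node() */

static void imc_show_ids(int cpu)
{
	int chip_id = cpu_to_chip_id(cpu);	/* key into the per-chip HOMER table */
	int nid = cpu_to_node(cpu);		/* valid nid for alloc_pages_node() */

	/* The two ids live in different namespaces and need not be equal. */
	pr_info("cpu %d: chip %d, numa node %d\n", cpu, chip_id, nid);
}
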
@@ -524,19 +524,19 @@ static int nest_imc_event_init(struct perf_event *event)
  */
 static int core_imc_mem_init(int cpu, int size)
 {
-	int phys_id, rc = 0, core_id = (cpu / threads_per_core);
+	int nid, rc = 0, core_id = (cpu / threads_per_core);
 	struct imc_mem_info *mem_info;
 
 	/*
 	 * alloc_pages_node() will allocate memory for core in the
 	 * local node only.
 	 */
-	phys_id = topology_physical_package_id(cpu);
+	nid = cpu_to_node(cpu);
 	mem_info = &core_imc_pmu->mem_info[core_id];
 	mem_info->id = core_id;
 
 	/* We need only vbase for core counters */
-	mem_info->vbase = page_address(alloc_pages_node(phys_id,
+	mem_info->vbase = page_address(alloc_pages_node(nid,
 					GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
 					__GFP_NOWARN, get_order(size)));
 	if (!mem_info->vbase)
@@ -797,14 +797,14 @@ static int core_imc_event_init(struct perf_event *event)
 static int thread_imc_mem_alloc(int cpu_id, int size)
 {
 	u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
-	int phys_id = topology_physical_package_id(cpu_id);
+	int nid = cpu_to_node(cpu_id);
 
 	if (!local_mem) {
 		/*
 		 * This case could happen only once at start, since we dont
 		 * free the memory in cpu offline path.
 		 */
-		local_mem = page_address(alloc_pages_node(phys_id,
+		local_mem = page_address(alloc_pages_node(nid,
 					GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
 					__GFP_NOWARN, get_order(size)));
 		if (!local_mem)
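
Both allocation sites above now share one pattern: zeroed pages taken strictly from the cpu's local NUMA node, mapped through page_address(). A condensed sketch of that pattern with a matching teardown, for illustration only; imc_alloc_counter_mem() and imc_free_counter_mem() are hypothetical names, not functions from this file:

#include <linux/gfp.h>		/* alloc_pages_node(), free_pages() */
#include <linux/mm.h>		/* page_address(), get_order() */
#include <linux/topology.h>	/* cpu_to_node() */

static void *imc_alloc_counter_mem(int cpu, int size)
{
	int nid = cpu_to_node(cpu);	/* NUMA node id, not a chip id */
	struct page *page;

	/* Zeroed, node-local pages; stay quiet if the node has no memory. */
	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO |
				__GFP_THISNODE | __GFP_NOWARN,
				get_order(size));
	return page ? page_address(page) : NULL;
}

static void imc_free_counter_mem(void *vbase, int size)
{
	if (vbase)
		free_pages((unsigned long)vbase, get_order(size));
}

Note that the sketch NULL-checks the allocation before calling page_address(), whereas the hunks above pass the result straight through and rely on the !vbase / !local_mem checks that follow.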