perf bench numa: Fix the condition checks for max number of NUMA nodes

On systems where higher NUMA node numbers are available, such as node
255, 'perf bench numa' fails with SIGABRT:

  <<>>
  perf: bench/numa.c:1416: init: Assertion `!(g->p.nr_nodes > 64 || g->p.nr_nodes < 0)' failed.
  Aborted (core dumped)
  <<>>

Snippet of 'numactl -H' output from a powerpc system where the highest
available node number is 255:

  available: 6 nodes (0,8,252-255)
  node 0 cpus: <cpu-list>
  node 0 size: 519587 MB
  node 0 free: 516659 MB
  node 8 cpus: <cpu-list>
  node 8 size: 523607 MB
  node 8 free: 486757 MB
  node 252 cpus:
  node 252 size: 0 MB
  node 252 free: 0 MB
  node 253 cpus:
  node 253 size: 0 MB
  node 253 free: 0 MB
  node 254 cpus:
  node 254 size: 0 MB
  node 254 free: 0 MB
  node 255 cpus:
  node 255 size: 0 MB
  node 255 free: 0 MB
  node distances:
  node   0   8  252  253  254  255

Note: <cpu-list> stands for the actual CPU list in the original output.
Nodes 252-255 represent memory on GPUs and are valid NUMA nodes.

The perf numa bench init code checks whether the number of NUMA nodes
(nr_nodes) exceeds MAX_NR_NODES, which is defined in the perf code as 64.
'nr_nodes' is derived from numa_max_node(), which returns the highest
node number available in the system, plus one. On a system like the one
above, where node 255 exists, nr_nodes becomes 256, the assertion fails
and the benchmark aborts with SIGABRT.
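
For illustration only (this snippet is not part of the patch; it assumes
the libnuma headers are installed and is linked with -lnuma), a minimal
standalone program that evaluates the same condition shows why the
assertion fires once node 255 is present:

  /* illustrative only: mimics the old check in bench/numa.c init() */
  #include <assert.h>
  #include <stdio.h>
  #include <numa.h>               /* numa_available(), numa_max_node() */

  #define MAX_NR_NODES 64         /* the old static limit */

  int main(void)
  {
          int nr_nodes;

          if (numa_available() < 0)
                  return 1;

          /* same derivation as init(): highest node number + 1 */
          nr_nodes = numa_max_node() + 1;
          printf("nr_nodes = %d\n", nr_nodes);

          /* with node 255 present, nr_nodes is 256 and this aborts */
          assert(!(nr_nodes > MAX_NR_NODES || nr_nodes < 0));
          return 0;
  }

On the system above, numa_max_node() returns 255, so this prints
nr_nodes = 256 and then aborts in the assert, matching the perf failure
shown in the log.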

The numa benchmark uses the static MAX_NR_NODES value to size two NUMA
node arrays and the node bitmask used for setting the memory policy. Fix
this by sizing the two arrays and the bitmask dynamically, based on the
number of nodes available in the system. With this fix, the perf numa
benchmark works with the node configuration of any system, and the
static MAX_NR_NODES value is no longer needed.
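
As a rough sketch of that approach (simplified from the actual change in
the diff below; the bind_to_memnode_sketch() name and the error handling
are illustrative, and the NUMA_NO_NODE early return is omitted), the
fixed-width 'unsigned long' mask is replaced by a libnuma-allocated
nodemask sized for however many nodes the kernel reports:

  #include <numa.h>               /* numa_allocate_nodemask() and friends */
  #include <numaif.h>             /* set_mempolicy(), MPOL_BIND */

  static int bind_to_memnode_sketch(int node)
  {
          struct bitmask *node_mask;
          int ret;

          /* sized for the kernel's node mask, not a hard-coded 64 */
          node_mask = numa_allocate_nodemask();
          if (!node_mask)
                  return -1;

          numa_bitmask_clearall(node_mask);
          numa_bitmask_setbit(node_mask, node);

          /* same call shape as in the patch below */
          ret = set_mempolicy(MPOL_BIND, node_mask->maskp, node_mask->size + 1);

          numa_bitmask_free(node_mask);
          return ret;
  }

Because numa_allocate_nodemask() sizes the mask from the kernel's node
mask rather than from a fixed 64-bit word, the bind works for any
highest node number, which is also why the MAX_NR_NODES check in init()
can be dropped.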

Signed-off-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Reviewed-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lore.kernel.org/lkml/1614271802-1503-1-git-send-email-atrajeev@linux.vnet.ibm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
 1 file changed, 29 insertions(+), 13 deletions(-)

--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -344,18 +344,22 @@ static void mempol_restore(void)
 
 static void bind_to_memnode(int node)
 {
-        unsigned long nodemask;
+        struct bitmask *node_mask;
         int ret;
 
         if (node == NUMA_NO_NODE)
                 return;
 
-        BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)*8);
-        nodemask = 1L << node;
+        node_mask = numa_allocate_nodemask();
+        BUG_ON(!node_mask);
 
-        ret = set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask)*8);
-        dprintf("binding to node %d, mask: %016lx => %d\n", node, nodemask, ret);
+        numa_bitmask_clearall(node_mask);
+        numa_bitmask_setbit(node_mask, node);
 
+        ret = set_mempolicy(MPOL_BIND, node_mask->maskp, node_mask->size + 1);
+        dprintf("binding to node %d, mask: %016lx => %d\n", node, *node_mask->maskp, ret);
+
+        numa_bitmask_free(node_mask);
         BUG_ON(ret);
 }
 
@@ -876,8 +880,6 @@ static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
         prctl(0, bytes_worked);
 }
 
-#define MAX_NR_NODES    64
-
 /*
  * Count the number of nodes a process's threads
  * are spread out on.
@@ -888,10 +890,15 @@ static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
  */
 static int count_process_nodes(int process_nr)
 {
-        char node_present[MAX_NR_NODES] = { 0, };
+        char *node_present;
         int nodes;
         int n, t;
 
+        node_present = (char *)malloc(g->p.nr_nodes * sizeof(char));
+        BUG_ON(!node_present);
+        for (nodes = 0; nodes < g->p.nr_nodes; nodes++)
+                node_present[nodes] = 0;
+
         for (t = 0; t < g->p.nr_threads; t++) {
                 struct thread_data *td;
                 int task_nr;
@@ -901,17 +908,20 @@ static int count_process_nodes(int process_nr)
                 td = g->threads + task_nr;
 
                 node = numa_node_of_cpu(td->curr_cpu);
-                if (node < 0) /* curr_cpu was likely still -1 */
+                if (node < 0) /* curr_cpu was likely still -1 */ {
+                        free(node_present);
                         return 0;
+                }
 
                 node_present[node] = 1;
         }
 
         nodes = 0;
 
-        for (n = 0; n < MAX_NR_NODES; n++)
+        for (n = 0; n < g->p.nr_nodes; n++)
                 nodes += node_present[n];
 
+        free(node_present);
         return nodes;
 }
 
@@ -980,7 +990,7 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
 {
         unsigned int loops_done_min, loops_done_max;
         int process_groups;
-        int nodes[MAX_NR_NODES];
+        int *nodes;
         int distance;
         int nr_min;
         int nr_max;
@@ -994,6 +1004,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
         if (!g->p.show_convergence && !g->p.measure_convergence)
                 return;
 
+        nodes = (int *)malloc(g->p.nr_nodes * sizeof(int));
+        BUG_ON(!nodes);
         for (node = 0; node < g->p.nr_nodes; node++)
                 nodes[node] = 0;
 
@@ -1035,8 +1047,10 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
 
         BUG_ON(sum > g->p.nr_tasks);
 
-        if (0 && (sum < g->p.nr_tasks))
+        if (0 && (sum < g->p.nr_tasks)) {
+                free(nodes);
                 return;
+        }
 
         /*
          * Count the number of distinct process groups present
@@ -1088,6 +1102,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
                 }
                 tprintf("\n");
         }
+
+        free(nodes);
 }
 
 static void show_summary(double runtime_ns_max, int l, double *convergence)
@@ -1413,7 +1429,7 @@ static int init(void)
         g->p.nr_nodes = numa_max_node() + 1;
 
         /* char array in count_process_nodes(): */
-        BUG_ON(g->p.nr_nodes > MAX_NR_NODES || g->p.nr_nodes < 0);
+        BUG_ON(g->p.nr_nodes < 0);
 
         if (g->p.show_quiet && !g->p.show_details)
                 g->p.show_details = -1;