powerpc fixes for 4.16 #3

The main attraction is a fix for a bug in the new drmem code, which was causing
 an oops on boot on some versions of Qemu.
 
 There's also a fix for XIVE (Power9 interrupt controller) on KVM, as well as a
 few other minor fixes.
 
 Thanks to:
   Corentin Labbe, Cyril Bur, Cédric Le Goater, Daniel Black, Nathan Fontenot,
   Nicholas Piggin.
 -----BEGIN PGP SIGNATURE-----
 
 iQIwBAABCAAaBQJah/esExxtcGVAZWxsZXJtYW4uaWQuYXUACgkQUevqPMjhpYCi
 KA/+IaDlvxKezRBNQnj6GElBrgfUzICH6MtG6qo+rCKsTMgbAiZJkk3vz/JgqKY/
 EusTNCwcqLaPBDgwoSmbazdtnj7YOwBGdIQOq+rC/qeSV0/gpdo02dPUWaMMOE/x
 nj+zASrOsv/o9XWX4XmJeuYWhW/8a/nWXKa+oLt3g/5pIHHP5TXTzMHvHH0Rn23D
 1ejwDHDwMNL3p2jHlcf+v1DDol51/Kaa8e8KwJJMf00HVfFVXtdnH7do6I1qBeC0
 t7PLDeWnpyY+3M1fNJ303EXIqc9DArUCn6tdhy6om96rGvBddORFuRkS4kkXbx76
 pnTRPWnPa9aeC2rU+C84sJDQJgeBCpMYOvw96Yr1SxFhE9z0T+9YYTnZxiB7GISK
 5BAf3EzE9dc0RtStrfTKnvcuz2OffPq2VZi3sqjiHFDit2TsF+i7ZkX/CR9UAmaH
 HPk4Fbi/IzlSfx/RffrOXYrpsTNcUmzvA/Tj83qGhM30LCMQ24o84eGTZN36a/eg
 Z+7/MZawtNElsNNpJz6MYtvEQkHZyrTUS+9iyqRwLXCIy/JIHZYwb4aRtu41SET8
 lWwuaLjfwdpVPCeEkiNQwCswtppt4j2XS8Ggqef9GkqElsk6JKyxuZWTv2hRfJUK
 DTQvPV4PIhvhrB6qvT11qOm5yDSSV9f6yaLJ5dm3BiXulF0=
 =8QPF
 -----END PGP SIGNATURE-----

Merge tag 'powerpc-4.16-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "The main attraction is a fix for a bug in the new drmem code, which
  was causing an oops on boot on some versions of Qemu.

  There's also a fix for XIVE (Power9 interrupt controller) on KVM, as
  well as a few other minor fixes.

  Thanks to: Corentin Labbe, Cyril Bur, Cédric Le Goater, Daniel Black,
  Nathan Fontenot, Nicholas Piggin"

* tag 'powerpc-4.16-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/pseries: Check for zero filled ibm,dynamic-memory property
  powerpc/pseries: Add empty update_numa_cpu_lookup_table() for NUMA=n
  powerpc/powernv: IMC fix out of bounds memory access at shutdown
  powerpc/xive: Use hw CPU ids when configuring the CPU queues
  powerpc: Expose TSCR via sysfs only on powernv
commit ee78ad7848
Author: Linus Torvalds
Date:   2018-02-17 09:48:26 -08:00

5 changed files with 29 additions and 10 deletions

--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -81,6 +81,9 @@ static inline int numa_update_cpu_topology(bool cpus_locked)
 {
 	return 0;
 }
+
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
+
 #endif /* CONFIG_NUMA */
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
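The hunk above is the standard kernel stub pattern: with CONFIG_NUMA=n there is no real update_numa_cpu_lookup_table(), so an empty static inline keeps common code compiling without #ifdefs at every call site. A stand-alone sketch of the same pattern, with invented names (not the kernel's):

#include <stdio.h>

#define CONFIG_NUMA 0			/* flip to 1 for the "real" path */

#if CONFIG_NUMA
static void update_lookup_table(unsigned int cpu, int node)
{
	printf("mapping cpu %u to node %d\n", cpu, node);
}
#else
/* option disabled: an empty inline stub keeps callers compiling */
static inline void update_lookup_table(unsigned int cpu, int node) {}
#endif

int main(void)
{
	update_lookup_table(0, 0);	/* compiles and runs either way */
	return 0;
}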

--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -788,7 +788,8 @@ static int register_cpu_online(unsigned int cpu)
 	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
 		device_create_file(s, &dev_attr_pir);
 
-	if (cpu_has_feature(CPU_FTR_ARCH_206))
+	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+		!firmware_has_feature(FW_FEATURE_LPAR))
 		device_create_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */
 
@@ -873,7 +874,8 @@ static int unregister_cpu_online(unsigned int cpu)
 	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
 		device_remove_file(s, &dev_attr_pir);
 
-	if (cpu_has_feature(CPU_FTR_ARCH_206))
+	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+		!firmware_has_feature(FW_FEATURE_LPAR))
 		device_remove_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */
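The TSCR is accessible only in hypervisor mode, so a pseries guest running as an LPAR must not offer it via sysfs; the fix adds the firmware test to both the create and remove paths. A toy sketch of the guard, where both helpers are invented stand-ins for the kernel's feature tests:

#include <stdbool.h>
#include <stdio.h>

static bool cpu_is_arch_206(void) { return true; }	/* CPU_FTR_ARCH_206 */
static bool running_as_lpar(void) { return true; }	/* FW_FEATURE_LPAR */

int main(void)
{
	/* Same shape as the fixed condition: expose the register only
	 * on bare metal (powernv), never inside an LPAR guest. */
	if (cpu_is_arch_206() && !running_as_lpar())
		printf("creating tscr sysfs attribute\n");
	else
		printf("tscr hidden: not accessible under an LPAR\n");
	return 0;
}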

--- a/arch/powerpc/mm/drmem.c
+++ b/arch/powerpc/mm/drmem.c
@@ -216,6 +216,8 @@ static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
 	u32 i, n_lmbs;
 
 	n_lmbs = of_read_number(prop++, 1);
+	if (n_lmbs == 0)
+		return;
 
 	for (i = 0; i < n_lmbs; i++) {
 		read_drconf_v1_cell(&lmb, &prop);
@@ -245,6 +247,8 @@ static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
 	u32 i, j, lmb_sets;
 
 	lmb_sets = of_read_number(prop++, 1);
+	if (lmb_sets == 0)
+		return;
 
 	for (i = 0; i < lmb_sets; i++) {
 		read_drconf_v2_cell(&dr_cell, &prop);
@@ -354,6 +358,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
 	struct drmem_lmb *lmb;
 
 	drmem_info->n_lmbs = of_read_number(prop++, 1);
+	if (drmem_info->n_lmbs == 0)
+		return;
 
 	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
 			GFP_KERNEL);
@@ -373,6 +379,8 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
 	int lmb_index;
 
 	lmb_sets = of_read_number(prop++, 1);
+	if (lmb_sets == 0)
+		return;
 
 	/* first pass, calculate the number of LMBs */
 	p = prop;
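All four drmem hunks apply the same guard: the first cell of the ibm,dynamic-memory property (or its v2 variant) is an entry count, and the problem QEMU versions hand over a property that is entirely zeros, so the walkers must bail out before sizing allocations or iterating from it. A stand-alone sketch of the decode-and-guard step, where read_cell is a hypothetical single-cell analogue of the kernel's of_read_number():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Device-tree property cells are big-endian 32-bit values. */
static uint32_t read_cell(const uint32_t **prop)
{
	return ntohl(*(*prop)++);
}

int main(void)
{
	/* A zero-filled property, as the problem QEMU versions present. */
	const uint32_t zero_filled[4] = { 0, 0, 0, 0 };
	const uint32_t *p = zero_filled;

	uint32_t n_lmbs = read_cell(&p);
	if (n_lmbs == 0) {	/* the guard each fixed function gains */
		printf("no LMBs described, bailing out early\n");
		return 0;
	}
	printf("would walk %u LMB entries\n", n_lmbs);
	return 0;
}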

--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -199,9 +199,11 @@ static void disable_nest_pmu_counters(void)
 	const struct cpumask *l_cpumask;
 
 	get_online_cpus();
-	for_each_online_node(nid) {
+	for_each_node_with_cpus(nid) {
 		l_cpumask = cpumask_of_node(nid);
-		cpu = cpumask_first(l_cpumask);
+		cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
+		if (cpu >= nr_cpu_ids)
+			continue;
 		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
 				       get_hard_smp_processor_id(cpu));
 	}
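The IMC bug is a classic empty-cpumask hazard: on a node with no online CPUs, cpumask_first() returns a value >= nr_cpu_ids, and using that to index per-CPU data reads out of bounds. The fix iterates only nodes that have CPUs, restricts the mask to online CPUs, and checks the result. A user-space sketch of the hazard and the guard, where first_set and hw_id are invented stand-ins:

#include <stdio.h>

#define NR_CPUS 8

/* Plain bit scan standing in for cpumask_first(): like the kernel
 * helper, it returns NR_CPUS ("past the end") when the mask is empty. */
static int first_set(unsigned int mask)
{
	for (int i = 0; i < NR_CPUS; i++)
		if (mask & (1u << i))
			return i;
	return NR_CPUS;
}

int main(void)
{
	int hw_id[NR_CPUS] = { 0 };	/* per-cpu table, like paca lookups */
	unsigned int node_cpus = 0;	/* a memory-only node: no CPUs */

	int cpu = first_set(node_cpus);
	if (cpu >= NR_CPUS) {		/* the guard the fix adds */
		printf("node has no online CPUs, skipping\n");
		return 0;
	}
	printf("stopping counters via hw id %d\n", hw_id[cpu]);
	return 0;
}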

--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
 
 	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
 	if (rc) {
-		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
+		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
+		       target, prio);
 		rc = -EIO;
 		goto fail;
 	}
@@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
 	/* Configure and enable the queue in HW */
 	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
 	if (rc) {
-		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
+		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
+		       target, prio);
 		rc = -EIO;
 	} else {
 		q->qpage = qpage;
@@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
 	if (IS_ERR(qpage))
 		return PTR_ERR(qpage);
 
-	return xive_spapr_configure_queue(cpu, q, prio, qpage,
-					  xive_queue_shift);
+	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
+					  q, prio, qpage, xive_queue_shift);
 }
 
 static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
@@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
 	struct xive_q *q = &xc->queue[prio];
 	unsigned int alloc_order;
 	long rc;
+	int hw_cpu = get_hard_smp_processor_id(cpu);
 
-	rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0);
+	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
 	if (rc)
-		pr_err("Error %ld setting queue for prio %d\n", rc, prio);
+		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
+		       hw_cpu, prio);
 
 	alloc_order = xive_alloc_order(xive_queue_shift);
 	free_pages((unsigned long)q->qpage, alloc_order);
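The XIVE fix is about id spaces: Linux iterates logical CPU numbers, but the firmware calls behind plpar_int_set_queue_config() address hardware thread ids, so every firmware-facing call must translate via get_hard_smp_processor_id() first (the error messages gain the CPU number for the same reason). A toy illustration of the translation, where the mapping table and helper names are invented for the demo:

#include <stdio.h>

/* Hypothetical logical-to-hardware thread id table; the kernel gets
 * this from the device tree via get_hard_smp_processor_id(). */
static const int hard_id[] = { 0, 4, 8, 12 };

static void hcall_set_queue(int target, int prio)
{
	/* The hypervisor interface expects a *hardware* thread id. */
	printf("set queue config: hw cpu %d, prio %d\n", target, prio);
}

int main(void)
{
	unsigned int cpu = 2;	/* logical id, what Linux loops over */

	/* Bug shape: passing 'cpu' directly targets the wrong thread
	 * whenever logical and hardware ids diverge. Correct form: */
	hcall_set_queue(hard_id[cpu], 7);
	return 0;
}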