mirror of https://gitee.com/openkylin/linux.git
x86/platform/uv/BAU: Add wait_completion to bau_operations
Remove the present wait_completion routine and add a function pointer by
the same name to the bau_operations struct. Rather than switching on the
UV hub version during message processing, set the architecture-specific
uv*_wait_completion during initialization. The uv123_bau_ops struct must
be split into uv1 and uv2_3 versions to accommodate the corresponding
wait_completion routines.

Signed-off-by: Andrew Banman <abanman@hpe.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Mike Travis <mike.travis@hpe.com>
Cc: sivanich@hpe.com
Cc: rja@hpe.com
Cc: akpm@linux-foundation.org
Link: http://lkml.kernel.org/r/1489077734-111753-6-git-send-email-abanman@hpe.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent dfeb28f068
commit 2620bbbf1f
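The hunks below show the change itself. As a side note, the pattern the commit applies is a common one: resolve a hardware-version decision once at init by filling a function pointer in an ops table, so the hot path calls through the pointer instead of branching on the version every time. The following is a minimal userspace sketch of that pattern, not the kernel code; all names (demo_bau_ops, demo_init, demo_*_wait_completion) are illustrative.

/*
 * Minimal sketch (assumed names, not the kernel's): pick the
 * version-specific wait_completion implementation once at init via an
 * ops table, then call through the pointer on the hot path.
 */
#include <stdio.h>

struct demo_bau_desc { int dummy; };
struct demo_bau_control { int uvhub_version; };

struct demo_bau_operations {
	int (*wait_completion)(struct demo_bau_desc *desc,
			       struct demo_bau_control *bcp, long try);
};

static int demo_uv1_wait_completion(struct demo_bau_desc *desc,
				    struct demo_bau_control *bcp, long try)
{
	(void)desc; (void)bcp;
	printf("uv1-style wait, try %ld\n", try);
	return 0;
}

static int demo_uv2_3_wait_completion(struct demo_bau_desc *desc,
				      struct demo_bau_control *bcp, long try)
{
	(void)desc; (void)bcp;
	printf("uv2/3-style wait, try %ld\n", try);
	return 0;
}

static const struct demo_bau_operations demo_uv1_ops = {
	.wait_completion = demo_uv1_wait_completion,
};

static const struct demo_bau_operations demo_uv2_3_ops = {
	.wait_completion = demo_uv2_3_wait_completion,
};

static struct demo_bau_operations ops;

/* Init-time selection: done once, analogous to uv_bau_init() choosing an ops table. */
static void demo_init(int hub_version)
{
	if (hub_version == 1)
		ops = demo_uv1_ops;
	else
		ops = demo_uv2_3_ops;
}

int main(void)
{
	struct demo_bau_desc desc = { 0 };
	struct demo_bau_control bcp = { .uvhub_version = 2 };

	demo_init(bcp.uvhub_version);
	/* Hot path: no version branch, just the pointer call. */
	return ops.wait_completion(&desc, &bcp, 1);
}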
@@ -672,6 +672,8 @@ struct bau_operations {
 	void (*write_g_sw_ack)(int pnode, unsigned long mmr);
 	void (*write_payload_first)(int pnode, unsigned long mmr);
 	void (*write_payload_last)(int pnode, unsigned long mmr);
+	int (*wait_completion)(struct bau_desc*,
+				struct bau_control*, long try);
 };
 
 static inline void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
@@ -686,14 +686,6 @@ static int uv2_3_wait_completion(struct bau_desc *bau_desc,
 	return FLUSH_COMPLETE;
 }
 
-static int wait_completion(struct bau_desc *bau_desc, struct bau_control *bcp, long try)
-{
-	if (bcp->uvhub_version == UV_BAU_V1)
-		return uv1_wait_completion(bau_desc, bcp, try);
-	else
-		return uv2_3_wait_completion(bau_desc, bcp, try);
-}
-
 /*
  * Our retries are blocked by all destination sw ack resources being
  * in use, and a timeout is pending. In that case hardware immediately
@@ -922,7 +914,7 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
 	write_mmr_activation(index);
 
 	try++;
-	completion_stat = wait_completion(bau_desc, bcp, try);
+	completion_stat = ops.wait_completion(bau_desc, bcp, try);
 
 	handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
 
@@ -2135,7 +2127,7 @@ static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
 	return 1;
 }
 
-static const struct bau_operations uv123_bau_ops __initconst = {
+static const struct bau_operations uv1_bau_ops __initconst = {
 	.bau_gpa_to_offset       = uv_gpa_to_offset,
 	.read_l_sw_ack           = read_mmr_sw_ack,
 	.read_g_sw_ack           = read_gmmr_sw_ack,
@@ -2143,6 +2135,18 @@ static const struct bau_operations uv123_bau_ops __initconst = {
 	.write_g_sw_ack          = write_gmmr_sw_ack,
 	.write_payload_first     = write_mmr_payload_first,
 	.write_payload_last      = write_mmr_payload_last,
+	.wait_completion         = uv1_wait_completion,
+};
+
+static const struct bau_operations uv2_3_bau_ops __initconst = {
+	.bau_gpa_to_offset       = uv_gpa_to_offset,
+	.read_l_sw_ack           = read_mmr_sw_ack,
+	.read_g_sw_ack           = read_gmmr_sw_ack,
+	.write_l_sw_ack          = write_mmr_sw_ack,
+	.write_g_sw_ack          = write_gmmr_sw_ack,
+	.write_payload_first     = write_mmr_payload_first,
+	.write_payload_last      = write_mmr_payload_last,
+	.wait_completion         = uv2_3_wait_completion,
 };
 
 static const struct bau_operations uv4_bau_ops __initconst = {
@@ -2153,6 +2157,7 @@ static const struct bau_operations uv4_bau_ops __initconst = {
 	.write_g_sw_ack          = write_gmmr_proc_sw_ack,
 	.write_payload_first     = write_mmr_proc_payload_first,
 	.write_payload_last      = write_mmr_proc_payload_last,
+	.wait_completion         = uv2_3_wait_completion,
 };
 
 /*
@@ -2174,11 +2179,11 @@ static int __init uv_bau_init(void)
 	if (is_uv4_hub())
 		ops = uv4_bau_ops;
 	else if (is_uv3_hub())
-		ops = uv123_bau_ops;
+		ops = uv2_3_bau_ops;
 	else if (is_uv2_hub())
-		ops = uv123_bau_ops;
+		ops = uv2_3_bau_ops;
 	else if (is_uv1_hub())
-		ops = uv123_bau_ops;
+		ops = uv1_bau_ops;
 
 	for_each_possible_cpu(cur_cpu) {
 		mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);