mirror of https://gitee.com/openkylin/linux.git
Merge branch 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 platform updates from Ingo Molnar:
 "The main change is the addition of SGI/UV4 support"

* 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits)
  x86/platform/UV: Fix incorrect nodes and pnodes for cpuless and memoryless nodes
  x86/platform/UV: Remove Obsolete GRU MMR address translation
  x86/platform/UV: Update physical address conversions for UV4
  x86/platform/UV: Build GAM reference tables
  x86/platform/UV: Support UV4 socket address changes
  x86/platform/UV: Add obtaining GAM Range Table from UV BIOS
  x86/platform/UV: Add UV4 addressing discovery function
  x86/platform/UV: Fold blade info into per node hub info structs
  x86/platform/UV: Allocate common per node hub info structs on local node
  x86/platform/UV: Move blade local processor ID to the per cpu info struct
  x86/platform/UV: Move scir info to the per cpu info struct
  x86/platform/UV: Create per cpu info structs to replace per hub info structs
  x86/platform/UV: Update MMIOH setup function to work for both UV3 and UV4
  x86/platform/UV: Clean up redunduncies after merge of UV4 MMR definitions
  x86/platform/UV: Add UV4 Specific MMR definitions
  x86/platform/UV: Prep for UV4 MMR updates
  x86/platform/UV: Add UV MMR Illegal Access Function
  x86/platform/UV: Add UV4 Specific Defines
  x86/platform/UV: Add UV Architecture Defines
  x86/platform/UV: Add Initial UV4 definitions
  ...
commit bc231d9ede
@@ -131,6 +131,7 @@ parameter is applicable:
			More X86-64 boot options can be found in
			Documentation/x86/x86_64/boot-options.txt .
	X86	Either 32-bit or 64-bit x86 (same as X86-32+X86-64)
	X86_UV	SGI UV support is enabled.
	XEN	Xen support is enabled

	In addition, the following text indicates that the option:

@@ -542,6 +543,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
			Format: <int> (must be >=0)
			Default: 64

	bau=		[X86_UV] Enable the BAU on SGI UV. The default
			behavior is to disable the BAU (i.e. bau=0).
			Format: { "0" | "1" }
			0 - Disable the BAU.
			1 - Enable the BAU.
			unset - Disable the BAU.

	baycom_epp=	[HW,AX25]
			Format: <io>,<mode>
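As a usage illustration (not part of the patch set): enabling the BAU at boot simply means appending the parameter to the kernel command line in the bootloader entry. The kernel image path below is made up:

    linux /boot/vmlinuz root=/dev/sda1 ro bau=1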
@@ -17,27 +17,6 @@ static inline unsigned int get_bios_ebda(void)
        return address; /* 0 means none */
}

/*
 * Return the sanitized length of the EBDA in bytes, if it exists.
 */
static inline unsigned int get_bios_ebda_length(void)
{
        unsigned int address;
        unsigned int length;

        address = get_bios_ebda();
        if (!address)
                return 0;

        /* EBDA length is byte 0 of the EBDA (stored in KiB) */
        length = *(unsigned char *)phys_to_virt(address);
        length <<= 10;

        /* Trim the length if it extends beyond 640KiB */
        length = min_t(unsigned int, (640 * 1024) - address, length);
        return length;
}

void reserve_ebda_region(void);

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
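The helper removed above derives the EBDA length from byte 0 of the EBDA (stored in KiB) and clamps it so it never extends past the 640KiB boundary. For illustration only, a small stand-alone sketch of that same arithmetic with made-up values (base 0x9FC00, length byte 1):

#include <stdio.h>

/* Mirrors get_bios_ebda_length()'s arithmetic with example values. */
int main(void)
{
        unsigned int address = 0x9fc00; /* hypothetical EBDA base (below 640KiB) */
        unsigned int length  = 1;       /* hypothetical EBDA byte 0, in KiB */

        length <<= 10;                  /* KiB -> bytes: 1024 */

        /* Trim so the EBDA never extends past the 640KiB boundary */
        if (length > (640 * 1024) - address)
                length = (640 * 1024) - address;

        printf("EBDA length = %u bytes\n", length); /* prints 1024 */
        return 0;
}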
@@ -51,15 +51,66 @@ enum {
        BIOS_STATUS_UNAVAIL = -EBUSY
};

/* Address map parameters */
struct uv_gam_parameters {
        u64 mmr_base;
        u64 gru_base;
        u8 mmr_shift; /* Convert PNode to MMR space offset */
        u8 gru_shift; /* Convert PNode to GRU space offset */
        u8 gpa_shift; /* Size of offset field in GRU phys addr */
        u8 unused1;
};

/* UV_TABLE_GAM_RANGE_ENTRY values */
#define UV_GAM_RANGE_TYPE_UNUSED 0 /* End of table */
#define UV_GAM_RANGE_TYPE_RAM 1 /* Normal RAM */
#define UV_GAM_RANGE_TYPE_NVRAM 2 /* Non-volatile memory */
#define UV_GAM_RANGE_TYPE_NV_WINDOW 3 /* NVMDIMM block window */
#define UV_GAM_RANGE_TYPE_NV_MAILBOX 4 /* NVMDIMM mailbox */
#define UV_GAM_RANGE_TYPE_HOLE 5 /* Unused address range */
#define UV_GAM_RANGE_TYPE_MAX 6

/* The structure stores PA bits 56:26, for 64MB granularity */
#define UV_GAM_RANGE_SHFT 26 /* 64MB */

struct uv_gam_range_entry {
        char type;      /* Entry type: GAM_RANGE_TYPE_UNUSED, etc. */
        char unused1;
        u16 nasid;      /* HNasid */
        u16 sockid;     /* Socket ID, high bits of APIC ID */
        u16 pnode;      /* Index to MMR and GRU spaces */
        u32 pxm;        /* ACPI proximity domain number */
        u32 limit;      /* PA bits 56:26 (UV_GAM_RANGE_SHFT) */
};
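To make the 64MB granularity concrete, a minimal user-space sketch (not kernel code): each entry's limit, shifted left by UV_GAM_RANGE_SHFT, is the exclusive end of its address range, and the previous entry's limit is its base. The two-entry table below is invented, and the struct is redeclared locally so the example stands alone:

#include <stdio.h>
#include <stdint.h>

#define UV_GAM_RANGE_SHFT 26 /* limits are PA bits 56:26, i.e. 64MB units */

/* Local stand-in for the kernel's struct uv_gam_range_entry */
struct gam_range {
        uint32_t limit; /* end of range in 64MB units (exclusive) */
        uint16_t nasid;
};

int main(void)
{
        /* Hypothetical two-entry table: 0..8GB on nasid 0, 8..16GB on nasid 2 */
        struct gam_range tbl[] = { { 128, 0 }, { 256, 2 } };
        uint64_t base = 0;

        for (unsigned i = 0; i < 2; i++) {
                uint64_t end = (uint64_t)tbl[i].limit << UV_GAM_RANGE_SHFT;

                printf("entry %u: 0x%llx-0x%llx nasid %u\n", i,
                       (unsigned long long)base, (unsigned long long)end,
                       tbl[i].nasid);
                base = end; /* next range starts where this one ends */
        }
        return 0;
}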

#define UV_SYSTAB_SIG "UVST"
#define UV_SYSTAB_VERSION_1 1 /* UV1/2/3 BIOS version */
#define UV_SYSTAB_VERSION_UV4 0x400 /* UV4 BIOS base version */
#define UV_SYSTAB_VERSION_UV4_1 0x401 /* + gpa_shift */
#define UV_SYSTAB_VERSION_UV4_2 0x402 /* + TYPE_NVRAM/WINDOW/MBOX */
#define UV_SYSTAB_VERSION_UV4_LATEST UV_SYSTAB_VERSION_UV4_2

#define UV_SYSTAB_TYPE_UNUSED 0 /* End of table (offset == 0) */
#define UV_SYSTAB_TYPE_GAM_PARAMS 1 /* GAM PARAM conversions */
#define UV_SYSTAB_TYPE_GAM_RNG_TBL 2 /* GAM entry table */
#define UV_SYSTAB_TYPE_MAX 3

/*
 * The UV system table describes specific firmware
 * capabilities available to the Linux kernel at runtime.
 */
struct uv_systab {
        char signature[4];      /* must be "UVST" */
        char signature[4];      /* must be UV_SYSTAB_SIG */
        u32 revision;           /* distinguish different firmware revs */
        u64 function;           /* BIOS runtime callback function ptr */
        u32 size;               /* systab size (starting with _VERSION_UV4) */
        struct {
                u32 type:8;     /* type of entry */
                u32 offset:24;  /* byte offset from struct start to entry */
        } entry[1];             /* additional entries follow */
};
extern struct uv_systab *uv_systab;
/* (... end of definitions from UV BIOS ...) */
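A hedged sketch of how a consumer of this table might walk entry[], using the type/offset pair added above; the walk ends at UV_SYSTAB_TYPE_UNUSED (offset == 0). This is illustrative only, not the decoding code from this series:

/* Illustrative only: count and dispatch the variable-length entries that
 * follow the fixed header.  Assumes 'st' was mapped and validated already. */
static int walk_uv_systab(struct uv_systab *st)
{
        int i;

        for (i = 0; st->entry[i].type != UV_SYSTAB_TYPE_UNUSED; i++) {
                void *p = (char *)st + st->entry[i].offset; /* byte offset */

                switch (st->entry[i].type) {
                case UV_SYSTAB_TYPE_GAM_PARAMS:
                        /* p points at a struct uv_gam_parameters */
                        break;
                case UV_SYSTAB_TYPE_GAM_RNG_TBL:
                        /* p points at struct uv_gam_range_entry entries,
                         * terminated by a UV_GAM_RANGE_TYPE_UNUSED entry */
                        break;
                default:
                        break;
                }
                (void)p;
        }
        return i; /* number of entries seen */
}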

enum {
        BIOS_FREQ_BASE_PLATFORM = 0,

@@ -99,7 +150,11 @@ extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *);
extern int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus);

#ifdef CONFIG_EFI
extern void uv_bios_init(void);
#else
void uv_bios_init(void) { }
#endif

extern unsigned long sn_rtc_cycles_per_second;
extern int uv_type;

@@ -107,7 +162,7 @@ extern long sn_partition_id;
extern long sn_coherency_id;
extern long sn_region_size;
extern long system_serial_number;
#define partition_coherence_id() (sn_coherency_id)
#define uv_partition_coherence_id() (sn_coherency_id)

extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
@@ -598,7 +598,7 @@ struct bau_control {
        int timeout_tries;
        int ipi_attempts;
        int conseccompletes;
        short nobau;
        bool nobau;
        short baudisabled;
        short cpu;
        short osnode;
@@ -16,9 +16,11 @@
#include <linux/percpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/topology.h>
#include <asm/types.h>
#include <asm/percpu.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/bios.h>
#include <asm/irq_vectors.h>
#include <asm/io_apic.h>

@@ -103,7 +105,6 @@
 * processor APICID register.
 */

/*
 * Maximum number of bricks in all partitions and in all coherency domains.
 * This is the total number of bricks accessible in the numalink fabric. It

@@ -127,6 +128,7 @@
 */
#define UV_MAX_NASID_VALUE (UV_MAX_NUMALINK_BLADES * 2)

/* System Controller Interface Reg info */
struct uv_scir_s {
        struct timer_list timer;
        unsigned long offset;

@@ -137,71 +139,173 @@ struct uv_scir_s {
        unsigned char enabled;
};

/* GAM (globally addressed memory) range table */
struct uv_gam_range_s {
        u32 limit;      /* PA bits 56:26 (GAM_RANGE_SHFT) */
        u16 nasid;      /* node's global physical address */
        s8 base;        /* entry index of node's base addr */
        u8 reserved;
};

/*
 * The following defines attributes of the HUB chip. These attributes are
 * frequently referenced and are kept in the per-cpu data areas of each cpu.
 * They are kept together in a struct to minimize cache misses.
 * frequently referenced and are kept in a common per hub struct.
 * After setup, the struct is read only, so it should be readily
 * available in the L3 cache on the cpu socket for the node.
 */
struct uv_hub_info_s {
        unsigned long global_mmr_base;
        unsigned long global_mmr_shift;
        unsigned long gpa_mask;
        unsigned int gnode_extra;
        unsigned short *socket_to_node;
        unsigned short *socket_to_pnode;
        unsigned short *pnode_to_socket;
        struct uv_gam_range_s *gr_table;
        unsigned short min_socket;
        unsigned short min_pnode;
        unsigned char m_val;
        unsigned char n_val;
        unsigned char gr_table_len;
        unsigned char hub_revision;
        unsigned char apic_pnode_shift;
        unsigned char gpa_shift;
        unsigned char m_shift;
        unsigned char n_lshift;
        unsigned int gnode_extra;
        unsigned long gnode_upper;
        unsigned long lowmem_remap_top;
        unsigned long lowmem_remap_base;
        unsigned long global_gru_base;
        unsigned long global_gru_shift;
        unsigned short pnode;
        unsigned short pnode_mask;
        unsigned short coherency_domain_number;
        unsigned short numa_blade_id;
        unsigned char blade_processor_id;
        unsigned char m_val;
        unsigned char n_val;
        struct uv_scir_s scir;
        unsigned short nr_possible_cpus;
        unsigned short nr_online_cpus;
        short memory_nid;
};

DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define uv_hub_info this_cpu_ptr(&__uv_hub_info)
#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
/* CPU specific info with a pointer to the hub common info struct */
struct uv_cpu_info_s {
        void *p_uv_hub_info;
        unsigned char blade_cpu_id;
        struct uv_scir_s scir;
};
DECLARE_PER_CPU(struct uv_cpu_info_s, __uv_cpu_info);

#define uv_cpu_info this_cpu_ptr(&__uv_cpu_info)
#define uv_cpu_info_per(cpu) (&per_cpu(__uv_cpu_info, cpu))

#define uv_scir_info (&uv_cpu_info->scir)
#define uv_cpu_scir_info(cpu) (&uv_cpu_info_per(cpu)->scir)

/* Node specific hub common info struct */
extern void **__uv_hub_info_list;
static inline struct uv_hub_info_s *uv_hub_info_list(int node)
{
        return (struct uv_hub_info_s *)__uv_hub_info_list[node];
}

static inline struct uv_hub_info_s *_uv_hub_info(void)
{
        return (struct uv_hub_info_s *)uv_cpu_info->p_uv_hub_info;
}
#define uv_hub_info _uv_hub_info()

static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
{
        return (struct uv_hub_info_s *)uv_cpu_info_per(cpu)->p_uv_hub_info;
}
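As a reading aid (illustrative, not part of the patch): with the new layout each CPU's uv_cpu_info_s carries p_uv_hub_info, a pointer to its node's shared uv_hub_info_s, so a per-CPU field lookup is a single pointer chase through the accessors defined above:

/* Illustrative only: fetch the pnode for an arbitrary cpu via the new
 * per-cpu -> per-node indirection. */
static inline int example_cpu_to_pnode(int cpu)
{
        struct uv_hub_info_s *hi = uv_cpu_hub_info(cpu); /* follows p_uv_hub_info */

        return hi->pnode;
}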

#define UV_HUB_INFO_VERSION 0x7150
extern int uv_hub_info_version(void);
static inline int uv_hub_info_check(int version)
{
        if (uv_hub_info_version() == version)
                return 0;

        pr_crit("UV: uv_hub_info version(%x) mismatch, expecting(%x)\n",
                uv_hub_info_version(), version);

        BUG();  /* Catastrophic - cannot continue on unknown UV system */
}
#define _uv_hub_info_check() uv_hub_info_check(UV_HUB_INFO_VERSION)

/*
 * Hub revisions less than UV2_HUB_REVISION_BASE are UV1 hubs. All UV2
 * hubs have revision numbers greater than or equal to UV2_HUB_REVISION_BASE.
 * HUB revision ranges for each UV HUB architecture.
 * This is a software convention - NOT the hardware revision numbers in
 * the hub chip.
 */
#define UV1_HUB_REVISION_BASE 1
#define UV2_HUB_REVISION_BASE 3
#define UV3_HUB_REVISION_BASE 5
#define UV4_HUB_REVISION_BASE 7

#ifdef UV1_HUB_IS_SUPPORTED
static inline int is_uv1_hub(void)
{
        return uv_hub_info->hub_revision < UV2_HUB_REVISION_BASE;
}
#else
static inline int is_uv1_hub(void)
{
        return 0;
}
#endif

#ifdef UV2_HUB_IS_SUPPORTED
static inline int is_uv2_hub(void)
{
        return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) &&
                (uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE));
}
#else
static inline int is_uv2_hub(void)
{
        return 0;
}
#endif

#ifdef UV3_HUB_IS_SUPPORTED
static inline int is_uv3_hub(void)
{
        return uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE;
        return ((uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE) &&
                (uv_hub_info->hub_revision < UV4_HUB_REVISION_BASE));
}
#else
static inline int is_uv3_hub(void)
{
        return 0;
}
#endif

#ifdef UV4_HUB_IS_SUPPORTED
static inline int is_uv4_hub(void)
{
        return uv_hub_info->hub_revision >= UV4_HUB_REVISION_BASE;
}
#else
static inline int is_uv4_hub(void)
{
        return 0;
}
#endif

static inline int is_uvx_hub(void)
{
        if (uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE)
                return uv_hub_info->hub_revision;

        return 0;
}

static inline int is_uv_hub(void)
{
#ifdef UV1_HUB_IS_SUPPORTED
        return uv_hub_info->hub_revision;
}

/* code common to uv2 and uv3 only */
static inline int is_uvx_hub(void)
{
        return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
#endif
        return is_uvx_hub();
}

union uvh_apicid {

@@ -243,24 +347,42 @@ union uvh_apicid {
#define UV3_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
#define UV3_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)

#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE : \
                (is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
                UV3_LOCAL_MMR_BASE))
#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE :\
                (is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE :\
                UV3_GLOBAL_MMR32_BASE))
#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
                (is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
                UV3_LOCAL_MMR_SIZE))
#define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\
                (is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE :\
                UV3_GLOBAL_MMR32_SIZE))
#define UV4_LOCAL_MMR_BASE 0xfa000000UL
#define UV4_GLOBAL_MMR32_BASE 0xfc000000UL
#define UV4_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
#define UV4_GLOBAL_MMR32_SIZE (16UL * 1024 * 1024)

#define UV_LOCAL_MMR_BASE ( \
        is_uv1_hub() ? UV1_LOCAL_MMR_BASE : \
        is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
        is_uv3_hub() ? UV3_LOCAL_MMR_BASE : \
        /*is_uv4_hub*/ UV4_LOCAL_MMR_BASE)

#define UV_GLOBAL_MMR32_BASE ( \
        is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE : \
        is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE : \
        is_uv3_hub() ? UV3_GLOBAL_MMR32_BASE : \
        /*is_uv4_hub*/ UV4_GLOBAL_MMR32_BASE)

#define UV_LOCAL_MMR_SIZE ( \
        is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
        is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
        is_uv3_hub() ? UV3_LOCAL_MMR_SIZE : \
        /*is_uv4_hub*/ UV4_LOCAL_MMR_SIZE)

#define UV_GLOBAL_MMR32_SIZE ( \
        is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE : \
        is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE : \
        is_uv3_hub() ? UV3_GLOBAL_MMR32_SIZE : \
        /*is_uv4_hub*/ UV4_GLOBAL_MMR32_SIZE)

#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)

#define UV_GLOBAL_GRU_MMR_BASE 0x4000000

#define UV_GLOBAL_MMR32_PNODE_SHIFT 15
#define UV_GLOBAL_MMR64_PNODE_SHIFT 26
#define _UV_GLOBAL_MMR64_PNODE_SHIFT 26
#define UV_GLOBAL_MMR64_PNODE_SHIFT (uv_hub_info->global_mmr_shift)

#define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT))

@@ -307,18 +429,74 @@ union uvh_apicid {
 * between socket virtual and socket physical addresses.
 */

/* global bits offset - number of local address bits in gpa for this UV arch */
static inline unsigned int uv_gpa_shift(void)
{
        return uv_hub_info->gpa_shift;
}
#define _uv_gpa_shift

/* Find node that has the address range that contains global address */
static inline struct uv_gam_range_s *uv_gam_range(unsigned long pa)
{
        struct uv_gam_range_s *gr = uv_hub_info->gr_table;
        unsigned long pal = (pa & uv_hub_info->gpa_mask) >> UV_GAM_RANGE_SHFT;
        int i, num = uv_hub_info->gr_table_len;

        if (gr) {
                for (i = 0; i < num; i++, gr++) {
                        if (pal < gr->limit)
                                return gr;
                }
        }
        pr_crit("UV: GAM Range for 0x%lx not found at %p!\n", pa, gr);
        BUG();
}

/* Return base address of node that contains global address */
static inline unsigned long uv_gam_range_base(unsigned long pa)
{
        struct uv_gam_range_s *gr = uv_gam_range(pa);
        int base = gr->base;

        if (base < 0)
                return 0UL;

        return uv_hub_info->gr_table[base].limit;
}

/* socket phys RAM --> UV global NASID (UV4+) */
static inline unsigned long uv_soc_phys_ram_to_nasid(unsigned long paddr)
{
        return uv_gam_range(paddr)->nasid;
}
#define _uv_soc_phys_ram_to_nasid

/* socket virtual --> UV global NASID (UV4+) */
static inline unsigned long uv_gpa_nasid(void *v)
{
        return uv_soc_phys_ram_to_nasid(__pa(v));
}

/* socket phys RAM --> UV global physical address */
static inline unsigned long uv_soc_phys_ram_to_gpa(unsigned long paddr)
{
        unsigned int m_val = uv_hub_info->m_val;

        if (paddr < uv_hub_info->lowmem_remap_top)
                paddr |= uv_hub_info->lowmem_remap_base;
        paddr |= uv_hub_info->gnode_upper;
        paddr = ((paddr << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
                ((paddr >> uv_hub_info->m_val) << uv_hub_info->n_lshift);
        if (m_val)
                paddr = ((paddr << uv_hub_info->m_shift)
                        >> uv_hub_info->m_shift) |
                        ((paddr >> uv_hub_info->m_val)
                        << uv_hub_info->n_lshift);
        else
                paddr |= uv_soc_phys_ram_to_nasid(paddr)
                        << uv_hub_info->gpa_shift;
        return paddr;
}

/* socket virtual --> UV global physical address */
static inline unsigned long uv_gpa(void *v)
{

@@ -338,54 +516,89 @@ static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
        unsigned long paddr;
        unsigned long remap_base = uv_hub_info->lowmem_remap_base;
        unsigned long remap_top = uv_hub_info->lowmem_remap_top;
        unsigned int m_val = uv_hub_info->m_val;

        if (m_val)
                gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
                        ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);

        gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
                ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
        paddr = gpa & uv_hub_info->gpa_mask;
        if (paddr >= remap_base && paddr < remap_base + remap_top)
                paddr -= remap_base;
        return paddr;
}

/* gpa -> pnode */
/* gpa -> gnode */
static inline unsigned long uv_gpa_to_gnode(unsigned long gpa)
{
        return gpa >> uv_hub_info->n_lshift;
        unsigned int n_lshift = uv_hub_info->n_lshift;

        if (n_lshift)
                return gpa >> n_lshift;

        return uv_gam_range(gpa)->nasid >> 1;
}

/* gpa -> pnode */
static inline int uv_gpa_to_pnode(unsigned long gpa)
{
        unsigned long n_mask = (1UL << uv_hub_info->n_val) - 1;

        return uv_gpa_to_gnode(gpa) & n_mask;
        return uv_gpa_to_gnode(gpa) & uv_hub_info->pnode_mask;
}

/* gpa -> node offset*/
/* gpa -> node offset */
static inline unsigned long uv_gpa_to_offset(unsigned long gpa)
{
        return (gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift;
        unsigned int m_shift = uv_hub_info->m_shift;

        if (m_shift)
                return (gpa << m_shift) >> m_shift;

        return (gpa & uv_hub_info->gpa_mask) - uv_gam_range_base(gpa);
}
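To make the shift arithmetic above concrete, here is a stand-alone sketch of the m_val != 0 (pre-UV4) encoding, where a global address is the node offset with the gnode packed above it. The field widths below are invented, not real UV parameters; the UV4 case (m_val == 0) instead resolves through the GAM range table as shown above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Made-up geometry: 40 offset bits, gnode placed at bit 40 */
        const unsigned m_val    = 40;          /* local offset width      */
        const unsigned m_shift  = 64 - m_val;  /* clears everything above */
        const unsigned n_lshift = 40;          /* where the gnode lands   */

        uint64_t offset = 0x12345678ULL;
        uint64_t gnode  = 5;

        uint64_t gpa = ((offset << m_shift) >> m_shift) | (gnode << n_lshift);

        /* Decompose again, mirroring uv_gpa_to_gnode()/uv_gpa_to_offset() */
        printf("gnode=%llu offset=0x%llx\n",
               (unsigned long long)(gpa >> n_lshift),
               (unsigned long long)((gpa << m_shift) >> m_shift));
        return 0;
}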

/* Convert socket to node */
static inline int _uv_socket_to_node(int socket, unsigned short *s2nid)
{
        return s2nid ? s2nid[socket - uv_hub_info->min_socket] : socket;
}

static inline int uv_socket_to_node(int socket)
{
        return _uv_socket_to_node(socket, uv_hub_info->socket_to_node);
}

/* pnode, offset --> socket virtual */
static inline void *uv_pnode_offset_to_vaddr(int pnode, unsigned long offset)
{
        return __va(((unsigned long)pnode << uv_hub_info->m_val) | offset);
        unsigned int m_val = uv_hub_info->m_val;
        unsigned long base;
        unsigned short sockid, node, *p2s;

        if (m_val)
                return __va(((unsigned long)pnode << m_val) | offset);

        p2s = uv_hub_info->pnode_to_socket;
        sockid = p2s ? p2s[pnode - uv_hub_info->min_pnode] : pnode;
        node = uv_socket_to_node(sockid);

        /* limit address of previous socket is our base, except node 0 is 0 */
        if (!node)
                return __va((unsigned long)offset);

        base = (unsigned long)(uv_hub_info->gr_table[node - 1].limit);
        return __va(base << UV_GAM_RANGE_SHFT | offset);
}

/*
 * Extract a PNODE from an APICID (full apicid, not processor subset)
 */
/* Extract/Convert a PNODE from an APICID (full apicid, not processor subset) */
static inline int uv_apicid_to_pnode(int apicid)
{
        return (apicid >> uv_hub_info->apic_pnode_shift);
        int pnode = apicid >> uv_hub_info->apic_pnode_shift;
        unsigned short *s2pn = uv_hub_info->socket_to_pnode;

        return s2pn ? s2pn[pnode - uv_hub_info->min_socket] : pnode;
}

/*
 * Convert an apicid to the socket number on the blade
 */
/* Convert an apicid to the socket number on the blade */
static inline int uv_apicid_to_socket(int apicid)
{
        if (is_uv1_hub())

@@ -434,16 +647,6 @@ static inline unsigned long uv_read_global_mmr64(int pnode, unsigned long offset
        return readq(uv_global_mmr64_address(pnode, offset));
}

/*
 * Global MMR space addresses when referenced by the GRU. (GRU does
 * NOT use socket addressing).
 */
static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long offset)
{
        return UV_GLOBAL_GRU_MMR_BASE | offset |
                ((unsigned long)pnode << uv_hub_info->m_val);
}

static inline void uv_write_global_mmr8(int pnode, unsigned long offset, unsigned char val)
{
        writeb(val, uv_global_mmr64_address(pnode, offset));

@@ -483,27 +686,23 @@ static inline void uv_write_local_mmr8(unsigned long offset, unsigned char val)
        writeb(val, uv_local_mmr_address(offset));
}

/*
 * Structures and definitions for converting between cpu, node, pnode, and blade
 * numbers.
 */
struct uv_blade_info {
        unsigned short nr_possible_cpus;
        unsigned short nr_online_cpus;
        unsigned short pnode;
        short memory_nid;
        spinlock_t nmi_lock;    /* obsolete, see uv_hub_nmi */
        unsigned long nmi_count;        /* obsolete, see uv_hub_nmi */
};
extern struct uv_blade_info *uv_blade_info;
extern short *uv_node_to_blade;
extern short *uv_cpu_to_blade;
extern short uv_possible_blades;

/* Blade-local cpu number of current cpu. Numbered 0 .. <# cpus on the blade> */
static inline int uv_blade_processor_id(void)
{
        return uv_hub_info->blade_processor_id;
        return uv_cpu_info->blade_cpu_id;
}

/* Blade-local cpu number of cpu N. Numbered 0 .. <# cpus on the blade> */
static inline int uv_cpu_blade_processor_id(int cpu)
{
        return uv_cpu_info_per(cpu)->blade_cpu_id;
}
#define _uv_cpu_blade_processor_id 1 /* indicate function available */

/* Blade number to Node number (UV1..UV4 is 1:1) */
static inline int uv_blade_to_node(int blade)
{
        return blade;
}

/* Blade number of current cpu. Numbered 0 .. <#blades -1> */

@@ -512,55 +711,60 @@ static inline int uv_numa_blade_id(void)
        return uv_hub_info->numa_blade_id;
}

/*
 * Convert linux node number to the UV blade number.
 * .. Currently for UV1 thru UV4 the node and the blade are identical.
 * .. If this changes then you MUST check references to this function!
 */
static inline int uv_node_to_blade_id(int nid)
{
        return nid;
}

/* Convert a cpu number to the UV blade number */
static inline int uv_cpu_to_blade_id(int cpu)
{
        return uv_cpu_to_blade[cpu];
}

/* Convert linux node number to the UV blade number */
static inline int uv_node_to_blade_id(int nid)
{
        return uv_node_to_blade[nid];
        return uv_node_to_blade_id(cpu_to_node(cpu));
}

/* Convert a blade id to the PNODE of the blade */
static inline int uv_blade_to_pnode(int bid)
{
        return uv_blade_info[bid].pnode;
        return uv_hub_info_list(uv_blade_to_node(bid))->pnode;
}

/* Nid of memory node on blade. -1 if no blade-local memory */
static inline int uv_blade_to_memory_nid(int bid)
{
        return uv_blade_info[bid].memory_nid;
        return uv_hub_info_list(uv_blade_to_node(bid))->memory_nid;
}

/* Determine the number of possible cpus on a blade */
static inline int uv_blade_nr_possible_cpus(int bid)
{
        return uv_blade_info[bid].nr_possible_cpus;
        return uv_hub_info_list(uv_blade_to_node(bid))->nr_possible_cpus;
}

/* Determine the number of online cpus on a blade */
static inline int uv_blade_nr_online_cpus(int bid)
{
        return uv_blade_info[bid].nr_online_cpus;
        return uv_hub_info_list(uv_blade_to_node(bid))->nr_online_cpus;
}

/* Convert a cpu id to the PNODE of the blade containing the cpu */
static inline int uv_cpu_to_pnode(int cpu)
{
        return uv_blade_info[uv_cpu_to_blade_id(cpu)].pnode;
        return uv_cpu_hub_info(cpu)->pnode;
}

/* Convert a linux node number to the PNODE of the blade */
static inline int uv_node_to_pnode(int nid)
{
        return uv_blade_info[uv_node_to_blade_id(nid)].pnode;
        return uv_hub_info_list(nid)->pnode;
}
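A short illustrative sketch (assuming only the definitions above, nothing new) of how the reworked helpers compose now that blade and node are 1:1 and the former uv_blade_info[] fields live in the per node hub info:

/* Illustrative only: cpu -> blade -> pnode via the per node hub info. */
static inline int example_cpu_to_blade_pnode(int cpu)
{
        int bid = uv_cpu_to_blade_id(cpu);   /* == cpu_to_node(cpu) here */

        return uv_blade_to_pnode(bid);       /* reads uv_hub_info_list() */
}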

/* Maximum possible number of blades */
extern short uv_possible_blades;
static inline int uv_num_possible_blades(void)
{
        return uv_possible_blades;

@@ -578,9 +782,7 @@ extern void uv_nmi_setup(void);
/* Newer SMM NMI handler, not present in all systems */
#define UVH_NMI_MMRX UVH_EVENT_OCCURRED0
#define UVH_NMI_MMRX_CLEAR UVH_EVENT_OCCURRED0_ALIAS
#define UVH_NMI_MMRX_SHIFT (is_uv1_hub() ? \
        UV1H_EVENT_OCCURRED0_EXTIO_INT0_SHFT :\
        UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT)
#define UVH_NMI_MMRX_SHIFT UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT
#define UVH_NMI_MMRX_TYPE "EXTIO_INT0"

/* Non-zero indicates newer SMM NMI handler present */

@@ -622,9 +824,9 @@ DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
/* Update SCIR state */
static inline void uv_set_scir_bits(unsigned char value)
{
        if (uv_hub_info->scir.state != value) {
                uv_hub_info->scir.state = value;
                uv_write_local_mmr8(uv_hub_info->scir.offset, value);
        if (uv_scir_info->state != value) {
                uv_scir_info->state = value;
                uv_write_local_mmr8(uv_scir_info->offset, value);
        }
}

@@ -635,10 +837,10 @@ static inline unsigned long uv_scir_offset(int apicid)

static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
{
        if (uv_cpu_hub_info(cpu)->scir.state != value) {
        if (uv_cpu_scir_info(cpu)->state != value) {
                uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
                        uv_cpu_hub_info(cpu)->scir.offset, value);
                uv_cpu_hub_info(cpu)->scir.state = value;
                        uv_cpu_scir_info(cpu)->offset, value);
                uv_cpu_scir_info(cpu)->state = value;
        }
}

@@ -666,10 +868,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)

/*
 * Get the minimum revision number of the hub chips within the partition.
 * 1 - UV1 rev 1.0 initial silicon
 * 2 - UV1 rev 2.0 production silicon
 * 3 - UV2 rev 1.0 initial silicon
 * 5 - UV3 rev 1.0 initial silicon
 * (See UVx_HUB_REVISION_BASE above for specific values.)
 */
static inline int uv_get_min_hub_revision_id(void)
{
(Two file diffs are suppressed because they are too large.)
@@ -21,19 +21,20 @@

#include <linux/efi.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/efi.h>
#include <linux/io.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv_hub.h>

static struct uv_systab uv_systab;
struct uv_systab *uv_systab;

s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
{
        struct uv_systab *tab = &uv_systab;
        struct uv_systab *tab = uv_systab;
        s64 ret;

        if (!tab->function)
        if (!tab || !tab->function)
                /*
                 * BIOS does not support UV systab
                 */

@@ -183,34 +184,31 @@ int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus)
}
EXPORT_SYMBOL_GPL(uv_bios_set_legacy_vga_target);

#ifdef CONFIG_EFI
void uv_bios_init(void)
{
        struct uv_systab *tab;

        if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
            (efi.uv_systab == (unsigned long)NULL)) {
                printk(KERN_CRIT "No EFI UV System Table.\n");
                uv_systab.function = (unsigned long)NULL;
        uv_systab = NULL;
        if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab) {
                pr_crit("UV: UVsystab: missing\n");
                return;
        }

        tab = (struct uv_systab *)ioremap(efi.uv_systab,
                sizeof(struct uv_systab));
        if (strncmp(tab->signature, "UVST", 4) != 0)
                printk(KERN_ERR "bad signature in UV system table!");
        uv_systab = ioremap(efi.uv_systab, sizeof(struct uv_systab));
        if (!uv_systab || strncmp(uv_systab->signature, UV_SYSTAB_SIG, 4)) {
                pr_err("UV: UVsystab: bad signature!\n");
                iounmap(uv_systab);
                return;
        }

        /*
         * Copy table to permanent spot for later use.
         */
        memcpy(&uv_systab, tab, sizeof(struct uv_systab));
        iounmap(tab);

        printk(KERN_INFO "EFI UV System Table Revision %d\n",
                uv_systab.revision);
        if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) {
                iounmap(uv_systab);
                uv_systab = ioremap(efi.uv_systab, uv_systab->size);
                if (!uv_systab) {
                        pr_err("UV: UVsystab: ioremap(%d) failed!\n",
                                uv_systab->size);
                        return;
                }
        }
        pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
}
#else /* !CONFIG_EFI */

void uv_bios_init(void) { }
#endif
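The reworked uv_bios_init() above maps the table twice: a fixed-size ioremap() to read the header, then, for UV4+ revisions, a remap at the size the header reports. A generic, hedged sketch of that pattern (the map_systab() name and the phys parameter are made up for illustration):

/* Illustrative only: map a header first, then remap to the full size it
 * declares.  'phys' stands in for efi.uv_systab. */
static struct uv_systab *map_systab(unsigned long phys)
{
        struct uv_systab *st = ioremap(phys, sizeof(*st));

        if (!st)
                return NULL;

        if (st->revision >= UV_SYSTAB_VERSION_UV4 && st->size > sizeof(*st)) {
                u32 size = st->size;            /* save before unmapping   */

                iounmap(st);                    /* drop the short mapping  */
                st = ioremap(phys, size);       /* map the whole table now */
        }
        return st;
}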
@@ -37,7 +37,7 @@ static int timeout_base_ns[] = {
};

static int timeout_us;
static int nobau;
static bool nobau = true;
static int nobau_perm;
static cycles_t congested_cycles;

@@ -106,13 +106,28 @@ static char *stat_description[] = {
        "enable: number times use of the BAU was re-enabled"
};

static int __init
setup_nobau(char *arg)
static int __init setup_bau(char *arg)
{
        nobau = 1;
        int result;

        if (!arg)
                return -EINVAL;

        result = strtobool(arg, &nobau);
        if (result)
                return result;

        /* we need to flip the logic here, so that bau=y sets nobau to false */
        nobau = !nobau;

        if (!nobau)
                pr_info("UV BAU Enabled\n");
        else
                pr_info("UV BAU Disabled\n");

        return 0;
}
early_param("nobau", setup_nobau);
early_param("bau", setup_bau);

/* base pnode in this partition */
static int uv_base_pnode __read_mostly;

@@ -131,10 +146,10 @@ set_bau_on(void)
                pr_info("BAU not initialized; cannot be turned on\n");
                return;
        }
        nobau = 0;
        nobau = false;
        for_each_present_cpu(cpu) {
                bcp = &per_cpu(bau_control, cpu);
                bcp->nobau = 0;
                bcp->nobau = false;
        }
        pr_info("BAU turned on\n");
        return;

@@ -146,10 +161,10 @@ set_bau_off(void)
        int cpu;
        struct bau_control *bcp;

        nobau = 1;
        nobau = true;
        for_each_present_cpu(cpu) {
                bcp = &per_cpu(bau_control, cpu);
                bcp->nobau = 1;
                bcp->nobau = true;
        }
        pr_info("BAU turned off\n");
        return;

@@ -1886,7 +1901,7 @@ static void __init init_per_cpu_tunables(void)
                bcp = &per_cpu(bau_control, cpu);
                bcp->baudisabled = 0;
                if (nobau)
                        bcp->nobau = 1;
                        bcp->nobau = true;
                bcp->statp = &per_cpu(ptcstats, cpu);
                /* time interval to catch a hardware stay-busy bug */
                bcp->timeout_interval = usec_2_cycles(2*timeout_us);

@@ -2025,7 +2040,8 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
                return 1;
        }
        bcp->uvhub_master = *hmasterp;
        bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
        bcp->uvhub_cpu = uv_cpu_blade_processor_id(cpu);

        if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
                printk(KERN_EMERG "%d cpus per uvhub invalid\n",
                        bcp->uvhub_cpu);
@@ -34,7 +34,7 @@ static ssize_t partition_id_show(struct kobject *kobj,
static ssize_t coherence_id_show(struct kobject *kobj,
                        struct kobj_attribute *attr, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%ld\n", partition_coherence_id());
        return snprintf(buf, PAGE_SIZE, "%ld\n", uv_partition_coherence_id());
}

static struct kobj_attribute partition_id_attr =
@@ -165,7 +165,7 @@ static __init int uv_rtc_allocate_timers(void)
        for_each_present_cpu(cpu) {
                int nid = cpu_to_node(cpu);
                int bid = uv_cpu_to_blade_id(cpu);
                int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
                int bcpu = uv_cpu_blade_processor_id(cpu);
                struct uv_rtc_timer_head *head = blade_info[bid];

                if (!head) {

@@ -226,7 +226,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
        int pnode = uv_cpu_to_pnode(cpu);
        int bid = uv_cpu_to_blade_id(cpu);
        struct uv_rtc_timer_head *head = blade_info[bid];
        int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
        int bcpu = uv_cpu_blade_processor_id(cpu);
        u64 *t = &head->cpu[bcpu].expires;
        unsigned long flags;
        int next_cpu;

@@ -262,7 +262,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
        int pnode = uv_cpu_to_pnode(cpu);
        int bid = uv_cpu_to_blade_id(cpu);
        struct uv_rtc_timer_head *head = blade_info[bid];
        int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
        int bcpu = uv_cpu_blade_processor_id(cpu);
        u64 *t = &head->cpu[bcpu].expires;
        unsigned long flags;
        int rc = 0;
@@ -718,8 +718,8 @@ static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
                        void *mesg, int lines)
{
        unsigned long m, *val = mesg, gpa, save;
        int ret;
        unsigned long m;
        int ret, loops = 200; /* experimentally determined */

        m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
        if (lines == 2) {

@@ -735,22 +735,28 @@ static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
        return MQE_OK;

        /*
         * Send a cross-partition interrupt to the SSI that contains the target
         * message queue. Normally, the interrupt is automatically delivered by
         * hardware but some error conditions require explicit delivery.
         * Use the GRU to deliver the interrupt. Otherwise partition failures
         * Send a noop message in order to deliver a cross-partition interrupt
         * to the SSI that contains the target message queue. Normally, the
         * interrupt is automatically delivered by hardware following mesq
         * operations, but some error conditions require explicit delivery.
         * The noop message will trigger delivery. Otherwise partition failures
         * could cause unrecovered errors.
         */
        gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
        save = *val;
        *val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
                dest_Fixed);
        gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
        ret = gru_wait(cb);
        *val = save;
        if (ret != CBS_IDLE)
                return MQE_UNEXPECTED_CB_ERR;
        return MQE_OK;
        do {
                ret = send_noop_message(cb, mqd, mesg);
        } while ((ret == MQIE_AGAIN || ret == MQE_CONGESTION) && (loops-- > 0));

        if (ret == MQIE_AGAIN || ret == MQE_CONGESTION) {
                /*
                 * Don't indicate to the app to resend the message, as it's
                 * already been successfully sent. We simply send an OK
                 * (rather than fail the send with MQE_UNEXPECTED_CB_ERR),
                 * assuming that the other side is receiving enough
                 * interrupts to get this message processed anyway.
                 */
                ret = MQE_OK;
        }
        return ret;
}

/*