x86, mtrr: Constify struct mtrr_ops
This is part of the ops structure constification effort started by
Arjan van de Ven et al.

Benefits of this constification:

 * prevents modification of data that is shared (referenced) by many
   other structure instances at runtime

 * detects/prevents accidental (but not intentional) modification
   attempts on archs that enforce read-only kernel data at runtime

 * potentially better optimized code as the compiler can assume that
   the const data cannot be changed

 * the compiler/linker move const data into .rodata and therefore
   exclude them from false sharing

Signed-off-by: Emese Revfy <re.emese@gmail.com>
LKML-Reference: <4B65D712.3080804@gmail.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
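As a rough standalone sketch of the pattern this patch applies (the structure and function names below are hypothetical, not taken from the kernel sources), declaring an ops instance const is enough for the compiler to reject writes through it and for the linker to place it in .rodata:

/*
 * Minimal illustration of the const-ops pattern. All names here are
 * made up for illustration; only the technique mirrors the diff below.
 */
#include <stdio.h>

struct demo_ops {
	int vendor;
	void (*set)(int reg);
	int (*get)(int reg);
};

static void demo_set(int reg) { printf("set %d\n", reg); }
static int demo_get(int reg) { return reg; }

/*
 * Declared const: the instance is placed in .rodata, a write through it
 * (e.g. "demo_ops_table.set = NULL;") fails at compile time, and on
 * archs that map .rodata read-only a stray runtime write would fault.
 */
static const struct demo_ops demo_ops_table = {
	.vendor = 1,
	.set    = demo_set,
	.get    = demo_get,
};

int main(void)
{
	demo_ops_table.set(3);
	return demo_ops_table.get(3) == 3 ? 0 : 1;
}

Code that only calls through such a table needs no changes; only the declarations that store or pass the pointer have to gain the const qualifier, which is exactly what the diff below does for mtrr_ops[], mtrr_if and set_mtrr_ops().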
commit 3b9cfc0a99
parent ab658321f3
arch/x86/kernel/cpu/mtrr/amd.c
@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
 	return 0;
 }
 
-static struct mtrr_ops amd_mtrr_ops = {
+static const struct mtrr_ops amd_mtrr_ops = {
 	.vendor = X86_VENDOR_AMD,
 	.set = amd_set_mtrr,
 	.get = amd_get_mtrr,
arch/x86/kernel/cpu/mtrr/centaur.c
@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
 	return 0;
 }
 
-static struct mtrr_ops centaur_mtrr_ops = {
+static const struct mtrr_ops centaur_mtrr_ops = {
 	.vendor = X86_VENDOR_CENTAUR,
 	.set = centaur_set_mcr,
 	.get = centaur_get_mcr,
arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
 	post_set();
 }
 
-static struct mtrr_ops cyrix_mtrr_ops = {
+static const struct mtrr_ops cyrix_mtrr_ops = {
 	.vendor = X86_VENDOR_CYRIX,
 	.set_all = cyrix_set_all,
 	.set = cyrix_set_arr,
arch/x86/kernel/cpu/mtrr/generic.c
@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
 /*
  * Generic structure...
  */
-struct mtrr_ops generic_mtrr_ops = {
+const struct mtrr_ops generic_mtrr_ops = {
 	.use_intel_if = 1,
 	.set_all = generic_set_all,
 	.get = generic_get_mtrr,
arch/x86/kernel/cpu/mtrr/main.c
@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
 u64 size_or_mask, size_and_mask;
 static bool mtrr_aps_delayed_init;
 
-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
 
-struct mtrr_ops *mtrr_if;
+const struct mtrr_ops *mtrr_if;
 
 static void set_mtrr(unsigned int reg, unsigned long base,
 		     unsigned long size, mtrr_type type);
 
-void set_mtrr_ops(struct mtrr_ops *ops)
+void set_mtrr_ops(const struct mtrr_ops *ops)
 {
 	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
 		mtrr_ops[ops->vendor] = ops;
arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -32,7 +32,7 @@ extern int generic_get_free_region(unsigned long base, unsigned long size,
 extern int generic_validate_add_page(unsigned long base, unsigned long size,
 				     unsigned int type);
 
-extern struct mtrr_ops generic_mtrr_ops;
+extern const struct mtrr_ops generic_mtrr_ops;
 
 extern int positive_have_wrcomb(void);
 
@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
 	   u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
 void get_mtrr_state(void);
 
-extern void set_mtrr_ops(struct mtrr_ops *ops);
+extern void set_mtrr_ops(const struct mtrr_ops *ops);
 
 extern u64 size_or_mask, size_and_mask;
-extern struct mtrr_ops *mtrr_if;
+extern const struct mtrr_ops *mtrr_if;
 
 #define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
 #define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)