Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cleanups from Ingo Molnar:
 "Various cleanups"

* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/iommu: Fix header comments regarding standard and _FINISH macros
  x86/earlyprintk: Put CONFIG_PCI-only functions under the #ifdef
  x86: Fix up obsolete __cpu_set() function usage
commit 9f3252f1ad
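The last item on the list replaces long-obsolete cpumask accessors with their cpumask_*() equivalents, which always take a struct cpumask pointer. As a rough reference for the mapping (a sketch against the current cpumask API, not code from this commit):

#include <linux/cpumask.h>

/*
 * The obsolete accessors operated on a cpumask_t value:
 *
 *   cpu_set(cpu, mask)     ->  cpumask_set_cpu(cpu, &mask)
 *   cpu_clear(cpu, mask)   ->  cpumask_clear_cpu(cpu, &mask)
 *   cpu_isset(cpu, mask)   ->  cpumask_test_cpu(cpu, &mask)
 *   cpus_clear(mask)       ->  cpumask_clear(&mask)
 */
static void cpumask_api_example(struct cpumask *mask, unsigned int cpu)
{
        cpumask_clear(mask);            /* empty the whole mask */
        cpumask_set_cpu(cpu, mask);     /* add one CPU */
        if (cpumask_test_cpu(cpu, mask))
                cpumask_clear_cpu(cpu, mask);
}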
@@ -79,11 +79,12 @@ struct iommu_table_entry {
  * d). Similar to the 'init', except that this gets called from pci_iommu_init
  *     where we do have a memory allocator.
  *
- * The standard vs the _FINISH differs in that the _FINISH variant will
- * continue detecting other IOMMUs in the call list after the
- * the detection routine returns a positive number. The _FINISH will
- * stop the execution chain. Both will still call the 'init' and
- * 'late_init' functions if they are set.
+ * The standard IOMMU_INIT differs from the IOMMU_INIT_FINISH variant
+ * in that the former will continue detecting other IOMMUs in the call
+ * list after the detection routine returns a positive number, while the
+ * latter will stop the execution chain upon first successful detection.
+ * Both variants will still call the 'init' and 'late_init' functions if
+ * they are set.
  */
 #define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init)        \
         __IOMMU_INIT(_detect, _depend, _init, _late_init, 1)
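For context, these macros register an entry in the IOMMU detection call list. A hedged sketch of typical usage, with hypothetical function names (not from this commit):

#include <linux/init.h>
#include <asm/iommu_table.h>

/* Hypothetical detection routine: a positive return value means
 * this IOMMU is present. */
static int __init my_iommu_detect(void)
{
        return 0;       /* pretend it was not found */
}

static int __init my_iommu_init(void)
{
        return 0;
}

/* The _FINISH variant stops the detection chain once
 * my_iommu_detect() returns a positive number ... */
IOMMU_INIT_FINISH(my_iommu_detect, NULL, my_iommu_init, NULL);

/* ... whereas plain IOMMU_INIT(my_iommu_detect, NULL, my_iommu_init,
 * NULL) would let detection continue down the list. */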
@@ -135,12 +135,12 @@ static void init_x2apic_ldr(void)

         per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);

-        __cpu_set(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
+        cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
         for_each_online_cpu(cpu) {
                 if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
                         continue;
-                __cpu_set(this_cpu, per_cpu(cpus_in_cluster, cpu));
-                __cpu_set(cpu, per_cpu(cpus_in_cluster, this_cpu));
+                cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
+                cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
         }
 }

@@ -195,7 +195,7 @@ static int x2apic_init_cpu_notifier(void)

         BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));

-        __cpu_set(cpu, per_cpu(cpus_in_cluster, cpu));
+        cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
         register_hotcpu_notifier(&x2apic_cpu_notifier);
         return 1;
 }
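Note why no '&' appears in the replacements above: cpus_in_cluster is a per-CPU cpumask_var_t, so per_cpu(cpus_in_cluster, cpu) already evaluates to the struct cpumask * that cpumask_set_cpu() expects. A minimal sketch of that allocation pattern (assumed for illustration, not part of this diff):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);

static int alloc_cluster_mask(unsigned int cpu)
{
        /* once allocated, a cpumask_var_t is usable as a struct cpumask * */
        if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
                return -ENOMEM;
        cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
        return 0;
}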
@@ -95,20 +95,6 @@ static unsigned long early_serial_base = 0x3f8;  /* ttyS0 */
 #define DLL             0       /*  Divisor Latch Low  */
 #define DLH             1       /*  Divisor latch High */

-static void mem32_serial_out(unsigned long addr, int offset, int value)
-{
-        uint32_t *vaddr = (uint32_t *)addr;
-        /* shift implied by pointer type */
-        writel(value, vaddr + offset);
-}
-
-static unsigned int mem32_serial_in(unsigned long addr, int offset)
-{
-        uint32_t *vaddr = (uint32_t *)addr;
-        /* shift implied by pointer type */
-        return readl(vaddr + offset);
-}
-
 static unsigned int io_serial_in(unsigned long addr, int offset)
 {
         return inb(addr + offset);
@@ -205,6 +191,20 @@ static __init void early_serial_init(char *s)
 }

 #ifdef CONFIG_PCI
+static void mem32_serial_out(unsigned long addr, int offset, int value)
+{
+        u32 *vaddr = (u32 *)addr;
+        /* shift implied by pointer type */
+        writel(value, vaddr + offset);
+}
+
+static unsigned int mem32_serial_in(unsigned long addr, int offset)
+{
+        u32 *vaddr = (u32 *)addr;
+        /* shift implied by pointer type */
+        return readl(vaddr + offset);
+}
+
 /*
  * early_pci_serial_init()
  *
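The "shift implied by pointer type" comment is load-bearing: because vaddr is a u32 *, vaddr + offset advances by offset * sizeof(u32) bytes, so register offset 1 (DLH) lands 4 bytes past offset 0 (DLL), matching the 32-bit register stride of such memory-mapped UARTs. A standalone illustration of the arithmetic (plain userspace C, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t regs[8];
        uint32_t *vaddr = regs;
        int offset = 1;         /* e.g. DLH */

        /* pointer arithmetic scales by the pointee size */
        printf("%td\n", (char *)(vaddr + offset) - (char *)vaddr); /* prints 4 */
        return 0;
}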
@@ -217,8 +217,8 @@ static __init void early_pci_serial_init(char *s)
         unsigned divisor;
         unsigned long baud = DEFAULT_BAUD;
         u8 bus, slot, func;
-        uint32_t classcode, bar0;
-        uint16_t cmdreg;
+        u32 classcode, bar0;
+        u16 cmdreg;
         char *e;

@@ -295,7 +295,7 @@ int check_irq_vectors_for_cpu_disable(void)

         this_cpu = smp_processor_id();
         cpumask_copy(&online_new, cpu_online_mask);
-        cpu_clear(this_cpu, online_new);
+        cpumask_clear_cpu(this_cpu, &online_new);

         this_count = 0;
         for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
@@ -307,7 +307,7 @@ int check_irq_vectors_for_cpu_disable(void)

                 data = irq_desc_get_irq_data(desc);
                 cpumask_copy(&affinity_new, data->affinity);
-                cpu_clear(this_cpu, affinity_new);
+                cpumask_clear_cpu(this_cpu, &affinity_new);

                 /* Do not count inactive or per-cpu irqs. */
                 if (!irq_has_action(irq) || irqd_is_per_cpu(data))
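As an aside, the copy-then-clear pattern in these two hunks has a one-call equivalent, which the function in the final hunk below already uses; a hedged alternative formulation (not what this commit does):

/* equivalent to cpumask_copy(&online_new, cpu_online_mask);
 *               cpumask_clear_cpu(this_cpu, &online_new);     */
cpumask_andnot(&online_new, cpu_online_mask, cpumask_of(this_cpu));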
@@ -415,7 +415,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
         struct reset_args reset_args;

         reset_args.sender = sender;
-        cpus_clear(*mask);
+        cpumask_clear(mask);
         /* find a single cpu for each uvhub in this distribution mask */
         maskbits = sizeof(struct pnmask) * BITSPERBYTE;
         /* each bit is a pnode relative to the partition base pnode */
@@ -425,7 +425,7 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
                         continue;
                 apnode = pnode + bcp->partition_base_pnode;
                 cpu = pnode_to_first_cpu(apnode, smaster);
-                cpu_set(cpu, *mask);
+                cpumask_set_cpu(cpu, mask);
         }

         /* IPI all cpus; preemption is already disabled */
@@ -1126,7 +1126,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
         /* don't actually do a shootdown of the local cpu */
         cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

-        if (cpu_isset(cpu, *cpumask))
+        if (cpumask_test_cpu(cpu, cpumask))
                 stat->s_ntargself++;

         bau_desc = bcp->descriptor_base;
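The dropped dereference in this last hunk follows the same logic as the earlier conversions: the obsolete cpu_isset() took a cpumask_t value, while cpumask_test_cpu() takes a const pointer, so a const mask parameter passes straight through. A minimal sketch:

#include <linux/cpumask.h>

/* old: cpu_isset(cpu, *cpumask) had to dereference the parameter;
 * new: the pointer is passed unchanged, const-correctly */
static int targets_self(const struct cpumask *cpumask, int cpu)
{
        return cpumask_test_cpu(cpu, cpumask);
}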