Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc fixes from David Miller:

 1) ldc_alloc_exp_dring() can be called from softints, so use
    GFP_ATOMIC.  From Sowmini Varadhan.

 2) Some minor warning/build fixups for the new iommu-common code on
    certain archs and with certain debug options enabled.  Also from
    Sowmini Varadhan.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc: Use GFP_ATOMIC in ldc_alloc_exp_dring() as it can be called in softirq context
  sparc64: Use M7 PMC write on all chips T4 and onward.
  iommu-common: rename iommu_pool_hash to iommu_hash_common
  iommu-common: fix x86_64 compiler warnings

commit db4fd9c5d0

@@ -2290,7 +2290,7 @@ void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
         if (len & (8UL - 1))
                 return ERR_PTR(-EINVAL);
 
-        buf = kzalloc(len, GFP_KERNEL);
+        buf = kzalloc(len, GFP_ATOMIC);
         if (!buf)
                 return ERR_PTR(-ENOMEM);
 
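
The flag change above is the standard rule for allocations reachable from softirq context: GFP_KERNEL may sleep, sleeping is forbidden there, so the call must use GFP_ATOMIC and cope with failure. A minimal, invented sketch of such a caller (the tasklet handler and buffer below are not from this commit):

#include <linux/interrupt.h>
#include <linux/slab.h>

/* Hypothetical tasklet handler: runs in softirq context, so it must not
 * sleep.  GFP_ATOMIC never sleeps but can fail, hence the NULL check. */
static void example_tasklet_fn(unsigned long data)
{
        void *buf = kzalloc(128, GFP_ATOMIC);

        if (!buf)
                return;
        /* ... fill and hand off buf ... */
        kfree(buf);
}

ldc_alloc_exp_dring() sits on such a softirq-reachable path, which is why its kzalloc() flag changes even though nothing else in the function does.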
@@ -737,25 +737,9 @@ static void sparc_vt_write_pmc(int idx, u64 val)
 {
         u64 pcr;
 
-        /* There seems to be an internal latch on the overflow event
-         * on SPARC-T4 that prevents it from triggering unless you
-         * update the PIC exactly as we do here. The requirement
-         * seems to be that you have to turn off event counting in the
-         * PCR around the PIC update.
-         *
-         * For example, after the following sequence:
-         *
-         * 1) set PIC to -1
-         * 2) enable event counting and overflow reporting in PCR
-         * 3) overflow triggers, softint 15 handler invoked
-         * 4) clear OV bit in PCR
-         * 5) write PIC to -1
-         *
-         * a subsequent overflow event will not trigger. This
-         * sequence works on SPARC-T3 and previous chips.
-         */
         pcr = pcr_ops->read_pcr(idx);
-        pcr_ops->write_pcr(idx, PCR_N4_PICNPT);
+        /* ensure ov and ntc are reset */
+        pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
 
         pcr_ops->write_pic(idx, val & 0xffffffff);
 
@@ -792,25 +776,12 @@ static const struct sparc_pmu niagara4_pmu = {
         .num_pic_regs = 4,
 };
 
-static void sparc_m7_write_pmc(int idx, u64 val)
-{
-        u64 pcr;
-
-        pcr = pcr_ops->read_pcr(idx);
-        /* ensure ov and ntc are reset */
-        pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
-
-        pcr_ops->write_pic(idx, val & 0xffffffff);
-
-        pcr_ops->write_pcr(idx, pcr);
-}
-
 static const struct sparc_pmu sparc_m7_pmu = {
         .event_map = niagara4_event_map,
         .cache_map = &niagara4_cache_map,
         .max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
         .read_pmc = sparc_vt_read_pmc,
-        .write_pmc = sparc_m7_write_pmc,
+        .write_pmc = sparc_vt_write_pmc,
         .upper_shift = 5,
         .lower_shift = 5,
         .event_mask = 0x7ff,
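
Taken together, the two hunks above make sparc_vt_write_pmc() identical to the deleted sparc_m7_write_pmc() and point the M7 PMU descriptor at it, so every chip from T4 onward shares one PMC write path. Reassembled from the removed lines (the final write_pcr() restore and closing brace sit outside the first hunk's context, so read this as a sketch of the resulting function rather than a verbatim quote):

/* Shared post-patch PMC write: clear the OV and NTC bits in the saved PCR,
 * update the PIC, then restore the PCR.  The old T4-only workaround of
 * parking the PCR on PCR_N4_PICNPT around the PIC write is gone. */
static void sparc_vt_write_pmc(int idx, u64 val)
{
        u64 pcr;

        pcr = pcr_ops->read_pcr(idx);
        /* ensure ov and ntc are reset */
        pcr &= ~(PCR_N4_OV | PCR_N4_NTC);

        pcr_ops->write_pic(idx, val & 0xffffffff);

        pcr_ops->write_pcr(idx, pcr);
}
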
@@ -15,9 +15,9 @@
 #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
 #endif
 
-unsigned long iommu_large_alloc = 15;
+static unsigned long iommu_large_alloc = 15;
 
-static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
+static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
 
 static inline bool need_flush(struct iommu_map_table *iommu)
 {
@@ -44,7 +44,7 @@ static void setup_iommu_pool_hash(void)
                 return;
         do_once = true;
         for_each_possible_cpu(i)
-                per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
+                per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
 }
 
 /*
@@ -53,7 +53,7 @@ static void setup_iommu_pool_hash(void)
  * the top 1/4 of the table will be set aside for pool allocations
  * of more than iommu_large_alloc pages.
  */
-extern void iommu_tbl_pool_init(struct iommu_map_table *iommu,
+void iommu_tbl_pool_init(struct iommu_map_table *iommu,
                                 unsigned long num_entries,
                                 u32 table_shift,
                                 void (*lazy_flush)(struct iommu_map_table *),
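
The hunk above, together with making iommu_large_alloc static in the first iommu-common hunk, is what the "fix x86_64 compiler warnings" change amounts to: the extern declaration belongs in the header, and the definition in the .c file carries no storage-class specifier, while purely file-local data is marked static. A generic sketch with invented names (frob.h and frob_table_init are not real iommu-common symbols):

/* frob.h -- the declaration other translation units include */
extern void frob_table_init(unsigned long num_entries);

/* frob.c -- the definition carries no "extern" */
#include "frob.h"

void frob_table_init(unsigned long num_entries)
{
        /* carve num_entries into pools, etc. */
}
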
@@ -106,7 +106,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
                               unsigned long mask,
                               unsigned int align_order)
 {
-        unsigned int pool_hash = __this_cpu_read(iommu_pool_hash);
+        unsigned int pool_hash = __this_cpu_read(iommu_hash_common);
         unsigned long n, end, start, limit, boundary_size;
         struct iommu_pool *pool;
         int pass = 0;
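
The rename itself is mechanical, but the pattern the iommu_hash_common hunks touch is worth spelling out: a per-CPU integer is seeded once from the CPU number and then read locklessly on the allocation fast path, so different CPUs tend to start from different pools of the table. A condensed, self-contained sketch of that per-CPU idiom with invented names (example_cpu_seed and example_pick_bucket are not kernel symbols):

#include <linux/cpumask.h>
#include <linux/hash.h>
#include <linux/percpu.h>

/* One value per possible CPU, written once at init with per_cpu() and read
 * on the fast path with __this_cpu_read(); each CPU reads only its own slot. */
static DEFINE_PER_CPU(unsigned int, example_cpu_seed);

static void example_seed_init(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                per_cpu(example_cpu_seed, i) = hash_32(i, 4);
}

static unsigned int example_pick_bucket(unsigned int nr_buckets)
{
        /* spread CPUs across nr_buckets pools to reduce lock contention */
        return __this_cpu_read(example_cpu_seed) % nr_buckets;
}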