Merge 9c0c4d24ac ("Merge tag 'block-5.15-2021-10-22' of git://git.kernel.dk/linux-block") into android-mainline

Steps on the way to 5.15-rc7

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I6ed442f1e356e5c01d41e69b6212d01eb67e8edf
@@ -30,10 +30,11 @@ The ``ice`` driver reports the following versions
         PHY, link, etc.
     * - ``fw.mgmt.api``
       - running
-      - 1.5
-      - 2-digit version number of the API exported over the AdminQ by the
-        management firmware. Used by the driver to identify what commands
-        are supported.
+      - 1.5.1
+      - 3-digit version number (major.minor.patch) of the API exported over
+        the AdminQ by the management firmware. Used by the driver to
+        identify what commands are supported. Historical versions of the
+        kernel only displayed a 2-digit version number (major.minor).
     * - ``fw.mgmt.build``
       - running
       - 0x305d955f

@@ -59,11 +59,11 @@ specified with a ``sockaddr`` type, with a single-byte endpoint address:
     };

     struct sockaddr_mctp {
-            unsigned short int   smctp_family;
-            int                  smctp_network;
-            struct mctp_addr     smctp_addr;
-            __u8                 smctp_type;
-            __u8                 smctp_tag;
+            __kernel_sa_family_t smctp_family;
+            unsigned int         smctp_network;
+            struct mctp_addr     smctp_addr;
+            __u8                 smctp_type;
+            __u8                 smctp_tag;
     };

     #define MCTP_NET_ANY 0x0

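As context for the uAPI change above, here is a minimal, hypothetical user-space sketch of binding an AF_MCTP socket with the updated field types. It assumes kernel headers that ship <linux/mctp.h> (5.15+); the message type is an illustrative value, not taken from the patch.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/mctp.h>

#ifndef AF_MCTP
#define AF_MCTP 45	/* may be missing from older libc headers */
#endif

int main(void)
{
	struct sockaddr_mctp addr;
	int sd = socket(AF_MCTP, SOCK_DGRAM, 0);

	if (sd < 0) {
		perror("socket");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.smctp_family = AF_MCTP;		/* __kernel_sa_family_t */
	addr.smctp_network = MCTP_NET_ANY;	/* now an unsigned int */
	addr.smctp_addr.s_addr = MCTP_ADDR_ANY;	/* receive on any local EID */
	addr.smctp_type = 1;			/* example message type */

	if (bind(sd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}
	return 0;
}
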
@@ -24,6 +24,7 @@ struct hyp_pool {

 /* Allocation */
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_split_page(struct hyp_page *page);
 void hyp_get_page(struct hyp_pool *pool, void *addr);
 void hyp_put_page(struct hyp_pool *pool, void *addr);

@@ -35,7 +35,18 @@ const u8 pkvm_hyp_id = 1;

 static void *host_s2_zalloc_pages_exact(size_t size)
 {
-	return hyp_alloc_pages(&host_s2_pool, get_order(size));
+	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
+
+	hyp_split_page(hyp_virt_to_page(addr));
+
+	/*
+	 * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
+	 * so there should be no need to free any of the tail pages to make the
+	 * allocation exact.
+	 */
+	WARN_ON(size != (PAGE_SIZE << get_order(size)));
+
+	return addr;
 }

 static void *host_s2_zalloc_page(void *pool)

@@ -152,6 +152,7 @@ static inline void hyp_page_ref_inc(struct hyp_page *p)

 static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
 {
+	BUG_ON(!p->refcount);
 	p->refcount--;
 	return (p->refcount == 0);
 }

@@ -193,6 +194,20 @@ void hyp_get_page(struct hyp_pool *pool, void *addr)
 	hyp_spin_unlock(&pool->lock);
 }

+void hyp_split_page(struct hyp_page *p)
+{
+	unsigned short order = p->order;
+	unsigned int i;
+
+	p->order = 0;
+	for (i = 1; i < (1 << order); i++) {
+		struct hyp_page *tail = p + i;
+
+		tail->order = 0;
+		hyp_set_page_refcounted(tail);
+	}
+}
+
 void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
 {
 	unsigned short i = order;

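As background for the new helper, a toy user-space model (not the EL2 code above) of the same split: an order-N block becomes 2^N order-0 pages, each tail page picking up its own reference, mirroring hyp_split_page()'s loop. Names here are illustrative.

#include <stdio.h>

struct toy_page {
	unsigned short order;
	unsigned short refcount;
};

static void toy_split_page(struct toy_page *p)
{
	unsigned short order = p->order;
	unsigned int i;

	p->order = 0;
	for (i = 1; i < (1u << order); i++) {
		struct toy_page *tail = p + i;

		tail->order = 0;
		tail->refcount = 1;	/* hyp_set_page_refcounted() analogue */
	}
}

int main(void)
{
	struct toy_page pages[4] = { { .order = 2, .refcount = 1 } };

	toy_split_page(&pages[0]);
	printf("page[3]: order=%u refcount=%u\n",
	       pages[3].order, pages[3].refcount); /* order=0 refcount=1 */
	return 0;
}
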
@@ -1529,8 +1529,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 		 * when updating the PG_mte_tagged page flag, see
 		 * sanitise_mte_tags for more details.
 		 */
-		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
-			return -EINVAL;
+		if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED) {
+			ret = -EINVAL;
+			break;
+		}

 		if (vma->vm_flags & VM_PFNMAP) {
 			/* IO region dirty page logging not allowed */

@@ -9,7 +9,7 @@

 static inline unsigned long arch_local_save_flags(void)
 {
-	return RDCTL(CTL_STATUS);
+	return RDCTL(CTL_FSTATUS);
 }

 /*

@@ -18,7 +18,7 @@ static inline unsigned long arch_local_save_flags(void)
  */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	WRCTL(CTL_STATUS, flags);
+	WRCTL(CTL_FSTATUS, flags);
 }

 static inline void arch_local_irq_disable(void)

@@ -11,7 +11,7 @@
 #endif

 /* control register numbers */
-#define CTL_STATUS	0
+#define CTL_FSTATUS	0
 #define CTL_ESTATUS	1
 #define CTL_BSTATUS	2
 #define CTL_IENABLE	3

@@ -126,14 +126,16 @@ _GLOBAL(idle_return_gpr_loss)
 /*
  * This is the sequence required to execute idle instructions, as
  * specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
  *
- * The 0(r1) slot is used to save r2 in isa206, so use that here.
+ * We have to store a GPR somewhere, ptesync, then reload it, and create
+ * a false dependency on the result of the load. It doesn't matter which
+ * GPR we store, or where we store it. We have already stored r2 to the
+ * stack at -8(r1) in isa206_idle_insn_mayloss, so use that.
  */
 #define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST)			\
 	/* Magic NAP/SLEEP/WINKLE mode enter sequence */	\
-	std	r2,0(r1);					\
+	std	r2,-8(r1);					\
 	ptesync;						\
-	ld	r2,0(r1);					\
+	ld	r2,-8(r1);					\
 236:	cmpd	cr0,r2,r2;					\
 	bne	236b;						\
 	IDLE_INST;						\

@@ -1730,8 +1730,6 @@ void __cpu_die(unsigned int cpu)

 void arch_cpu_idle_dead(void)
 {
-	sched_preempt_enable_no_resched();
-
 	/*
 	 * Disable on the down path. This will be re-enabled by
 	 * start_secondary() via start_secondary_resume() below

@@ -894,6 +894,11 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,

 /**
  * guest_translate_address - translate guest logical into guest absolute address
+ * @vcpu: virtual cpu
+ * @gva: Guest virtual address
+ * @ar: Access register
+ * @gpa: Guest physical address
+ * @mode: Translation access mode
  *
  * Parameter semantics are the same as the ones from guest_translate.
  * The memory contents at the guest address are not changed.

@@ -934,6 +939,11 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,

 /**
  * check_gva_range - test a range of guest virtual addresses for accessibility
+ * @vcpu: virtual cpu
+ * @gva: Guest virtual address
+ * @ar: Access register
+ * @length: Length of test range
+ * @mode: Translation access mode
  */
 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
 		    unsigned long length, enum gacc_mode mode)

@@ -956,6 +966,7 @@ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,

 /**
  * kvm_s390_check_low_addr_prot_real - check for low-address protection
+ * @vcpu: virtual cpu
  * @gra: Guest real address
  *
  * Checks whether an address is subject to low-address protection and set

@@ -979,6 +990,7 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
  * @pgt: pointer to the beginning of the page table for the given address if
  *	 successful (return value 0), or to the first invalid DAT entry in
  *	 case of exceptions (return value > 0)
+ * @dat_protection: referenced memory is write protected
  * @fake: pgt references contiguous guest memory block, not a pgtable
  */
 static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,

@@ -269,6 +269,7 @@ static int handle_prog(struct kvm_vcpu *vcpu)

 /**
  * handle_external_interrupt - used for external interruption interceptions
+ * @vcpu: virtual cpu
  *
  * This interception only occurs if the CPUSTAT_EXT_INT bit was set, or if
  * the new PSW does not have external interrupts disabled. In the first case,

@@ -315,7 +316,8 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 }

 /**
- * Handle MOVE PAGE partial execution interception.
+ * handle_mvpg_pei - Handle MOVE PAGE partial execution interception.
+ * @vcpu: virtual cpu
  *
  * This interception can only happen for guests with DAT disabled and
  * addresses that are currently not mapped in the host. Thus we try to

@@ -702,7 +702,8 @@ struct kvm_vcpu_arch {

 	struct kvm_pio_request pio;
 	void *pio_data;
-	void *guest_ins_data;
+	void *sev_pio_data;
+	unsigned sev_pio_count;

 	u8 event_exit_inst_len;

@@ -2321,13 +2321,14 @@ EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
+	u64 msr_val;
 	int i;

 	if (!init_event) {
-		vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE |
-				       MSR_IA32_APICBASE_ENABLE;
+		msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
 		if (kvm_vcpu_is_reset_bsp(vcpu))
-			vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
+			msr_val |= MSR_IA32_APICBASE_BSP;
+		kvm_lapic_set_base(vcpu, msr_val);
 	}

 	if (!apic)

@@ -2336,11 +2337,9 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	/* Stop the timer in case it's a reset to an active apic */
 	hrtimer_cancel(&apic->lapic_timer.timer);

-	if (!init_event) {
-		apic->base_address = APIC_DEFAULT_PHYS_BASE;
-
+	/* The xAPIC ID is set at RESET even if the APIC was already enabled. */
+	if (!init_event)
 		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
-	}
 	kvm_apic_set_version(apic->vcpu);

 	for (i = 0; i < KVM_APIC_LVT_NUM; i++)

@@ -2481,6 +2480,11 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
 		lapic_timer_advance_dynamic = false;
 	}

+	/*
+	 * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
+	 * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
+	 */
+	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
 	static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
 	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

@@ -2942,5 +2946,7 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 void kvm_lapic_exit(void)
 {
 	static_key_deferred_flush(&apic_hw_disabled);
+	WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
 	static_key_deferred_flush(&apic_sw_disabled);
+	WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
 }

@@ -4596,10 +4596,10 @@ static void update_pkru_bitmask(struct kvm_mmu *mmu)
 	unsigned bit;
 	bool wp;

-	if (!is_cr4_pke(mmu)) {
-		mmu->pkru_mask = 0;
+	mmu->pkru_mask = 0;
+
+	if (!is_cr4_pke(mmu))
 		return;
-	}

 	wp = is_cr0_wp(mmu);

@@ -618,7 +618,12 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
 	vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
 	vmsa.address = __sme_pa(svm->vmsa);
 	vmsa.len = PAGE_SIZE;
-	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
+	if (ret)
+		return ret;
+
+	vcpu->arch.guest_state_protected = true;
+	return 0;
 }

 static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)

@@ -1479,6 +1484,13 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
 		goto e_free_trans;
 	}

+	/*
+	 * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
+	 * encrypts the written data with the guest's key, and the cache may
+	 * contain dirty, unencrypted data.
+	 */
+	sev_clflush_pages(guest_page, n);
+
 	/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
 	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
 	data.guest_address |= sev_me_mask;

@@ -2583,7 +2595,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 		return -EINVAL;

 	return kvm_sev_es_string_io(&svm->vcpu, size, port,
-				    svm->ghcb_sa, svm->ghcb_sa_len, in);
+				    svm->ghcb_sa, svm->ghcb_sa_len / size, in);
 }

 void sev_es_init_vmcb(struct vcpu_svm *svm)

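A quick sanity check of the units fix above: ghcb_sa_len counts bytes, while kvm_sev_es_string_io() expects the number of I/O elements, so for example a 512-byte scratch area accessed with 2-byte port I/O should be passed as 512 / 2 = 256 elements, which is what the added division restores.
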
@@ -191,7 +191,7 @@ struct vcpu_svm {

 	/* SEV-ES scratch area support */
 	void *ghcb_sa;
-	u64 ghcb_sa_len;
+	u32 ghcb_sa_len;
 	bool ghcb_sa_sync;
 	bool ghcb_sa_free;

@@ -5562,9 +5562,13 @@ static int handle_encls(struct kvm_vcpu *vcpu)

 static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
 {
-	vcpu->run->exit_reason = KVM_EXIT_X86_BUS_LOCK;
-	vcpu->run->flags |= KVM_RUN_X86_BUS_LOCK;
-	return 0;
+	/*
+	 * Hardware may or may not set the BUS_LOCK_DETECTED flag on BUS_LOCK
+	 * VM-Exits. Unconditionally set the flag here and leave the handling to
+	 * vmx_handle_exit().
+	 */
+	to_vmx(vcpu)->exit_reason.bus_lock_detected = true;
+	return 1;
 }

 /*

@@ -6051,9 +6055,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	int ret = __vmx_handle_exit(vcpu, exit_fastpath);

 	/*
-	 * Even when current exit reason is handled by KVM internally, we
-	 * still need to exit to user space when bus lock detected to inform
-	 * that there is a bus lock in guest.
+	 * Exit to user space when bus lock detected to inform that there is
+	 * a bus lock in guest.
 	 */
 	if (to_vmx(vcpu)->exit_reason.bus_lock_detected) {
 		if (ret > 0)

@@ -6302,18 +6305,13 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)

 		/*
 		 * If we are running L2 and L1 has a new pending interrupt
-		 * which can be injected, we should re-evaluate
-		 * what should be done with this new L1 interrupt.
-		 * If L1 intercepts external-interrupts, we should
-		 * exit from L2 to L1. Otherwise, interrupt should be
-		 * delivered directly to L2.
+		 * which can be injected, this may cause a vmexit or it may
+		 * be injected into L2. Either way, this interrupt will be
+		 * processed via KVM_REQ_EVENT, not RVI, because we do not use
+		 * virtual interrupt delivery to inject L1 interrupts into L2.
 		 */
-		if (is_guest_mode(vcpu) && max_irr_updated) {
-			if (nested_exit_on_intr(vcpu))
-				kvm_vcpu_exiting_guest_mode(vcpu);
-			else
-				kvm_make_request(KVM_REQ_EVENT, vcpu);
-		}
+		if (is_guest_mode(vcpu) && max_irr_updated)
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
 	} else {
 		max_irr = kvm_lapic_find_highest_irr(vcpu);
 	}

@@ -6906,7 +6906,7 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
 }

 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
-			       unsigned short port, void *val,
+			       unsigned short port,
 			       unsigned int count, bool in)
 {
 	vcpu->arch.pio.port = port;

@@ -6914,10 +6914,8 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
 	vcpu->arch.pio.count = count;
 	vcpu->arch.pio.size = size;

-	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
-		vcpu->arch.pio.count = 0;
+	if (!kernel_pio(vcpu, vcpu->arch.pio_data))
 		return 1;
-	}

 	vcpu->run->exit_reason = KVM_EXIT_IO;
 	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;

@@ -6929,26 +6927,39 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
 	return 0;
 }

-static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
-			   unsigned short port, void *val, unsigned int count)
+static int __emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+			     unsigned short port, unsigned int count)
+{
+	WARN_ON(vcpu->arch.pio.count);
+	memset(vcpu->arch.pio_data, 0, size * count);
+	return emulator_pio_in_out(vcpu, size, port, count, true);
+}
+
+static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
 {
-	int ret;
+	int size = vcpu->arch.pio.size;
+	unsigned count = vcpu->arch.pio.count;
+	memcpy(val, vcpu->arch.pio_data, size * count);
+	trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
+	vcpu->arch.pio.count = 0;
+}

-	if (vcpu->arch.pio.count)
-		goto data_avail;
-
-	memset(vcpu->arch.pio_data, 0, size * count);
-
-	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
-	if (ret) {
-data_avail:
-		memcpy(val, vcpu->arch.pio_data, size * count);
-		trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
-		vcpu->arch.pio.count = 0;
-		return 1;
+static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
+			   unsigned short port, void *val, unsigned int count)
+{
+	if (vcpu->arch.pio.count) {
+		/* Complete previous iteration.  */
+	} else {
+		int r = __emulator_pio_in(vcpu, size, port, count);
+		if (!r)
+			return r;
+
+		/* Results already available, fall through.  */
 	}

-	return 0;
+	WARN_ON(count != vcpu->arch.pio.count);
+	complete_emulator_pio_in(vcpu, val);
+	return 1;
 }

 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,

@@ -6963,9 +6974,15 @@ static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
 			    unsigned short port, const void *val,
 			    unsigned int count)
 {
+	int ret;
+
 	memcpy(vcpu->arch.pio_data, val, size * count);
 	trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
-	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
+	ret = emulator_pio_in_out(vcpu, size, port, count, false);
+	if (ret)
+		vcpu->arch.pio.count = 0;
+
+	return ret;
 }

 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,

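Not from the patch itself: the split into __emulator_pio_in() and complete_emulator_pio_in() is an instance of a start/complete state machine, where an operation that must bounce through userspace is finished on the next call. A toy user-space sketch of that control flow, with illustrative names:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static struct {
	unsigned int count;	/* nonzero: data waiting to be consumed */
	unsigned char data[16];
} pending;

static bool backend_read(unsigned int n)
{
	/* pretend the fast path fails and "userspace" fills the buffer */
	memset(pending.data, 0xab, n);
	pending.count = n;
	return false;	/* not complete yet */
}

static bool toy_pio_in(unsigned char *buf, unsigned int n)
{
	if (!pending.count && !backend_read(n))
		return false;	/* caller retries after userspace exit */

	memcpy(buf, pending.data, n);	/* complete previous iteration */
	pending.count = 0;
	return true;
}

int main(void)
{
	unsigned char buf[4];

	if (!toy_pio_in(buf, 4))	/* first call: deferred to "userspace" */
		printf("deferred\n");
	if (toy_pio_in(buf, 4))		/* second call: completes */
		printf("got 0x%02x\n", buf[0]);
	return 0;
}
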
@@ -9643,14 +9660,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
 				break;

-			if (unlikely(kvm_vcpu_exit_request(vcpu))) {
-				exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
-				break;
-			}
-
 			if (vcpu->arch.apicv_active)
 				static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+
+			if (unlikely(kvm_vcpu_exit_request(vcpu))) {
+				exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
+				break;
+			}
 		}
 	}

 	/*
 	 * Do this here before restoring debug registers on the host. And

@@ -11392,7 +11409,8 @@ static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
 		int level = i + 1;
 		int lpages = __kvm_mmu_slot_lpages(slot, npages, level);

-		WARN_ON(slot->arch.rmap[i]);
+		if (slot->arch.rmap[i])
+			continue;

 		slot->arch.rmap[i] = kvcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
 		if (!slot->arch.rmap[i]) {

@@ -12367,44 +12385,81 @@ int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);

-static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
-{
-	memcpy(vcpu->arch.guest_ins_data, vcpu->arch.pio_data,
-	       vcpu->arch.pio.count * vcpu->arch.pio.size);
-	vcpu->arch.pio.count = 0;
-
-	return 1;
-}
-
-static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
-			   unsigned int port, void *data,  unsigned int count)
-{
-	int ret;
-
-	ret = emulator_pio_out_emulated(vcpu->arch.emulate_ctxt, size, port,
-					data, count);
-	if (ret)
-		return ret;
-
-	vcpu->arch.pio.count = 0;
-
-	return 0;
-}
-
-static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
-			  unsigned int port, void *data, unsigned int count)
-{
-	int ret;
-
-	ret = emulator_pio_in_emulated(vcpu->arch.emulate_ctxt, size, port,
-				       data, count);
-	if (ret) {
-		vcpu->arch.pio.count = 0;
-	} else {
-		vcpu->arch.guest_ins_data = data;
-		vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
-	}
-
-	return 0;
-}
+static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
+			   unsigned int port);
+
+static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
+{
+	int size = vcpu->arch.pio.size;
+	int port = vcpu->arch.pio.port;
+
+	vcpu->arch.pio.count = 0;
+	if (vcpu->arch.sev_pio_count)
+		return kvm_sev_es_outs(vcpu, size, port);
+	return 1;
+}
+
+static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
+			   unsigned int port)
+{
+	for (;;) {
+		unsigned int count =
+			min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
+		int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);
+
+		/* memcpy done already by emulator_pio_out.  */
+		vcpu->arch.sev_pio_count -= count;
+		vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
+		if (!ret)
+			break;
+
+		/* Emulation done by the kernel.  */
+		if (!vcpu->arch.sev_pio_count)
+			return 1;
+	}
+
+	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
+	return 0;
+}
+
+static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
+			  unsigned int port);
+
+static void advance_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+{
+	unsigned count = vcpu->arch.pio.count;
+	complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
+	vcpu->arch.sev_pio_count -= count;
+	vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
+}
+
+static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
+{
+	int size = vcpu->arch.pio.size;
+	int port = vcpu->arch.pio.port;
+
+	advance_sev_es_emulated_ins(vcpu);
+	if (vcpu->arch.sev_pio_count)
+		return kvm_sev_es_ins(vcpu, size, port);
+	return 1;
+}
+
+static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
+			  unsigned int port)
+{
+	for (;;) {
+		unsigned int count =
+			min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
+		if (!__emulator_pio_in(vcpu, size, port, count))
+			break;
+
+		/* Emulation done by the kernel.  */
+		advance_sev_es_emulated_ins(vcpu);
+		if (!vcpu->arch.sev_pio_count)
+			return 1;
+	}
+
+	vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
+	return 0;
+}

@@ -12412,8 +12467,10 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
 			 unsigned int port, void *data,  unsigned int count,
 			 int in)
 {
-	return in ? kvm_sev_es_ins(vcpu, size, port, data, count)
-		  : kvm_sev_es_outs(vcpu, size, port, data, count);
+	vcpu->arch.sev_pio_data = data;
+	vcpu->arch.sev_pio_count = count;
+	return in ? kvm_sev_es_ins(vcpu, size, port)
+		  : kvm_sev_es_outs(vcpu, size, port);
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);

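The rewritten SEV-ES string-I/O path above never moves more than one bounce page per pass. A standalone, user-space sketch of the same chunking arithmetic (values illustrative, not kernel code):

#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int size = 2;		/* e.g. 16-bit port I/O */
	unsigned int count = 5000;	/* elements left to transfer */

	while (count) {
		/* at most PAGE_SIZE/size elements fit in the bounce page */
		unsigned int chunk = min_u(PAGE_SIZE / size, count);

		printf("transfer %u elements (%u bytes)\n", chunk, chunk * size);
		count -= chunk;		/* 2048, 2048, then 904 */
	}
	return 0;
}
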
@@ -1897,10 +1897,11 @@ void blk_cgroup_bio_start(struct bio *bio)
 {
 	int rwd = blk_cgroup_io_type(bio), cpu;
 	struct blkg_iostat_set *bis;
+	unsigned long flags;

 	cpu = get_cpu();
 	bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
-	u64_stats_update_begin(&bis->sync);
+	flags = u64_stats_update_begin_irqsave(&bis->sync);

 	/*
 	 * If the bio is flagged with BIO_CGROUP_ACCT it means this is a split

@@ -1912,7 +1913,7 @@ void blk_cgroup_bio_start(struct bio *bio)
 	}
 	bis->cur.ios[rwd]++;

-	u64_stats_update_end(&bis->sync);
+	u64_stats_update_end_irqrestore(&bis->sync, flags);
 	if (cgroup_subsys_on_dfl(io_cgrp_subsys))
 		cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);
 	put_cpu();

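Not part of the patch: the writer-side switch to the _irqsave variants pairs with the usual u64_stats reader retry loop, which is unchanged. A minimal sketch of that reader pattern, with illustrative struct and field names:

#include <linux/u64_stats_sync.h>

struct demo_stat {
	struct u64_stats_sync sync;
	u64 bytes;
};

static u64 demo_read(struct demo_stat *s)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&s->sync);
		val = s->bytes;		/* snapshot under the seqcount */
	} while (u64_stats_fetch_retry(&s->sync, start));

	return val;
}
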
@@ -423,6 +423,7 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
 	device_del(pdev);
 out_put:
 	put_device(pdev);
+	return ERR_PTR(err);
 out_put_disk:
 	put_disk(disk);
 	return ERR_PTR(err);

@@ -1035,13 +1035,8 @@ void acpi_turn_off_unused_power_resources(void)
 	list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
 		mutex_lock(&resource->resource_lock);

-		/*
-		 * Turn off power resources in an unknown state too, because the
-		 * platform firmware on some system expects the OS to turn off
-		 * power resources without any users unconditionally.
-		 */
 		if (!resource->ref_count &&
-		    resource->state != ACPI_POWER_RESOURCE_STATE_OFF) {
+		    resource->state == ACPI_POWER_RESOURCE_STATE_ON) {
 			acpi_handle_debug(resource->device.handle, "Turning OFF\n");
 			__acpi_power_off(resource);
 		}

@@ -21,6 +21,7 @@
 #include <linux/earlycpio.h>
 #include <linux/initrd.h>
 #include <linux/security.h>
+#include <linux/kmemleak.h>
 #include "internal.h"

 #ifdef CONFIG_ACPI_CUSTOM_DSDT

@@ -601,6 +602,8 @@ void __init acpi_table_upgrade(void)
 	 */
 	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

+	kmemleak_ignore_phys(acpi_tables_addr);
+
 	/*
 	 * early_ioremap only can remap 256k one time. If we map all
 	 * tables one time, we will hit the limit. Need to map chunks

@@ -1300,18 +1300,6 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
 	return flags;
 }

-static enum drm_connector_status ast_connector_detect(struct drm_connector
-						      *connector, bool force)
-{
-	int r;
-
-	r = ast_get_modes(connector);
-	if (r <= 0)
-		return connector_status_disconnected;
-
-	return connector_status_connected;
-}
-
 static void ast_connector_destroy(struct drm_connector *connector)
 {
 	struct ast_connector *ast_connector = to_ast_connector(connector);

@@ -1327,7 +1315,6 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {

 static const struct drm_connector_funcs ast_connector_funcs = {
 	.reset = drm_atomic_helper_connector_reset,
-	.detect = ast_connector_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = ast_connector_destroy,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,

@@ -1355,8 +1342,7 @@ static int ast_connector_init(struct drm_device *dev)
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;

-	connector->polled = DRM_CONNECTOR_POLL_CONNECT |
-			    DRM_CONNECTOR_POLL_DISCONNECT;
+	connector->polled = DRM_CONNECTOR_POLL_CONNECT;

 	drm_connector_attach_encoder(connector, encoder);

@@ -1425,8 +1411,6 @@ int ast_mode_config_init(struct ast_private *ast)

 	drm_mode_config_reset(dev);

-	drm_kms_helper_poll_init(dev);
-
 	return 0;
 }

@@ -66,7 +66,8 @@ static const struct drm_crtc_funcs kmb_crtc_funcs = {
 	.disable_vblank = kmb_crtc_disable_vblank,
 };

-static void kmb_crtc_set_mode(struct drm_crtc *crtc)
+static void kmb_crtc_set_mode(struct drm_crtc *crtc,
+			      struct drm_atomic_state *old_state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_display_mode *m = &crtc->state->adjusted_mode;

@@ -75,7 +76,7 @@ static void kmb_crtc_set_mode(struct drm_crtc *crtc)
 	unsigned int val = 0;

 	/* Initialize mipi */
-	kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz);
+	kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz, old_state);
 	drm_info(dev,
 		 "vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n",
 		 m->crtc_vsync_start - m->crtc_vdisplay,

@@ -138,7 +139,7 @@ static void kmb_crtc_atomic_enable(struct drm_crtc *crtc,
 	struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc);

 	clk_prepare_enable(kmb->kmb_clk.clk_lcd);
-	kmb_crtc_set_mode(crtc);
+	kmb_crtc_set_mode(crtc, state);
 	drm_crtc_vblank_on(crtc);
 }

@@ -185,11 +186,45 @@ static void kmb_crtc_atomic_flush(struct drm_crtc *crtc,
 	spin_unlock_irq(&crtc->dev->event_lock);
 }

+static enum drm_mode_status
+		kmb_crtc_mode_valid(struct drm_crtc *crtc,
+				    const struct drm_display_mode *mode)
+{
+	int refresh;
+	struct drm_device *dev = crtc->dev;
+	int vfp = mode->vsync_start - mode->vdisplay;
+
+	if (mode->vdisplay < KMB_CRTC_MAX_HEIGHT) {
+		drm_dbg(dev, "height = %d less than %d",
+			mode->vdisplay, KMB_CRTC_MAX_HEIGHT);
+		return MODE_BAD_VVALUE;
+	}
+	if (mode->hdisplay < KMB_CRTC_MAX_WIDTH) {
+		drm_dbg(dev, "width = %d less than %d",
+			mode->hdisplay, KMB_CRTC_MAX_WIDTH);
+		return MODE_BAD_HVALUE;
+	}
+	refresh = drm_mode_vrefresh(mode);
+	if (refresh < KMB_MIN_VREFRESH || refresh > KMB_MAX_VREFRESH) {
+		drm_dbg(dev, "refresh = %d less than %d or greater than %d",
+			refresh, KMB_MIN_VREFRESH, KMB_MAX_VREFRESH);
+		return MODE_BAD;
+	}
+
+	if (vfp < KMB_CRTC_MIN_VFP) {
+		drm_dbg(dev, "vfp = %d less than %d", vfp, KMB_CRTC_MIN_VFP);
+		return MODE_BAD;
+	}
+
+	return MODE_OK;
+}
+
 static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = {
 	.atomic_begin = kmb_crtc_atomic_begin,
 	.atomic_enable = kmb_crtc_atomic_enable,
 	.atomic_disable = kmb_crtc_atomic_disable,
 	.atomic_flush = kmb_crtc_atomic_flush,
+	.mode_valid = kmb_crtc_mode_valid,
 };

 int kmb_setup_crtc(struct drm_device *drm)

@@ -380,7 +380,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
 		if (val & LAYER3_DMA_FIFO_UNDERFLOW)
 			drm_dbg(&kmb->drm,
 				"LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val);
-		if (val & LAYER3_DMA_FIFO_UNDERFLOW)
+		if (val & LAYER3_DMA_FIFO_OVERFLOW)
 			drm_dbg(&kmb->drm,
 				"LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val);
 	}

@@ -20,11 +20,18 @@
 #define DRIVER_MAJOR 1
 #define DRIVER_MINOR 1

+/* Platform definitions */
+#define KMB_CRTC_MIN_VFP	4
+
 #define KMB_CRTC_MAX_WIDTH	1920 /* max width in pixels */
 #define KMB_CRTC_MAX_HEIGHT	1080 /* max height in pixels */
+#define KMB_CRTC_MIN_WIDTH	1920
+#define KMB_CRTC_MIN_HEIGHT	1080
 #define KMB_FB_MAX_WIDTH	1920
 #define KMB_FB_MAX_HEIGHT	1080
 #define KMB_FB_MIN_WIDTH	1
 #define KMB_FB_MIN_HEIGHT	1
+
+#define KMB_MIN_VREFRESH	59    /*vertical refresh in Hz */
+#define KMB_MAX_VREFRESH	60    /*vertical refresh in Hz */
 #define KMB_LCD_DEFAULT_CLK	200000000
 #define KMB_SYS_CLK_MHZ		500

@@ -50,6 +57,7 @@ struct kmb_drm_private {
 	spinlock_t			irq_lock;
 	int				irq_lcd;
 	int				sys_clk_mhz;
+	struct disp_cfg			init_disp_cfg[KMB_MAX_PLANES];
 	struct layer_status		plane_status[KMB_MAX_PLANES];
 	int				kmb_under_flow;
 	int				kmb_flush_done;

@@ -482,6 +482,10 @@ static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi,
 	return 0;
 }

+#define CLK_DIFF_LOW 50
+#define CLK_DIFF_HI 60
+#define SYSCLK_500  500
+
 static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
 				struct mipi_tx_frame_timing_cfg *fg_cfg)
 {

@@ -492,7 +496,12 @@ static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
 	/* 500 Mhz system clock minus 50 to account for the difference in
 	 * MIPI clock speed in RTL tests
 	 */
-	sysclk = kmb_dsi->sys_clk_mhz - 50;
+	if (kmb_dsi->sys_clk_mhz == SYSCLK_500) {
+		sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_LOW;
+	} else {
+		/* 700 Mhz clk*/
+		sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_HI;
+	}

 	/* PPL-Pixel Packing Layer, LLP-Low Level Protocol
	 * Frame genartor timing parameters are clocked on the system clock,

@@ -1322,7 +1331,8 @@ static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi,
 	return 0;
 }

-static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
+static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi,
+				struct drm_atomic_state *old_state)
 {
 	struct regmap *msscam;

@@ -1331,7 +1341,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
 		dev_dbg(kmb_dsi->dev, "failed to get msscam syscon");
 		return;
 	}

+	drm_atomic_bridge_chain_enable(adv_bridge, old_state);
 	/* DISABLE MIPI->CIF CONNECTION */
 	regmap_write(msscam, MSS_MIPI_CIF_CFG, 0);

@@ -1342,7 +1352,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
 }

 int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
-		     int sys_clk_mhz)
+		     int sys_clk_mhz, struct drm_atomic_state *old_state)
 {
 	u64 data_rate;

@@ -1384,18 +1394,13 @@ int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
 		mipi_tx_init_cfg.lane_rate_mbps = data_rate;
 	}

-	kmb_write_mipi(kmb_dsi, DPHY_ENABLE, 0);
-	kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL0, 0);
-	kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL1, 0);
-	kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0);
-
 	/* Initialize mipi controller */
 	mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg);

 	/* Dphy initialization */
 	mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg);

-	connect_lcd_to_mipi(kmb_dsi);
+	connect_lcd_to_mipi(kmb_dsi, old_state);
 	dev_info(kmb_dsi->dev, "mipi hw initialized");

 	return 0;

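As a worked check of the clock selection above, using the values in the patch: with a 500 MHz system clock the frame-generator timings are computed against 500 - 50 = 450 MHz, and with the 700 MHz clock against 700 - 60 = 640 MHz.
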
@@ -380,7 +380,7 @@ int kmb_dsi_host_bridge_init(struct device *dev);
 struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev);
 void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi);
 int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
-		     int sys_clk_mhz);
+		     int sys_clk_mhz, struct drm_atomic_state *old_state);
 int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi);
 int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi);
 int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi);

@@ -67,8 +67,21 @@ static const u32 kmb_formats_v[] = {

 static unsigned int check_pixel_format(struct drm_plane *plane, u32 format)
 {
+	struct kmb_drm_private *kmb;
+	struct kmb_plane *kmb_plane = to_kmb_plane(plane);
 	int i;
+	int plane_id = kmb_plane->id;
+	struct disp_cfg init_disp_cfg;

+	kmb = to_kmb(plane->dev);
+	init_disp_cfg = kmb->init_disp_cfg[plane_id];
+	/* Due to HW limitations, changing pixel format after initial
+	 * plane configuration is not supported.
+	 */
+	if (init_disp_cfg.format && init_disp_cfg.format != format) {
+		drm_dbg(&kmb->drm, "Cannot change format after initial plane configuration");
+		return -EINVAL;
+	}
 	for (i = 0; i < plane->format_count; i++) {
 		if (plane->format_types[i] == format)
 			return 0;

@@ -81,11 +94,17 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
 {
 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
 										 plane);
+	struct kmb_drm_private *kmb;
+	struct kmb_plane *kmb_plane = to_kmb_plane(plane);
+	int plane_id = kmb_plane->id;
+	struct disp_cfg init_disp_cfg;
 	struct drm_framebuffer *fb;
 	int ret;
 	struct drm_crtc_state *crtc_state;
 	bool can_position;

+	kmb = to_kmb(plane->dev);
+	init_disp_cfg = kmb->init_disp_cfg[plane_id];
 	fb = new_plane_state->fb;
 	if (!fb || !new_plane_state->crtc)
 		return 0;

@@ -99,6 +118,16 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
 	    new_plane_state->crtc_w < KMB_FB_MIN_WIDTH ||
 	    new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT)
 		return -EINVAL;
+
+	/* Due to HW limitations, changing plane height or width after
+	 * initial plane configuration is not supported.
+	 */
+	if ((init_disp_cfg.width && init_disp_cfg.height) &&
+	    (init_disp_cfg.width != fb->width ||
+	     init_disp_cfg.height != fb->height)) {
+		drm_dbg(&kmb->drm, "Cannot change plane height or width after initial configuration");
+		return -EINVAL;
+	}
 	can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
 	crtc_state =
 		drm_atomic_get_existing_crtc_state(state,

@@ -335,6 +364,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
 	unsigned char plane_id;
 	int num_planes;
 	static dma_addr_t addr[MAX_SUB_PLANES];
+	struct disp_cfg *init_disp_cfg;

 	if (!plane || !new_plane_state || !old_plane_state)
 		return;

@@ -357,7 +387,8 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
 	}
 	spin_unlock_irq(&kmb->irq_lock);

-	src_w = (new_plane_state->src_w >> 16);
+	init_disp_cfg = &kmb->init_disp_cfg[plane_id];
+	src_w = new_plane_state->src_w >> 16;
 	src_h = new_plane_state->src_h >> 16;
 	crtc_x = new_plane_state->crtc_x;
 	crtc_y = new_plane_state->crtc_y;

@@ -500,6 +531,16 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,

 	/* Enable DMA */
 	kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg);
+
+	/* Save initial display config */
+	if (!init_disp_cfg->width ||
+	    !init_disp_cfg->height ||
+	    !init_disp_cfg->format) {
+		init_disp_cfg->width = width;
+		init_disp_cfg->height = height;
+		init_disp_cfg->format = fb->format->format;
+	}
+
 	drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg,
 		kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id)));

@@ -63,6 +63,12 @@ struct layer_status {
 	u32 ctrl;
 };

+struct disp_cfg {
+	unsigned int width;
+	unsigned int height;
+	unsigned int format;
+};
+
 struct kmb_plane *kmb_plane_init(struct drm_device *drm);
 void kmb_plane_destroy(struct drm_plane *plane);
 #endif /* __KMB_PLANE_H__ */

@@ -1838,6 +1838,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 			adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
 		adreno_gpu->base.hw_apriv = true;

+	/*
+	 * For now only clamp to idle freq for devices where this is known not
+	 * to cause power supply issues:
+	 */
+	if (info && (info->revn == 618))
+		gpu->clamp_to_idle = true;
+
 	a6xx_llc_slices_init(pdev, a6xx_gpu);

 	ret = a6xx_set_supported_hw(&pdev->dev, config->rev);

@@ -203,6 +203,10 @@ struct msm_gpu {
 	uint32_t suspend_count;

 	struct msm_gpu_state *crashstate;
+
+	/* Enable clamping to idle freq when inactive: */
+	bool clamp_to_idle;
+
 	/* True if the hardware supports expanded apriv (a650 and newer) */
 	bool hw_apriv;

@@ -200,7 +200,8 @@ void msm_devfreq_idle(struct msm_gpu *gpu)

 	idle_freq = get_freq(gpu);

-	msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+	if (gpu->clamp_to_idle)
+		msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);

 	df->idle_time = ktime_get();
 	df->idle_freq = idle_freq;

@@ -173,7 +173,11 @@ static void mxsfb_irq_disable(struct drm_device *drm)
 	struct mxsfb_drm_private *mxsfb = drm->dev_private;

 	mxsfb_enable_axi_clk(mxsfb);
-	mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc);
+
+	/* Disable and clear VBLANK IRQ */
+	writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+	writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+
 	mxsfb_disable_axi_clk(mxsfb);
 }

@@ -590,14 +590,14 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
 	.clock		= 69700,

 	.hdisplay	= 800,
-	.hsync_start	= 800 + 6,
-	.hsync_end	= 800 + 6 + 15,
-	.htotal		= 800 + 6 + 15 + 16,
+	.hsync_start	= 800 + 52,
+	.hsync_end	= 800 + 52 + 8,
+	.htotal		= 800 + 52 + 8 + 48,

 	.vdisplay	= 1280,
-	.vsync_start	= 1280 + 8,
-	.vsync_end	= 1280 + 8 + 48,
-	.vtotal		= 1280 + 8 + 48 + 52,
+	.vsync_start	= 1280 + 16,
+	.vsync_end	= 1280 + 16 + 6,
+	.vtotal		= 1280 + 16 + 6 + 15,

 	.width_mm	= 135,
 	.height_mm	= 217,

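As a rough check of the corrected timings, using refresh ≈ clock / (htotal × vtotal): htotal = 800 + 52 + 8 + 48 = 908 and vtotal = 1280 + 16 + 6 + 15 = 1317, so 69,700,000 / (908 × 1317) ≈ 58 Hz, a plausible refresh rate for this 800x1280 panel.
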
@@ -13,6 +13,7 @@
 #define _HYPERV_VMBUS_H

 #include <linux/list.h>
+#include <linux/bitops.h>
 #include <asm/sync_bitops.h>
 #include <asm/hyperv-tlfs.h>
 #include <linux/atomic.h>

@@ -1994,14 +1994,14 @@ setup_hw(struct hfc_pci *hc)
 	pci_set_master(hc->pdev);
 	if (!hc->irq) {
 		printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
-		return 1;
+		return -EINVAL;
 	}
 	hc->hw.pci_io =
 		(char __iomem *)(unsigned long)hc->pdev->resource[1].start;

 	if (!hc->hw.pci_io) {
 		printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
-		return 1;
+		return -ENOMEM;
 	}
 	/* Allocate memory for FIFOS */
 	/* the memory needs to be on a 32k boundary within the first 4G */

@@ -2012,7 +2012,7 @@ setup_hw(struct hfc_pci *hc)
 	if (!buffer) {
 		printk(KERN_WARNING
 		       "HFC-PCI: Error allocating memory for FIFO!\n");
-		return 1;
+		return -ENOMEM;
 	}
 	hc->hw.fifos = buffer;
 	pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);

@@ -2022,7 +2022,7 @@ setup_hw(struct hfc_pci *hc)
 		       "HFC-PCI: Error in ioremap for PCI!\n");
 		dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
 				  hc->hw.dmahandle);
-		return 1;
+		return -ENOMEM;
 	}

 	printk(KERN_INFO

@@ -32,8 +32,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
 static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
 {
 	struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+	void __iomem *src = priv->mram_base + offset;

-	ioread32_rep(priv->mram_base + offset, val, val_count);
+	while (val_count--) {
+		*(unsigned int *)val = ioread32(src);
+		val += 4;
+		src += 4;
+	}

 	return 0;
 }

@@ -51,8 +56,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
 			    const void *val, size_t val_count)
 {
 	struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+	void __iomem *dst = priv->mram_base + offset;

-	iowrite32_rep(priv->base + offset, val, val_count);
+	while (val_count--) {
+		iowrite32(*(unsigned int *)val, dst);
+		val += 4;
+		dst += 4;
+	}

 	return 0;
 }

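Background for the fix above, not taken from the patch: ioread32_rep() performs repeated reads of a single FIFO register address, whereas the M_CAN message RAM is a memory window whose addresses must advance word by word. A hypothetical side-by-side sketch of the two access patterns (names illustrative):

#include <linux/io.h>

static void drain_fifo_reg(void __iomem *fifo_reg, u32 *buf, size_t n)
{
	/* one hardware address, n back-to-back reads */
	ioread32_rep(fifo_reg, buf, n);
}

static void copy_mem_window(void __iomem *win, u32 *buf, size_t n)
{
	size_t i;

	/* distinct addresses: advance through the window */
	for (i = 0; i < n; i++)
		buf[i] = ioread32(win + 4 * i);
}
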
@@ -846,10 +846,12 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
 	struct rcar_can_priv *priv = netdev_priv(ndev);
 	u16 ctlr;

-	if (netif_running(ndev)) {
-		netif_stop_queue(ndev);
-		netif_device_detach(ndev);
-	}
+	if (!netif_running(ndev))
+		return 0;
+
+	netif_stop_queue(ndev);
+	netif_device_detach(ndev);
+
 	ctlr = readw(&priv->regs->ctlr);
 	ctlr |= RCAR_CAN_CTLR_CANM_HALT;
 	writew(ctlr, &priv->regs->ctlr);

@@ -868,6 +870,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
 	u16 ctlr;
 	int err;

+	if (!netif_running(ndev))
+		return 0;
+
 	err = clk_enable(priv->clk);
 	if (err) {
 		netdev_err(ndev, "clk_enable() failed, error %d\n", err);

@@ -881,10 +886,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
 	writew(ctlr, &priv->regs->ctlr);
 	priv->can.state = CAN_STATE_ERROR_ACTIVE;

-	if (netif_running(ndev)) {
-		netif_device_attach(ndev);
-		netif_start_queue(ndev);
-	}
+	netif_device_attach(ndev);
+	netif_start_queue(ndev);
+
 	return 0;
 }

@@ -752,16 +752,15 @@ static void peak_pci_remove(struct pci_dev *pdev)
 		struct net_device *prev_dev = chan->prev_dev;

 		dev_info(&pdev->dev, "removing device %s\n", dev->name);
+		/* do that only for first channel */
+		if (!prev_dev && chan->pciec_card)
+			peak_pciec_remove(chan->pciec_card);
 		unregister_sja1000dev(dev);
 		free_sja1000dev(dev);
 		dev = prev_dev;

-		if (!dev) {
-			/* do that only for first channel */
-			if (chan->pciec_card)
-				peak_pciec_remove(chan->pciec_card);
+		if (!dev)
 			break;
-		}
 		priv = netdev_priv(dev);
 		chan = priv->priv;
 	}

@@ -551,11 +551,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
 	} else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
 		new_state = CAN_STATE_ERROR_WARNING;
 	} else {
-		/* no error bit (so, no error skb, back to active state) */
-		dev->can.state = CAN_STATE_ERROR_ACTIVE;
+		/* back to (or still in) ERROR_ACTIVE state */
+		new_state = CAN_STATE_ERROR_ACTIVE;
 		pdev->bec.txerr = 0;
 		pdev->bec.rxerr = 0;
-		return 0;
 	}

 	/* state hasn't changed */

@@ -568,8 +567,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,

 	/* allocate an skb to store the error frame */
 	skb = alloc_can_err_skb(netdev, &cf);
-	if (skb)
-		can_change_state(netdev, cf, tx_state, rx_state);
+	can_change_state(netdev, cf, tx_state, rx_state);

 	/* things must be done even in case of OOM */
 	if (new_state == CAN_STATE_BUS_OFF)

@@ -230,7 +230,7 @@
 #define GSWIP_SDMA_PCTRLp(p)		(0xBC0 + ((p) * 0x6))
 #define  GSWIP_SDMA_PCTRL_EN		BIT(0)	/* SDMA Port Enable */
 #define  GSWIP_SDMA_PCTRL_FCEN		BIT(1)	/* Flow Control Enable */
-#define  GSWIP_SDMA_PCTRL_PAUFWD	BIT(1)	/* Pause Frame Forwarding */
+#define  GSWIP_SDMA_PCTRL_PAUFWD	BIT(3)	/* Pause Frame Forwarding */

 #define GSWIP_TABLE_ACTIVE_VLAN		0x01
 #define GSWIP_TABLE_VLAN_MAPPING	0x02

@@ -1035,9 +1035,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
 {
 	struct mt7530_priv *priv = ds->priv;

-	if (!dsa_is_user_port(ds, port))
-		return 0;
-
 	mutex_lock(&priv->reg_mutex);

 	/* Allow the user port gets connected to the cpu port and also

@@ -1060,9 +1057,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
 {
 	struct mt7530_priv *priv = ds->priv;

-	if (!dsa_is_user_port(ds, port))
-		return;
-
 	mutex_lock(&priv->reg_mutex);

 	/* Clear up all port matrix which could be restored in the next

@@ -3211,7 +3205,7 @@ mt7530_probe(struct mdio_device *mdiodev)
 		return -ENOMEM;

 	priv->ds->dev = &mdiodev->dev;
-	priv->ds->num_ports = DSA_MAX_PORTS;
+	priv->ds->num_ports = MT7530_NUM_PORTS;

 	/* Use medatek,mcm property to distinguish hardware type that would
 	 * casues a little bit differences on power-on sequence.

@@ -1193,7 +1193,7 @@ static int nic_register_interrupts(struct nicpf *nic)
 		dev_err(&nic->pdev->dev,
 			"Request for #%d msix vectors failed, returned %d\n",
 			   nic->num_vec, ret);
-		return 1;
+		return ret;
 	}

 	/* Register mailbox interrupt handler */

@@ -1224,7 +1224,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
 	if (ret < 0) {
 		netdev_err(nic->netdev,
 			   "Req for #%d msix vectors failed\n", nic->num_vec);
-		return 1;
+		return ret;
 	}

 	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");

@@ -1243,7 +1243,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
 	if (!nicvf_check_pf_ready(nic)) {
 		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
 		nicvf_unregister_interrupts(nic);
-		return 1;
+		return -EIO;
 	}

 	return 0;

@@ -157,7 +157,7 @@ static const struct {
 	{ ENETC_PM0_TFRM,   "MAC tx frames" },
 	{ ENETC_PM0_TFCS,   "MAC tx fcs errors" },
 	{ ENETC_PM0_TVLAN,  "MAC tx VLAN frames" },
-	{ ENETC_PM0_TERR,   "MAC tx frames" },
+	{ ENETC_PM0_TERR,   "MAC tx frame errors" },
 	{ ENETC_PM0_TUCA,   "MAC tx unicast frames" },
 	{ ENETC_PM0_TMCA,   "MAC tx multicast frames" },
 	{ ENETC_PM0_TBCA,   "MAC tx broadcast frames" },

@@ -517,10 +517,13 @@ static void enetc_port_si_configure(struct enetc_si *si)

 static void enetc_configure_port_mac(struct enetc_hw *hw)
 {
+	int tc;
+
 	enetc_port_wr(hw, ENETC_PM0_MAXFRM,
 		      ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));

-	enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+	for (tc = 0; tc < 8; tc++)
+		enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);

 	enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
 		      ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);

@@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
 static LIST_HEAD(hnae3_client_list);
 static LIST_HEAD(hnae3_ae_dev_list);

+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
+{
+	const struct pci_device_id *pci_id;
+	struct hnae3_ae_dev *ae_dev;
+
+	if (!ae_algo)
+		return;
+
+	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+		if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+			continue;
+
+		pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+		if (!pci_id)
+			continue;
+		if (IS_ENABLED(CONFIG_PCI_IOV))
+			pci_disable_sriov(ae_dev->pdev);
+	}
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
+
 /* we are keeping things simple and using single lock for all the
  * list. This is a non-critical code so other updations, if happen
  * in parallel, can wait.

@@ -853,6 +853,7 @@ struct hnae3_handle {
 int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
 void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);

+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
 void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
 void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);

@@ -1847,7 +1847,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)

 static int hns3_skb_linearize(struct hns3_enet_ring *ring,
 			      struct sk_buff *skb,
-			      u8 max_non_tso_bd_num,
 			      unsigned int bd_num)
 {
 	/* 'bd_num == UINT_MAX' means the skb' fraglist has a

@@ -1864,8 +1863,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
 	 * will not help.
 	 */
 	if (skb->len > HNS3_MAX_TSO_SIZE ||
-	    (!skb_is_gso(skb) && skb->len >
-	     HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
+	    (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
 		u64_stats_update_begin(&ring->syncp);
 		ring->stats.hw_limitation++;
 		u64_stats_update_end(&ring->syncp);

@@ -1900,8 +1898,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 		goto out;
 	}

-	if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
-			       bd_num))
+	if (hns3_skb_linearize(ring, skb, bd_num))
 		return -ENOMEM;

 	bd_num = hns3_tx_bd_count(skb->len);

@@ -3258,6 +3255,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
 {
 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
 	ring->desc[i].addr = 0;
+	ring->desc_cb[i].refill = 0;
 }

 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,

@@ -3336,6 +3334,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)

 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
					 ring->desc_cb[i].page_offset);
+	ring->desc_cb[i].refill = 1;

 	return 0;
 }

@@ -3365,6 +3364,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 {
 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
 	ring->desc_cb[i] = *res_cb;
+	ring->desc_cb[i].refill = 1;
 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
					 ring->desc_cb[i].page_offset);
 	ring->desc[i].rx.bd_base_info = 0;

@@ -3373,6 +3373,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
 {
 	ring->desc_cb[i].reuse_flag = 0;
+	ring->desc_cb[i].refill = 1;
 	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
					 ring->desc_cb[i].page_offset);
 	ring->desc[i].rx.bd_base_info = 0;

@@ -3479,10 +3480,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
 	int ntc = ring->next_to_clean;
 	int ntu = ring->next_to_use;

+	if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
+		return ring->desc_num;
+
 	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
 }

-static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+/* Return true if there is any allocation failure */
+static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 				      int cleand_count)
 {
 	struct hns3_desc_cb *desc_cb;

@@ -3507,7 +3512,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 			hns3_rl_err(ring_to_netdev(ring),
 				    "alloc rx buffer failed: %d\n",
 				    ret);
-			break;
+
+			writel(i, ring->tqp->io_base +
+			       HNS3_RING_RX_RING_HEAD_REG);
+			return true;
 		}
 		hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);

@@ -3520,6 +3528,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 	}

 	writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+	return false;
 }

 static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)

@@ -3824,6 +3833,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
 {
 	ring->desc[ring->next_to_clean].rx.bd_base_info &=
 		cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+	ring->desc_cb[ring->next_to_clean].refill = 0;
 	ring->next_to_clean += 1;

 	if (unlikely(ring->next_to_clean == ring->desc_num))

@@ -4170,6 +4180,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 {
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
 	int unused_count = hns3_desc_unused(ring);
+	bool failure = false;
 	int recv_pkts = 0;
 	int err;

@@ -4178,9 +4189,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 	while (recv_pkts < budget) {
 		/* Reuse or realloc buffers */
 		if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
-			hns3_nic_alloc_rx_buffers(ring, unused_count);
-			unused_count = hns3_desc_unused(ring) -
-					ring->pending_buf;
+			failure = failure ||
+				  hns3_nic_alloc_rx_buffers(ring, unused_count);
+			unused_count = 0;
 		}

 		/* Poll one pkt */

@@ -4199,11 +4210,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 	}

 out:
-	/* Make all data has been write before submit */
-	if (unused_count > 0)
-		hns3_nic_alloc_rx_buffers(ring, unused_count);
-
-	return recv_pkts;
+	return failure ? budget : recv_pkts;
 }

 static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)

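The new refill marker resolves a classic ring-buffer ambiguity: when next_to_clean == next_to_use the ring may be either completely unused or completely in flight. A toy user-space model of the hns3_desc_unused() logic (illustrative names, not driver code):

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8

struct toy_ring {
	int next_to_clean;
	int next_to_use;
	bool refilled[RING_SIZE];
};

static int toy_unused(const struct toy_ring *r)
{
	int ntc = r->next_to_clean, ntu = r->next_to_use;

	/* head == tail and the slot was never refilled: fully unused */
	if (ntc == ntu && !r->refilled[ntc])
		return RING_SIZE;

	return ((ntc >= ntu) ? 0 : RING_SIZE) + ntc - ntu;
}

int main(void)
{
	struct toy_ring r = { 0 };

	printf("unused at start: %d\n", toy_unused(&r)); /* prints 8 */
	return 0;
}
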
@@ -186,11 +186,9 @@ enum hns3_nic_state {

 #define HNS3_MAX_BD_SIZE			65535
 #define HNS3_MAX_TSO_BD_NUM			63U
-#define HNS3_MAX_TSO_SIZE \
-	(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
+#define HNS3_MAX_TSO_SIZE			1048576U
+#define HNS3_MAX_NON_TSO_SIZE			9728U

-#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
-	(HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
-
 #define HNS3_VECTOR_GL0_OFFSET			0x100
 #define HNS3_VECTOR_GL1_OFFSET			0x200

@@ -332,6 +330,7 @@ struct hns3_desc_cb {
 	u32 length;     /* length of the buffer */

 	u16 reuse_flag;
+	u16 refill;

 	/* desc type, used by the ring user to mark the type of the priv data */
 	u16 type;

@ -137,6 +137,15 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
|
|||
*changed = true;
|
||||
break;
|
||||
case IEEE_8021QAZ_TSA_ETS:
|
||||
/* The hardware will switch to sp mode if bandwidth is
|
||||
* 0, so limit ets bandwidth must be greater than 0.
|
||||
*/
|
||||
if (!ets->tc_tx_bw[i]) {
|
||||
dev_err(&hdev->pdev->dev,
|
||||
"tc%u ets bw cannot be 0\n", i);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
|
||||
HCLGE_SCH_MODE_DWRR)
|
||||
*changed = true;
|
||||
|
|
|
@ -1560,8 +1560,11 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
|
|||
|
||||
/* configure TM QCN hw errors */
|
||||
hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
|
||||
if (en)
|
||||
desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE);
|
||||
if (en) {
|
||||
desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);
|
||||
desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
|
||||
}
|
||||
|
||||
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
|
||||
if (ret)
|
||||
|
|
|
@ -50,6 +50,8 @@
|
|||
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
|
||||
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
|
||||
#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
|
||||
#define HCLGE_TM_QCN_ERR_INT_TYPE 0x29
|
||||
#define HCLGE_TM_QCN_FIFO_INT_EN 0xFFFF00
|
||||
#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
|
||||
#define HCLGE_NCSI_ERR_INT_EN 0x3
|
||||
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
|
||||
|
|
|
@ -13065,6 +13065,7 @@ static int hclge_init(void)
|
|||
|
||||
static void hclge_exit(void)
|
||||
{
|
||||
hnae3_unregister_ae_algo_prepare(&ae_algo);
|
||||
hnae3_unregister_ae_algo(&ae_algo);
|
||||
destroy_workqueue(hclge_wq);
|
||||
}
|
||||
|
|
|
@ -752,6 +752,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
|
|||
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
|
||||
for (k = 0; k < hdev->tm_info.num_tc; k++)
|
||||
hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
|
||||
for (; k < HNAE3_MAX_TC; k++)
|
||||
hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -2273,9 +2273,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
|
|||
hdev->reset_attempts = 0;
|
||||
|
||||
hdev->last_reset_time = jiffies;
|
||||
while ((hdev->reset_type =
|
||||
hclgevf_get_reset_level(hdev, &hdev->reset_pending))
|
||||
!= HNAE3_NONE_RESET)
|
||||
hdev->reset_type =
|
||||
hclgevf_get_reset_level(hdev, &hdev->reset_pending);
|
||||
if (hdev->reset_type != HNAE3_NONE_RESET)
|
||||
hclgevf_reset(hdev);
|
||||
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
|
||||
&hdev->reset_state)) {
|
||||
|
|
|
@@ -113,7 +113,8 @@ enum e1000_boards {
    board_pch2lan,
    board_pch_lpt,
    board_pch_spt,
    board_pch_cnp
    board_pch_cnp,
    board_pch_tgp
};

struct e1000_ps_page {

@@ -499,6 +500,7 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_pch_cnp_info;
extern const struct e1000_info e1000_pch_tgp_info;
extern const struct e1000_info e1000_es2_info;

void e1000e_ptp_init(struct e1000_adapter *adapter);

@@ -4813,7 +4813,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
    struct e1000_mac_info *mac = &hw->mac;
    u32 ctrl_ext, txdctl, snoop;
    u32 ctrl_ext, txdctl, snoop, fflt_dbg;
    s32 ret_val;
    u16 i;

@@ -4872,6 +4872,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
        snoop = (u32)~(PCIE_NO_SNOOP_ALL);
    e1000e_set_pcie_no_snoop(hw, snoop);

    /* Enable workaround for packet loss issue on TGP PCH
     * Do not gate DMA clock from the modPHY block
     */
    if (mac->type >= e1000_pch_tgp) {
        fflt_dbg = er32(FFLT_DBG);
        fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK;
        ew32(FFLT_DBG, fflt_dbg);
    }

    ctrl_ext = er32(CTRL_EXT);
    ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
    ew32(CTRL_EXT, ctrl_ext);

@@ -5992,3 +6001,23 @@ const struct e1000_info e1000_pch_cnp_info = {
    .phy_ops = &ich8_phy_ops,
    .nvm_ops = &spt_nvm_ops,
};

const struct e1000_info e1000_pch_tgp_info = {
    .mac = e1000_pch_tgp,
    .flags = FLAG_IS_ICH
         | FLAG_HAS_WOL
         | FLAG_HAS_HW_TIMESTAMP
         | FLAG_HAS_CTRLEXT_ON_LOAD
         | FLAG_HAS_AMT
         | FLAG_HAS_FLASH
         | FLAG_HAS_JUMBO_FRAMES
         | FLAG_APME_IN_WUC,
    .flags2 = FLAG2_HAS_PHY_STATS
          | FLAG2_HAS_EEE,
    .pba = 26,
    .max_hw_frame_size = 9022,
    .get_variants = e1000_get_variants_ich8lan,
    .mac_ops = &ich8_mac_ops,
    .phy_ops = &ich8_phy_ops,
    .nvm_ops = &spt_nvm_ops,
};

@@ -289,6 +289,9 @@
/* Proprietary Latency Tolerance Reporting PCI Capability */
#define E1000_PCI_LTR_CAP_LPT 0xA8

/* Don't gate wake DMA clock */
#define E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK 0x1000

void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
                          bool state);

@@ -51,6 +51,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
    [board_pch_lpt] = &e1000_pch_lpt_info,
    [board_pch_spt] = &e1000_pch_spt_info,
    [board_pch_cnp] = &e1000_pch_cnp_info,
    [board_pch_tgp] = &e1000_pch_tgp_info,
};

struct e1000_reg_info {

@@ -7896,28 +7897,28 @@ static const struct pci_device_id e1000_pci_tbl[] = {
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
    { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },

    { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
@@ -25,6 +25,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
    case ICE_DEV_ID_E810C_BACKPLANE:
    case ICE_DEV_ID_E810C_QSFP:
    case ICE_DEV_ID_E810C_SFP:
    case ICE_DEV_ID_E810_XXV_BACKPLANE:
    case ICE_DEV_ID_E810_XXV_QSFP:
    case ICE_DEV_ID_E810_XXV_SFP:
        hw->mac_type = ICE_MAC_E810;
        break;

@@ -21,6 +21,10 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
/* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E823-C for backplane */

@@ -63,7 +63,8 @@ static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
    struct ice_hw *hw = &pf->hw;

    snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver);
    snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
         hw->api_min_ver, hw->api_patch);

    return 0;
}

@@ -1668,7 +1668,7 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
    for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
        if (hw->tnl.tbl[i].valid &&
            hw->tnl.tbl[i].type == type &&
            idx--)
            idx-- == 0)
            return i;

    WARN_ON_ONCE(1);

@@ -1828,7 +1828,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
    u16 index;

    tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
    index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type);
    index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);

    status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
    if (status) {
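Note on the ice_tunnel_idx_to_entry fix above: the old condition used idx-- as a truth test, so the iteration where idx had already reached 0 evaluated false and the wrong entry was returned; the new form selects the entry where the counter hits zero. A standalone illustration of the same post-decrement pitfall (not driver code):

    #include <stdio.h>

    static int find_nth_valid(const int *valid, int count, unsigned int idx)
    {
        for (int i = 0; i < count; i++)
            if (valid[i] && idx-- == 0) /* fixed form: match when idx hits 0 */
                return i;
        return -1;
    }

    int main(void)
    {
        int valid[] = { 0, 1, 1, 0, 1 };

        /* 0th valid entry is index 1, 2nd valid entry is index 4 */
        printf("%d %d\n", find_nth_valid(valid, 5, 0),
               find_nth_valid(valid, 5, 2)); /* prints "1 4" */
        return 0;
    }

The companion hunk also swaps two arguments that had been passed in the wrong order, which the u16/enum types did not catch.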
@@ -2841,6 +2841,7 @@ void ice_napi_del(struct ice_vsi *vsi)
 */
int ice_vsi_release(struct ice_vsi *vsi)
{
    enum ice_status err;
    struct ice_pf *pf;

    if (!vsi->back)

@@ -2912,6 +2913,10 @@ int ice_vsi_release(struct ice_vsi *vsi)

    ice_fltr_remove_all(vsi);
    ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
    err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
    if (err)
        dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
            vsi->vsi_num, err);
    ice_vsi_delete(vsi);
    ice_vsi_free_q_vectors(vsi);

@@ -3092,6 +3097,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
    prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);

    ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
    ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
    if (ret)
        dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
            vsi->vsi_num, ret);
    ice_vsi_free_q_vectors(vsi);

    /* SR-IOV determines needed MSIX resources all at once instead of per

@@ -4224,6 +4224,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
    if (!pf)
        return -ENOMEM;

    /* initialize Auxiliary index to invalid value */
    pf->aux_idx = -1;

    /* set up for high or low DMA */
    err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
    if (err)

@@ -4615,7 +4618,8 @@ static void ice_remove(struct pci_dev *pdev)

    ice_aq_cancel_waiting_tasks(pf);
    ice_unplug_aux_dev(pf);
    ida_free(&ice_aux_ida, pf->aux_idx);
    if (pf->aux_idx >= 0)
        ida_free(&ice_aux_ida, pf->aux_idx);
    set_bit(ICE_DOWN, pf->state);

    mutex_destroy(&(&pf->hw)->fdir_fltr_lock);

@@ -5016,6 +5020,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
    { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
    { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
    { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
    { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
    { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
    { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
    { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
    { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },

@@ -2070,6 +2070,19 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
    return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 *
 * This function clears the VSI and its RDMA children nodes from scheduler tree
 * for all TCs.
 */
enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
    return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
}

/**
 * ice_get_agg_info - get the aggregator ID
 * @hw: pointer to the hardware structure
@@ -89,6 +89,7 @@ enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
          u8 owner, bool enable);
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);

/* Tx scheduler rate limiter functions */
enum ice_status

@@ -22,8 +22,8 @@
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
#define IGC_DEV_ID_I225_K2 0x3101
#define IGC_DEV_ID_I226_K 0x3102
#define IGC_DEV_ID_I225_LMVP 0x5502
#define IGC_DEV_ID_I226_K 0x5504
#define IGC_DEV_ID_I225_IT 0x0D9F
#define IGC_DEV_ID_I226_LM 0x125B
#define IGC_DEV_ID_I226_V 0x125C

@@ -199,6 +199,9 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);

int mlx5e_fs_init(struct mlx5e_priv *priv);
void mlx5e_fs_cleanup(struct mlx5e_priv *priv);

int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
@@ -10,6 +10,8 @@
#include "en_tc.h"
#include "rep/tc.h"
#include "rep/neigh.h"
#include "lag.h"
#include "lag_mp.h"

struct mlx5e_tc_tun_route_attr {
    struct net_device *out_dev;

@@ -141,8 +141,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
 * Pkt: MAC IP ESP IP L4
 *
 * Transport Mode:
 * SWP: OutL3 InL4
 *      InL3
 * SWP: OutL3 OutL4
 * Pkt: MAC IP ESP L4
 *
 * Tunnel(VXLAN TCP/UDP) over Transport Mode

@@ -171,31 +170,35 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
        return;

    if (!xo->inner_ipproto) {
        eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
        eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
        if (skb->protocol == htons(ETH_P_IPV6))
            eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
        if (xo->proto == IPPROTO_UDP)
        switch (xo->proto) {
        case IPPROTO_UDP:
            eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
            fallthrough;
        case IPPROTO_TCP:
            /* IP | ESP | TCP */
            eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
            break;
        default:
            break;
        }
    } else {
        /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
        switch (xo->inner_ipproto) {
        case IPPROTO_UDP:
            eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
            return;
            fallthrough;
        case IPPROTO_TCP:
            eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
            eseg->swp_inner_l4_offset =
                (skb->csum_start + skb->head - skb->data) / 2;
            if (skb->protocol == htons(ETH_P_IPV6))
                eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
            break;
        default:
            break;
        }
    }

    /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
    switch (xo->inner_ipproto) {
    case IPPROTO_UDP:
        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
        fallthrough;
    case IPPROTO_TCP:
        eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
        eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
        if (skb->protocol == htons(ETH_P_IPV6))
            eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
        break;
    default:
        break;
    }

    return;
}

void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
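Note on the SWP rework above: both switch statements use the pattern where the UDP case sets one extra flag and then falls through into the TCP case, so the shared offset setup is written once. A standalone sketch of that control-flow idiom (illustrative flag and offset values, not the mlx5 WQE layout):

    #include <stdio.h>

    #define FLAG_L4_UDP 0x1

    static void set_offload(int proto, unsigned int *flags, int *l4_off)
    {
        switch (proto) {
        case 17: /* IPPROTO_UDP */
            *flags |= FLAG_L4_UDP;
            /* fall through: the offset setup below applies to UDP too */
        case 6: /* IPPROTO_TCP */
            *l4_off = 42; /* illustrative offset */
            break;
        default:
            break; /* other protocols: leave the segment untouched */
        }
    }

    int main(void)
    {
        unsigned int flags = 0;
        int off = 0;

        set_offload(17, &flags, &off);
        printf("flags=%#x l4_off=%d\n", flags, off); /* flags=0x1 l4_off=42 */
        return 0;
    }

The early "return;" that the patch removes from the UDP tunnel case is what previously prevented the shared TCP setup from running.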
@@ -1186,10 +1186,6 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
    struct mlx5e_flow_table *ft;
    int err;

    priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
    if (!priv->fs.vlan)
        return -ENOMEM;

    ft = &priv->fs.vlan->ft;
    ft->num_groups = 0;

@@ -1198,10 +1194,8 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
    ft_attr.prio = MLX5E_NIC_PRIO;

    ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
    if (IS_ERR(ft->t)) {
        err = PTR_ERR(ft->t);
        goto err_free_t;
    }
    if (IS_ERR(ft->t))
        return PTR_ERR(ft->t);

    ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
    if (!ft->g) {

@@ -1221,9 +1215,6 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
    kfree(ft->g);
err_destroy_vlan_table:
    mlx5_destroy_flow_table(ft->t);
err_free_t:
    kvfree(priv->fs.vlan);
    priv->fs.vlan = NULL;

    return err;
}

@@ -1232,7 +1223,6 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
    mlx5e_del_vlan_rules(priv);
    mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
    kvfree(priv->fs.vlan);
}

static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)

@@ -1351,3 +1341,17 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
    mlx5e_arfs_destroy_tables(priv);
    mlx5e_ethtool_cleanup_steering(priv);
}

int mlx5e_fs_init(struct mlx5e_priv *priv)
{
    priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
    if (!priv->fs.vlan)
        return -ENOMEM;
    return 0;
}

void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
{
    kvfree(priv->fs.vlan);
    priv->fs.vlan = NULL;
}
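Note on the en_fs.c hunks above: the long-lived vlan allocation moves out of the create/destroy path into a dedicated mlx5e_fs_init()/mlx5e_fs_cleanup() pair, so creating the tables no longer mixes allocation with programming and the error unwinding stays symmetric. A minimal sketch of the lifecycle split, with invented names:

    #include <stdio.h>
    #include <stdlib.h>

    struct demo_priv { void *vlan; };

    static int demo_fs_init(struct demo_priv *p)
    {
        p->vlan = calloc(1, 64);       /* long-lived bookkeeping */
        return p->vlan ? 0 : -1;       /* -ENOMEM in the kernel */
    }

    static void demo_fs_cleanup(struct demo_priv *p)
    {
        free(p->vlan);
        p->vlan = NULL;
    }

    static int demo_create_tables(struct demo_priv *p)
    {
        /* only programs state into p->vlan; never allocates it */
        return p->vlan ? 0 : -1;
    }

    int main(void)
    {
        struct demo_priv p = { 0 };

        if (demo_fs_init(&p))
            return 1;
        if (demo_create_tables(&p))
            printf("create failed, but init/cleanup still pair up\n");
        demo_fs_cleanup(&p);
        return 0;
    }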
@@ -4578,6 +4578,12 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,

    mlx5e_timestamp_init(priv);

    err = mlx5e_fs_init(priv);
    if (err) {
        mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
        return err;
    }

    err = mlx5e_ipsec_init(priv);
    if (err)
        mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);

@@ -4595,6 +4601,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
    mlx5e_health_destroy_reporters(priv);
    mlx5e_tls_cleanup(priv);
    mlx5e_ipsec_cleanup(priv);
    mlx5e_fs_cleanup(priv);
}

static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)

@@ -67,6 +67,8 @@
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#include "lag.h"
#include "lag_mp.h"

#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)

@@ -213,19 +213,18 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
    memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}

/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
 * need to set L3 checksum flag for IPsec
 */
static void
ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                struct mlx5_wqe_eth_seg *eseg)
{
    struct xfrm_offload *xo = xfrm_offload(skb);

    eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
    if (skb->encapsulation) {
        eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
    if (xo->inner_ipproto) {
        eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
    } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
        eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
        sq->stats->csum_partial_inner++;
    } else {
        sq->stats->csum_partial++;
    }
}

@@ -234,6 +233,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                struct mlx5e_accel_tx_state *accel,
                struct mlx5_wqe_eth_seg *eseg)
{
    if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
        ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
        return;
    }

    if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
        eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
        if (skb->encapsulation) {

@@ -249,8 +253,6 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
        sq->stats->csum_partial++;
#endif
    } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
        ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
    } else
        sq->stats->csum_none++;
}
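Note on the en_tx.c reordering above: the IPsec-metadata test now runs before the generic CHECKSUM_PARTIAL test, because an IPsec packet that is not CHECKSUM_PARTIAL (an ICMP packet, say) still needs its own flag setup and previously fell through to the csum_none branch. A purely illustrative sketch of why the check order matters:

    #include <stdio.h>

    static const char *classify(int has_ipsec_meta, int has_partial_csum)
    {
        if (has_ipsec_meta)        /* checked first after the fix */
            return "ipsec path";
        if (has_partial_csum)
            return "partial csum path";
        return "no csum";
    }

    int main(void)
    {
        /* IPsec packet without CHECKSUM_PARTIAL reaches the right path */
        printf("%s\n", classify(1, 0));
        printf("%s\n", classify(0, 1));
        printf("%s\n", classify(0, 0));
        return 0;
    }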
@@ -473,10 +473,9 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)

err_min_rate:
    list_del(&group->list);
    err = mlx5_destroy_scheduling_element_cmd(esw->dev,
                          SCHEDULING_HIERARCHY_E_SWITCH,
                          group->tsar_ix);
    if (err)
    if (mlx5_destroy_scheduling_element_cmd(esw->dev,
                        SCHEDULING_HIERARCHY_E_SWITCH,
                        group->tsar_ix))
        NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
err_sched_elem:
    kfree(group);

@@ -442,6 +442,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
    if (!mlx5_lag_is_ready(ldev)) {
        do_bond = false;
    } else {
        /* VF LAG is in multipath mode, ignore bond change requests */
        if (mlx5_lag_is_multipath(dev0))
            return;

        tracker = ldev->tracker;

        do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);

@@ -9,20 +9,23 @@
#include "eswitch.h"
#include "lib/mlx5.h"

static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
{
    return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
}

static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
    if (!mlx5_lag_is_ready(ldev))
        return false;

    if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
        return false;

    return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
                     ldev->pf[MLX5_LAG_P2].dev);
}

static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
{
    return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
}

bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
{
    struct mlx5_lag *ldev;

@@ -24,12 +24,14 @@ struct lag_mp {
void mlx5_lag_mp_reset(struct mlx5_lag *ldev);
int mlx5_lag_mp_init(struct mlx5_lag *ldev);
void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);

#else /* CONFIG_MLX5_ESWITCH */

static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};
static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; }

#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_LAG_MP_H__ */

@@ -758,6 +758,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
            err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
                        "port %u: missing serdes\n",
                        portno);
            of_node_put(portnp);
            goto cleanup_config;
        }
        config->portno = portno;

@@ -969,6 +969,7 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
        target = ocelot_regmap_init(ocelot, res);
        if (IS_ERR(target)) {
            err = PTR_ERR(target);
            of_node_put(portnp);
            goto out_teardown;
        }
@@ -196,7 +196,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
    }

    reg->dst_lmextn = swreg_lmextn(dst);
    reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
    reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);

    return 0;
}

@@ -277,7 +277,7 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
    }

    reg->dst_lmextn = swreg_lmextn(dst);
    reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
    reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);

    return 0;
}
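Note on the one-character nfp fixes above: bitwise OR keeps all bits of both operands, while logical OR collapses the result to exactly 0 or 1. When the destination is a one-bit field, assigning a bitwise OR of wider values can silently truncate. A standalone demonstration of the general hazard (not NFP code; in the driver the operands may already be 0/1, making the logical form the safe, intent-revealing choice):

    #include <stdio.h>

    struct regs { unsigned int src_lmextn : 1; };

    int main(void)
    {
        unsigned int a = 2, b = 0; /* "truthy" value with bit 0 clear */
        struct regs r1, r2;

        r1.src_lmextn = a | b;  /* 2 truncated to 1 bit -> 0 */
        r2.src_lmextn = a || b; /* logical OR -> 1 */
        printf("bitwise=%u logical=%u\n", r1.src_lmextn, r2.src_lmextn);
        return 0;
    }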
@@ -132,16 +132,27 @@ void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
    case MC_CMD_MEDIA_SFP_PLUS:
    case MC_CMD_MEDIA_QSFP_PLUS:
        SET_BIT(FIBRE);
        if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
        if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) {
            SET_BIT(1000baseT_Full);
        if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
            SET_BIT(10000baseT_Full);
        if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
            SET_BIT(1000baseX_Full);
        }
        if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) {
            SET_BIT(10000baseCR_Full);
            SET_BIT(10000baseLR_Full);
            SET_BIT(10000baseSR_Full);
        }
        if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
            SET_BIT(40000baseCR4_Full);
        if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
            SET_BIT(40000baseSR4_Full);
        }
        if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) {
            SET_BIT(100000baseCR4_Full);
        if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
            SET_BIT(100000baseSR4_Full);
        }
        if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) {
            SET_BIT(25000baseCR_Full);
            SET_BIT(25000baseSR_Full);
        }
        if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
            SET_BIT(50000baseCR2_Full);
        break;

@@ -192,15 +203,19 @@ u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
        result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
    if (TEST_BIT(1000baseT_Half))
        result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
    if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
    if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) ||
        TEST_BIT(1000baseX_Full))
        result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
    if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
    if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) ||
        TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) ||
        TEST_BIT(10000baseSR_Full))
        result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
    if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
    if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) ||
        TEST_BIT(40000baseSR4_Full))
        result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
    if (TEST_BIT(100000baseCR4_Full))
    if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full))
        result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
    if (TEST_BIT(25000baseCR_Full))
    if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full))
        result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
    if (TEST_BIT(50000baseCR2_Full))
        result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
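Note on the sfc link-mode hunks above: the two functions form a decode/encode pair, and the patch keeps them symmetric — every link mode the decode path can report must be recognized by the encode path, or that mode silently disappears on a set/get round trip. A small self-check sketch of the invariant, with made-up capability and mode bits:

    #include <assert.h>
    #include <stdio.h>

    enum { CAP_10G = 1 << 0, CAP_25G = 1 << 1 };                /* illustrative */
    enum { M_10G_CR = 1 << 0, M_10G_SR = 1 << 1,
           M_25G_CR = 1 << 2, M_25G_SR = 1 << 3 };

    static unsigned int decode(unsigned int cap)
    {
        unsigned int modes = 0;

        if (cap & CAP_10G)
            modes |= M_10G_CR | M_10G_SR;
        if (cap & CAP_25G)
            modes |= M_25G_CR | M_25G_SR;
        return modes;
    }

    static unsigned int encode(unsigned int modes)
    {
        unsigned int cap = 0;

        if (modes & (M_10G_CR | M_10G_SR))
            cap |= CAP_10G;
        /* dropping M_25G_SR here is exactly the bug class the patch fixes */
        if (modes & (M_25G_CR | M_25G_SR))
            cap |= CAP_25G;
        return cap;
    }

    int main(void)
    {
        for (unsigned int cap = 0; cap < 4; cap++)
            assert(encode(decode(cap)) == cap);
        printf("round trip holds\n");
        return 0;
    }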
@@ -648,7 +648,7 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
    } else if (rc == -EINVAL) {
        fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
    } else if (rc == -EPERM) {
        netif_info(efx, probe, efx->net_dev, "no PTP support\n");
        pci_info(efx->pci_dev, "no PTP support\n");
        return rc;
    } else {
        efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),

@@ -824,7 +824,7 @@ static int efx_ptp_disable(struct efx_nic *efx)
     * should only have been called during probe.
     */
    if (rc == -ENOSYS || rc == -EPERM)
        netif_info(efx, probe, efx->net_dev, "no PTP support\n");
        pci_info(efx->pci_dev, "no PTP support\n");
    else if (rc)
        efx_mcdi_display_error(efx, MC_CMD_PTP,
                       MC_CMD_PTP_IN_DISABLE_LEN,

@@ -1057,7 +1057,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
        return;

    if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
        netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
        pci_info(efx->pci_dev, "no SR-IOV VFs probed\n");
        return;
    }
    if (count > 0 && count > max_vfs)

@@ -736,7 +736,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
        ptp_v2 = PTP_TCR_TSVER2ENA;
        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
        if (priv->synopsys_id != DWMAC_CORE_5_10)
        if (priv->synopsys_id < DWMAC_CORE_4_10)
            ts_event_en = PTP_TCR_TSEVNTENA;
        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -623,16 +623,16 @@ static int receive(struct net_device *dev, int cnt)

/* --------------------------------------------------------------------- */

#ifdef __i386__
#if defined(__i386__) && !defined(CONFIG_UML)
#include <asm/msr.h>
#define GETTICK(x) \
({ \
    if (boot_cpu_has(X86_FEATURE_TSC)) \
        x = (unsigned int)rdtsc(); \
})
#else /* __i386__ */
#else /* __i386__ && !CONFIG_UML */
#define GETTICK(x)
#endif /* __i386__ */
#endif /* __i386__ && !CONFIG_UML */

static void epp_bh(struct work_struct *work)
{
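Note on the baycom_epp hunk above: rdtsc is an x86 hardware instruction that User-Mode Linux cannot provide, hence the added !defined(CONFIG_UML) guard; the macro itself relies on the GNU statement-expression extension ({ ... }). A user-space sketch of the same macro shape (gcc/clang only; the helper functions are stand-ins, not kernel APIs):

    #include <stdio.h>

    static int tsc_available(void) { return 1; }            /* stand-in for boot_cpu_has() */
    static unsigned int read_counter(void) { return 12345; } /* stand-in for rdtsc() */

    #define GETTICK(x)                          \
        ({                                      \
            if (tsc_available())                \
                x = read_counter();             \
        })

    int main(void)
    {
        unsigned int t = 0;

        GETTICK(t);
        printf("tick=%u\n", t);
        return 0;
    }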
@@ -117,6 +117,7 @@ config USB_LAN78XX
    select PHYLIB
    select MICROCHIP_PHY
    select FIXED_PHY
    select CRC32
    help
      This option adds support for Microchip LAN78XX based USB 2
      & USB 3 10/100/1000 Ethernet adapters.

@@ -1788,6 +1788,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
    if (!dev->rx_urb_size)
        dev->rx_urb_size = dev->hard_mtu;
    dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
    if (dev->maxpacket == 0) {
        /* that is a broken device */
        goto out4;
    }

    /* let userspace know we have a random address */
    if (ether_addr_equal(net->dev_addr, node_id))
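Note on the usbnet hunk above: maxpacket comes from a device-supplied descriptor and is later used as a divisor in the transmit path, so a zero from a broken (or malicious) device must be rejected at probe time. An illustrative sketch, not the usbnet code:

    #include <stdio.h>

    static int probe_check(unsigned int maxpacket)
    {
        if (maxpacket == 0)
            return -1; /* broken device: refuse to bind */
        return 0;
    }

    static int needs_zlp(unsigned int urb_len, unsigned int maxpacket)
    {
        /* a transfer that is an exact multiple of maxpacket needs a
         * zero-length packet; this is where a 0 divisor would trap
         */
        return urb_len % maxpacket == 0;
    }

    int main(void)
    {
        unsigned int mp = 0; /* as reported by a broken device */

        if (probe_check(mp)) {
            printf("probe rejected: maxpacket == 0\n");
            return 0;
        }
        printf("zlp=%d\n", needs_zlp(512, mp));
        return 0;
    }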
@@ -1360,8 +1360,6 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
    bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
    bool is_ndisc = ipv6_ndisc_frame(skb);

    nf_reset_ct(skb);

    /* loopback, multicast & non-ND link-local traffic; do not push through
     * packet taps again. Reset pkt_type for upper layers to process skb.
     * For strict packets with a source LLA, determine the dst using the

@@ -1424,8 +1422,6 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
    skb->skb_iif = vrf_dev->ifindex;
    IPCB(skb)->flags |= IPSKB_L3SLAVE;

    nf_reset_ct(skb);

    if (ipv4_is_multicast(ip_hdr(skb)->daddr))
        goto out;

@@ -1226,11 +1226,9 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
                  &reset_cmd,
                  ST95HF_RESET_CMD_LEN,
                  ASYNC);
    if (result) {
    if (result)
        dev_err(&spictx->spidev->dev,
            "ST95HF reset failed in remove() err = %d\n", result);
        return result;
    }

    /* wait for 3 ms to complete the controller reset process */
    usleep_range(3000, 4000);

@@ -1239,7 +1237,7 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
    if (stcontext->st95hf_supply)
        regulator_disable(stcontext->st95hf_supply);

    return result;
    return 0;
}

/* Register as SPI protocol driver */

@@ -21,6 +21,7 @@
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>

#include "of_private.h"

@@ -46,6 +47,7 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
        err = memblock_mark_nomap(base, size);
        if (err)
            memblock_free(base, size);
        kmemleak_ignore_phys(base);
    }

    return err;
@@ -170,6 +170,7 @@ static void ptp_clock_release(struct device *dev)
    struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);

    ptp_cleanup_pin_groups(ptp);
    kfree(ptp->vclock_index);
    mutex_destroy(&ptp->tsevq_mux);
    mutex_destroy(&ptp->pincfg_mux);
    mutex_destroy(&ptp->n_vclocks_mux);

@@ -283,15 +284,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
    /* Create a posix clock and link it to the device. */
    err = posix_clock_register(&ptp->clock, &ptp->dev);
    if (err) {
        if (ptp->pps_source)
            pps_unregister_source(ptp->pps_source);

        if (ptp->kworker)
            kthread_destroy_worker(ptp->kworker);

        put_device(&ptp->dev);

        pr_err("failed to create posix clock\n");
        goto no_clock;
        return ERR_PTR(err);
    }

    return ptp;

no_clock:
    if (ptp->pps_source)
        pps_unregister_source(ptp->pps_source);
no_pps:
    ptp_cleanup_pin_groups(ptp);
no_pin_groups:

@@ -321,8 +327,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
    ptp->defunct = 1;
    wake_up_interruptible(&ptp->tsev_wq);

    kfree(ptp->vclock_index);

    if (ptp->kworker) {
        kthread_cancel_delayed_work_sync(&ptp->aux_work);
        kthread_destroy_worker(ptp->kworker);
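Note on the ptp_clock hunks above: once a struct device with a release() callback exists, error paths should drop the last reference with put_device() and let release() free what it owns (here, vclock_index), rather than freeing it by hand and risking a double free or a leak. A minimal sketch of that ownership rule, with invented names:

    #include <stdio.h>
    #include <stdlib.h>

    struct demo_dev {
        int refs;
        void (*release)(struct demo_dev *);
        char *vclock_index; /* owned by release() */
    };

    static void demo_release(struct demo_dev *d)
    {
        free(d->vclock_index); /* freed exactly once, here */
        printf("release: freed owned memory\n");
    }

    static void demo_put(struct demo_dev *d)
    {
        if (--d->refs == 0)
            d->release(d);
    }

    int main(void)
    {
        struct demo_dev d = { .refs = 1, .release = demo_release,
                              .vclock_index = malloc(16) };

        /* error path: undo what release() does NOT own, then just put */
        demo_put(&d);
        return 0;
    }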
@@ -31,10 +31,10 @@ int kvm_arch_ptp_init(void)

    ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
                 KVM_CLOCK_PAIRING_WALLCLOCK);
    if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
    if (ret == -KVM_ENOSYS)
        return -ENODEV;

    return 0;
    return ret;
}

int kvm_arch_ptp_get_clock(struct timespec64 *ts)

@@ -2330,7 +2330,6 @@ static int unsafe_request_wait(struct inode *inode)

int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
    struct ceph_file_info *fi = file->private_data;
    struct inode *inode = file->f_mapping->host;
    struct ceph_inode_info *ci = ceph_inode(inode);
    u64 flush_tid;

@@ -2365,14 +2364,9 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
    if (err < 0)
        ret = err;

    if (errseq_check(&ci->i_meta_err, READ_ONCE(fi->meta_err))) {
        spin_lock(&file->f_lock);
        err = errseq_check_and_advance(&ci->i_meta_err,
                           &fi->meta_err);
        spin_unlock(&file->f_lock);
        if (err < 0)
            ret = err;
    }
    err = file_check_and_advance_wb_err(file);
    if (err < 0)
        ret = err;
out:
    dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
    return ret;

@@ -233,7 +233,6 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,

    spin_lock_init(&fi->rw_contexts_lock);
    INIT_LIST_HEAD(&fi->rw_contexts);
    fi->meta_err = errseq_sample(&ci->i_meta_err);
    fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

    return 0;

@@ -541,8 +541,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)

    ceph_fscache_inode_init(ci);

    ci->i_meta_err = 0;

    return &ci->vfs_inode;
}