Linux 5.15-rc2

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmFH1aYeHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiG8OkH/R6C6xduJ7I/LkUK
 S3bATqOwAnpR8Z/1cbNIj7NG7wFEiRbOS0xgnxteUgo34xGVWUoRgdM2wvi4mnst
 fvWsM/8765g4ok5WtmxZ+ZeClVFo2eGWyZqfsXMlisPNwswceSPOXDPMcEKlROZA
 CGFqCh0hmma3+waAKf85ZfFR8i/gndbI574MKq00OPNr02Z8GSqQMJSaCzNbGspc
 AMpdXezEG6CnwRUnlmYZoXOBP9+/ItAMZ6RTPulitJKT3PmuHcs9Pnu6qqbW2X9B
 fRW3tQNE5Q4oLHPb7H9m13jMNwuIP4DI3b3qnUSjl0g4OKBX0q/Uw1UfntC5Ejxn
 9d7jvZ8=
 =6RN4
 -----END PGP SIGNATURE-----

Merge tag 'v5.15-rc2' into 'android-mainline'

Linux 5.15-rc2

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I065371bbb8a3159032f5804dbf0c5fed37ba4722
Greg Kroah-Hartman 2021-09-20 08:28:01 +02:00
commit 54244a0c1d
36 changed files with 460 additions and 263 deletions

View File

@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Opossums on Parade
# *DOCUMENTATION*

View File

@ -20,7 +20,7 @@ config ALPHA
select NEED_SG_DMA_LENGTH
select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_PCI_IOMAP if PCI
select GENERIC_PCI_IOMAP
select AUTO_IRQ_AFFINITY if SMP
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION

View File

@ -513,12 +513,15 @@ void ioport_unmap(void __iomem *addr)
}
}
#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
if (!INDIRECT_ADDR(addr)) {
iounmap(addr);
}
}
EXPORT_SYMBOL(pci_iounmap);
#endif
EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
@ -544,4 +547,3 @@ EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
EXPORT_SYMBOL(pci_iounmap);

View File

@ -18,6 +18,7 @@
#include <asm/switch_to.h>
#include <asm/syscall.h>
#include <asm/time.h>
#include <asm/tm.h>
#include <asm/unistd.h>
#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
@ -136,6 +137,48 @@ notrace long system_call_exception(long r3, long r4, long r5,
*/
irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
/*
* If system call is called with TM active, set _TIF_RESTOREALL to
* prevent RFSCV being used to return to userspace, because POWER9
* TM implementation has problems with this instruction returning to
* transactional state. Final register values are not relevant because
* the transaction will be aborted upon return anyway. Or in the case
* of an unsupported_scv SIGILL fault, the return state does not
* matter much because it's an edge case.
*/
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
current_thread_info()->flags |= _TIF_RESTOREALL;
/*
* If the system call was made with a transaction active, doom it and
* return without performing the system call. Unless it was an
* unsupported scv vector, in which case it's treated like an illegal
* instruction.
*/
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
!trap_is_unsupported_scv(regs)) {
/* Enable TM in the kernel, and disable EE (for scv) */
hard_irq_disable();
mtmsr(mfmsr() | MSR_TM);
/* tabort, this dooms the transaction, nothing else */
asm volatile(".long 0x7c00071d | ((%0) << 16)"
:: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
/*
* Userspace will never see the return value. Execution will
* resume after the tbegin. of the aborted transaction with the
* checkpointed register state. A context switch could occur
* or signal delivered to the process before resuming the
* doomed transaction context, but that should all be handled
* as expected.
*/
return -ENOSYS;
}
#endif // CONFIG_PPC_TRANSACTIONAL_MEM
local_irq_enable();
if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {

View File

@ -12,7 +12,6 @@
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/ptrace.h>
#include <asm/tm.h>
.section ".toc","aw"
SYS_CALL_TABLE:
@ -55,12 +54,6 @@ COMPAT_SYS_CALL_TABLE:
.globl system_call_vectored_\name
system_call_vectored_\name:
_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
bne tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
SCV_INTERRUPT_TO_KERNEL
mr r10,r1
ld r1,PACAKSAVE(r13)
@ -247,12 +240,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_common_real)
.globl system_call_common
system_call_common:
_ASM_NOKPROBE_SYMBOL(system_call_common)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
bne tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
mr r10,r1
ld r1,PACAKSAVE(r13)
std r10,0(r1)
@ -425,34 +412,6 @@ SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tabort_syscall:
_ASM_NOKPROBE_SYMBOL(tabort_syscall)
/* Firstly we need to enable TM in the kernel */
mfmsr r10
li r9, 1
rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
mtmsrd r10, 0
/* tabort, this dooms the transaction, nothing else */
li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
TABORT(R9)
/*
* Return directly to userspace. We have corrupted user register state,
* but userspace will never see that register state. Execution will
* resume after the tbegin of the aborted transaction with the
* checkpointed register state.
*/
li r9, MSR_RI
andc r10, r10, r9
mtmsrd r10, 1
mtspr SPRN_SRR0, r11
mtspr SPRN_SRR1, r12
RFI_TO_USER
b . /* prevent speculative execution */
#endif
/*
* If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
* touched, no exit work created, then this can be used.

View File

@ -249,6 +249,7 @@ void machine_check_queue_event(void)
{
int index;
struct machine_check_event evt;
unsigned long msr;
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return;
@ -262,8 +263,20 @@ void machine_check_queue_event(void)
memcpy(&local_paca->mce_info->mce_event_queue[index],
&evt, sizeof(evt));
/* Queue irq work to process this event later. */
irq_work_queue(&mce_event_process_work);
/*
* Queue irq work to process this event later. Before
* queuing the work, enable translation for a non-radix LPAR,
* as irq_work_queue() may try to access memory outside the
* RMO region.
*/
if (!radix_enabled() && firmware_has_feature(FW_FEATURE_LPAR)) {
msr = mfmsr();
mtmsr(msr | MSR_IR | MSR_DR);
irq_work_queue(&mce_event_process_work);
mtmsr(msr);
} else {
irq_work_queue(&mce_event_process_work);
}
}
void mce_common_process_ue(struct pt_regs *regs,

View File

@ -2536,7 +2536,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
/* The following code handles the fake_suspend = 1 case */
mflr r0
std r0, PPC_LR_STKOFF(r1)
stdu r1, -PPC_MIN_STKFRM(r1)
stdu r1, -TM_FRAME_SIZE(r1)
/* Turn on TM. */
mfmsr r8
@ -2551,10 +2551,42 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
nop
/*
* It's possible that treclaim. may modify registers, if we have lost
* track of fake-suspend state in the guest due to it using rfscv.
* Save and restore registers in case this occurs.
*/
mfspr r3, SPRN_DSCR
mfspr r4, SPRN_XER
mfspr r5, SPRN_AMR
/* SPRN_TAR would need to be saved here if the kernel ever used it */
mfcr r12
SAVE_NVGPRS(r1)
SAVE_GPR(2, r1)
SAVE_GPR(3, r1)
SAVE_GPR(4, r1)
SAVE_GPR(5, r1)
stw r12, 8(r1)
std r1, HSTATE_HOST_R1(r13)
/* We have to treclaim here because that's the only way to do S->N */
li r3, TM_CAUSE_KVM_RESCHED
TRECLAIM(R3)
GET_PACA(r13)
ld r1, HSTATE_HOST_R1(r13)
REST_GPR(2, r1)
REST_GPR(3, r1)
REST_GPR(4, r1)
REST_GPR(5, r1)
lwz r12, 8(r1)
REST_NVGPRS(r1)
mtspr SPRN_DSCR, r3
mtspr SPRN_XER, r4
mtspr SPRN_AMR, r5
mtcr r12
HMT_MEDIUM
/*
* We were in fake suspend, so we are not going to save the
* register state as the guest checkpointed state (since
@ -2582,7 +2614,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
std r5, VCPU_TFHAR(r9)
std r6, VCPU_TFIAR(r9)
addi r1, r1, PPC_MIN_STKFRM
addi r1, r1, TM_FRAME_SIZE
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr

View File

@ -348,9 +348,9 @@ static int xics_host_map(struct irq_domain *domain, unsigned int virq,
if (xics_ics->check(xics_ics, hwirq))
return -EINVAL;
/* No chip data for the XICS domain */
/* Let the ICS be the chip data for the XICS domain. For ICS native */
irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
NULL, handle_fasteoi_irq, NULL, NULL);
xics_ics, handle_fasteoi_irq, NULL, NULL);
return 0;
}
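With the ICS stored as chip data, any chip callback for a mapped interrupt can recover the ICS from its irq_data instead of reaching for a global. A minimal sketch of the retrieval side (the callback is hypothetical; irq_data_get_irq_chip_data() and irqd_to_hwirq() are the standard helpers):

static void demo_ics_eoi(struct irq_data *d)
{
	/* xics_host_map() above stored the ICS as the chip data */
	struct ics *ics = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	/* ... issue the EOI for hwirq through this ICS ... */
}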

View File

@ -80,30 +80,30 @@ $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
$(call if_changed,lzo)
$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2
$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
$(call if_changed,uimage,bzip2)
$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz
$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
$(call if_changed,uimage,gzip)
$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
$(call if_changed,uimage,lzma)
$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz FORCE
$(call if_changed,uimage,xz)
$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
$(call if_changed,uimage,lzo)
$(obj)/uImage.bin: $(obj)/vmlinux.bin
$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
$(call if_changed,uimage,none)
OBJCOPYFLAGS_vmlinux.srec := -I binary -O srec
$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux
$(obj)/vmlinux.srec: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
OBJCOPYFLAGS_uImage.srec := -I binary -O srec
$(obj)/uImage.srec: $(obj)/uImage
$(obj)/uImage.srec: $(obj)/uImage FORCE
$(call if_changed,objcopy)
$(obj)/uImage: $(obj)/uImage.$(suffix-y)

View File

@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
config ARCH_HIBERNATION_POSSIBLE
def_bool y
config ARCH_NR_GPIO
int
default 1024 if X86_64
default 512
config ARCH_SUSPEND_POSSIBLE
def_bool y

View File

@ -4,6 +4,12 @@
tune = $(call cc-option,-mtune=$(1),$(2))
ifdef CONFIG_CC_IS_CLANG
align := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0)
else
align := -falign-functions=0 -falign-jumps=0 -falign-loops=0
endif
cflags-$(CONFIG_M486SX) += -march=i486
cflags-$(CONFIG_M486) += -march=i486
cflags-$(CONFIG_M586) += -march=i586
@ -19,11 +25,11 @@ cflags-$(CONFIG_MK6) += -march=k6
# They make zero difference whatsoever to performance at this time.
cflags-$(CONFIG_MK7) += -march=athlon
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
cflags-$(CONFIG_MCRUSOE) += -march=i686 -falign-functions=0 -falign-jumps=0 -falign-loops=0
cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) -falign-functions=0 -falign-jumps=0 -falign-loops=0
cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)
cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)
cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) -falign-functions=0 -falign-jumps=0 -falign-loops=0
cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
cflags-$(CONFIG_MVIAC7) += -march=i686
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)

View File

@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
static void kill_me_now(struct callback_head *ch)
{
struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
p->mce_count = 0;
force_sig(SIGBUS);
}
@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
int flags = MF_ACTION_REQUIRED;
int ret;
p->mce_count = 0;
pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
if (!p->mce_ripv)
@ -1290,17 +1294,34 @@ static void kill_me_maybe(struct callback_head *cb)
}
}
static void queue_task_work(struct mce *m, int kill_current_task)
static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
{
current->mce_addr = m->addr;
current->mce_kflags = m->kflags;
current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
current->mce_whole_page = whole_page(m);
int count = ++current->mce_count;
if (kill_current_task)
current->mce_kill_me.func = kill_me_now;
else
current->mce_kill_me.func = kill_me_maybe;
/* First call, save all the details */
if (count == 1) {
current->mce_addr = m->addr;
current->mce_kflags = m->kflags;
current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
current->mce_whole_page = whole_page(m);
if (kill_current_task)
current->mce_kill_me.func = kill_me_now;
else
current->mce_kill_me.func = kill_me_maybe;
}
/* Ten is likely overkill. Don't expect more than two faults before task_work() */
if (count > 10)
mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
/* Second or later call, make sure page address matches the one from first call */
if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
mce_panic("Consecutive machine checks to different user pages", m, msg);
/* Do not call task_work_add() more than once */
if (count > 1)
return;
task_work_add(current, &current->mce_kill_me, TWA_RESUME);
}
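For context, task_work_add() queues a callback_head that runs when the task next returns to user space; adding the same callback_head twice would corrupt the queue, which is why only the first machine check in a burst is allowed to call it. A minimal sketch of the API, with a hypothetical callback and work item:

static void demo_work_fn(struct callback_head *head)
{
	/* runs in process context, on the way back to user space */
}

static void demo_queue_once(void)
{
	static struct callback_head demo_work;

	init_task_work(&demo_work, demo_work_fn);
	task_work_add(current, &demo_work, TWA_RESUME); /* once per burst */
}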
@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
/* If this triggers there is no way to recover. Die hard. */
BUG_ON(!on_thread_stack() || !user_mode(regs));
queue_task_work(&m, kill_current_task);
queue_task_work(&m, msg, kill_current_task);
} else {
/*
@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
}
if (m.kflags & MCE_IN_KERNEL_COPYIN)
queue_task_work(&m, kill_current_task);
queue_task_work(&m, msg, kill_current_task);
}
out:
mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);

View File

@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
return 0;
p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d))
if (!p4d_present(*p4d))
return 0;
pud = pud_offset(p4d, addr);
if (pud_none(*pud))
if (!pud_present(*pud))
return 0;
if (pud_large(*pud))
return pfn_valid(pud_pfn(*pud));
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd))
if (!pmd_present(*pmd))
return 0;
if (pmd_large(*pmd))
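The distinction being fixed: pXd_none() is true only for an all-zero entry, so an entry that exists but has its present bit clear slips past the check and is then dereferenced. An illustrative fragment, not from this commit:

	if (pmd_none(*pmd))        /* catches only a never-populated entry */
		return 0;
	if (!pmd_present(*pmd))    /* also catches an existing entry whose
	                            * present bit is clear */
		return 0;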

View File

@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
int err = 0;
start = sanitize_phys(start);
end = sanitize_phys(end);
/*
* The end address passed into this function is exclusive, but
* sanitize_phys() expects an inclusive address.
*/
end = sanitize_phys(end - 1) + 1;
if (start >= end) {
WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
start, end - 1, cattr_name(req_type));
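A worked example with illustrative values: sanitize_phys() validates (and may clamp or warn on) the single address it is given, so for a request covering [0x1000, 0x2000) the inclusive last byte 0x1fff is what must be checked; a region ending exactly at the top of the physical address space would otherwise trip the check spuriously.

	u64 start = 0x1000, end = 0x2000;  /* [start, end) */

	start = sanitize_phys(start);
	end = sanitize_phys(end - 1) + 1;  /* validate 0x1fff, keep end exclusive */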

View File

@ -167,8 +167,6 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
bool connected = false;
WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev));
if (vc4_hdmi->hpd_gpio &&
gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio)) {
connected = true;
@ -189,12 +187,10 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
}
}
pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_connected;
}
cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
pm_runtime_put(&vc4_hdmi->pdev->dev);
return connector_status_disconnected;
}
@ -436,7 +432,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *cstate = connector->state;
struct drm_crtc *crtc = cstate->crtc;
struct drm_crtc *crtc = encoder->crtc;
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
union hdmi_infoframe frame;
int ret;
@ -541,11 +537,8 @@ static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder,
static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *cstate = connector->state;
struct drm_crtc *crtc = cstate->crtc;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
if (!vc4_hdmi_supports_scrambling(encoder, mode))
return;
@ -566,18 +559,17 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder)
static void vc4_hdmi_disable_scrambling(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_connector_state *cstate = connector->state;
struct drm_crtc *crtc = encoder->crtc;
/*
* At boot, connector->state will be NULL. Since we don't know the
* At boot, encoder->crtc will be NULL. Since we don't know the
* state of the scrambler and in order to avoid any
* inconsistency, let's disable it all the time.
*/
if (cstate && !vc4_hdmi_supports_scrambling(encoder, &cstate->crtc->mode))
if (crtc && !vc4_hdmi_supports_scrambling(encoder, &crtc->mode))
return;
if (cstate && !vc4_hdmi_mode_needs_scrambling(&cstate->crtc->mode))
if (crtc && !vc4_hdmi_mode_needs_scrambling(&crtc->mode))
return;
if (delayed_work_pending(&vc4_hdmi->scrambling_work))
@ -635,6 +627,7 @@ static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder,
vc4_hdmi->variant->phy_disable(vc4_hdmi);
clk_disable_unprepare(vc4_hdmi->pixel_bvb_clock);
clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
ret = pm_runtime_put(&vc4_hdmi->pdev->dev);
@ -898,9 +891,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
vc4_hdmi_encoder_get_connector_state(encoder, state);
struct vc4_hdmi_connector_state *vc4_conn_state =
conn_state_to_vc4_hdmi_conn_state(conn_state);
struct drm_crtc_state *crtc_state =
drm_atomic_get_new_crtc_state(state, conn_state->crtc);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
unsigned long bvb_rate, pixel_rate, hsm_rate;
int ret;
@ -947,6 +938,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
return;
}
ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
if (ret) {
DRM_ERROR("Failed to turn on HSM clock: %d\n", ret);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
vc4_hdmi_cec_update_clk_div(vc4_hdmi);
if (pixel_rate > 297000000)
@ -959,6 +957,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_set_min_rate(vc4_hdmi->pixel_bvb_clock, bvb_rate);
if (ret) {
DRM_ERROR("Failed to set pixel bvb clock rate: %d\n", ret);
clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@ -966,6 +965,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
ret = clk_prepare_enable(vc4_hdmi->pixel_bvb_clock);
if (ret) {
DRM_ERROR("Failed to turn on pixel bvb clock: %d\n", ret);
clk_disable_unprepare(vc4_hdmi->hsm_clock);
clk_disable_unprepare(vc4_hdmi->pixel_clock);
return;
}
@ -985,11 +985,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct drm_connector_state *conn_state =
vc4_hdmi_encoder_get_connector_state(encoder, state);
struct drm_crtc_state *crtc_state =
drm_atomic_get_new_crtc_state(state, conn_state->crtc);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@ -1012,11 +1008,7 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder,
static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
struct drm_connector_state *conn_state =
vc4_hdmi_encoder_get_connector_state(encoder, state);
struct drm_crtc_state *crtc_state =
drm_atomic_get_new_crtc_state(state, conn_state->crtc);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC;
@ -1204,8 +1196,8 @@ static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
static void vc4_hdmi_set_n_cts(struct vc4_hdmi *vc4_hdmi, unsigned int samplerate)
{
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_crtc *crtc = connector->state->crtc;
struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
struct drm_crtc *crtc = encoder->crtc;
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
u32 n, cts;
u64 tmp;
@ -1238,13 +1230,13 @@ static inline struct vc4_hdmi *dai_to_hdmi(struct snd_soc_dai *dai)
static int vc4_hdmi_audio_startup(struct device *dev, void *data)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
struct drm_connector *connector = &vc4_hdmi->connector;
struct drm_encoder *encoder = &vc4_hdmi->encoder.base.base;
/*
* If the HDMI encoder hasn't probed, or the encoder is
* currently in DVI mode, treat the codec dai as missing.
*/
if (!connector->state || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
if (!encoder->crtc || !(HDMI_READ(HDMI_RAM_PACKET_CONFIG) &
VC4_HDMI_RAM_PACKET_ENABLE))
return -ENODEV;
@ -2114,29 +2106,6 @@ static int vc5_hdmi_init_resources(struct vc4_hdmi *vc4_hdmi)
return 0;
}
#ifdef CONFIG_PM
static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
clk_disable_unprepare(vc4_hdmi->hsm_clock);
return 0;
}
static int vc4_hdmi_runtime_resume(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
int ret;
ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
if (ret)
return ret;
return 0;
}
#endif
static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
{
const struct vc4_hdmi_variant *variant = of_device_get_match_data(dev);
@ -2391,18 +2360,11 @@ static const struct of_device_id vc4_hdmi_dt_match[] = {
{}
};
static const struct dev_pm_ops vc4_hdmi_pm_ops = {
SET_RUNTIME_PM_OPS(vc4_hdmi_runtime_suspend,
vc4_hdmi_runtime_resume,
NULL)
};
struct platform_driver vc4_hdmi_driver = {
.probe = vc4_hdmi_dev_probe,
.remove = vc4_hdmi_dev_remove,
.driver = {
.name = "vc4_hdmi",
.of_match_table = vc4_hdmi_dt_match,
.pm = &vc4_hdmi_pm_ops,
},
};

View File

@ -973,7 +973,7 @@ static inline void tx_on(struct scc_priv *priv)
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
set_dma_addr(priv->param.dma,
(int) priv->tx_buf[priv->tx_tail] + n);
virt_to_bus(priv->tx_buf[priv->tx_tail]) + n);
set_dma_count(priv->param.dma,
priv->tx_len[priv->tx_tail] - n);
release_dma_lock(flags);
@ -1020,7 +1020,7 @@ static inline void rx_on(struct scc_priv *priv)
flags = claim_dma_lock();
set_dma_mode(priv->param.dma, DMA_MODE_READ);
set_dma_addr(priv->param.dma,
(int) priv->rx_buf[priv->rx_head]);
virt_to_bus(priv->rx_buf[priv->rx_head]));
set_dma_count(priv->param.dma, BUF_SIZE);
release_dma_lock(flags);
enable_dma(priv->param.dma);
@ -1233,7 +1233,7 @@ static void special_condition(struct scc_priv *priv, int rc)
if (priv->param.dma >= 0) {
flags = claim_dma_lock();
set_dma_addr(priv->param.dma,
(int) priv->rx_buf[priv->rx_head]);
virt_to_bus(priv->rx_buf[priv->rx_head]));
set_dma_count(priv->param.dma, BUF_SIZE);
release_dma_lock(flags);
} else {
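The bug pattern being fixed: casting a kernel virtual address to int truncates it on 64-bit builds, and even untruncated it was never a bus address. A minimal sketch of the corrected ISA-DMA idiom (chan, buf, and len are hypothetical):

	unsigned long flags = claim_dma_lock();

	set_dma_mode(chan, DMA_MODE_WRITE);
	set_dma_addr(chan, virt_to_bus(buf));  /* bus address, not raw pointer bits */
	set_dma_count(chan, len);
	release_dma_lock(flags);
	enable_dma(chan);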

View File

@ -1023,16 +1023,7 @@ static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
port &= IO_SPACE_LIMIT;
return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#define __pci_ioport_unmap __pci_ioport_unmap
static inline void __pci_ioport_unmap(void __iomem *p)
{
uintptr_t start = (uintptr_t) PCI_IOBASE;
uintptr_t addr = (uintptr_t) p;
if (addr >= start && addr < start + IO_SPACE_LIMIT)
return;
iounmap(p);
}
#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif
#ifndef ioport_unmap
@ -1048,21 +1039,10 @@ extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_HAS_IOPORT_MAP */
#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
#ifndef __pci_ioport_unmap
static inline void __pci_ioport_unmap(void __iomem *p) {}
#endif
#ifndef pci_iounmap
#define pci_iounmap pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
__pci_ioport_unmap(p);
}
#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
#endif
#endif
#endif /* CONFIG_GENERIC_IOMAP */
#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr

View File

@ -110,16 +110,6 @@ static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
}
#endif
#ifdef CONFIG_PCI
/* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
struct pci_dev;
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
#elif defined(CONFIG_GENERIC_IOMAP)
struct pci_dev;
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{ }
#endif
#include <asm-generic/pci_iomap.h>
#endif

View File

@ -18,6 +18,7 @@ extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
unsigned long offset,
unsigned long maxlen);
extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
/* Create a virtual mapping cookie for a port on a given PCI device.
* Do not call this directly, it exists to make it easier for architectures
* to override */
@ -50,6 +51,8 @@ static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
{
return NULL;
}
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{ }
#endif
#endif /* __ASM_GENERIC_PCI_IOMAP_H */

View File

@ -1477,6 +1477,7 @@ struct task_struct {
mce_whole_page : 1,
__mce_reserved : 62;
struct callback_head mce_kill_me;
int mce_count;
#endif
ANDROID_VENDOR_DATA_ARRAY(1, 64);
ANDROID_OEM_DATA_ARRAY(1, 2);

View File

@ -10194,7 +10194,7 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
return;
if (ifh->nr_file_filters) {
mm = get_task_mm(event->ctx->task);
mm = get_task_mm(task);
if (!mm)
goto restart;

View File

@ -41,6 +41,12 @@
* The risk of writer starvation is there, but the pathological use cases
* which trigger it are not necessarily the typical RT workloads.
*
* Fast-path orderings:
* The lock/unlock of readers can run in fast paths: lock and unlock are only
* atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
* semantics of rwbase_rt. Atomic ops should thus provide _acquire()
* and _release() (or stronger).
*
* Common code shared between RT rw_semaphore and rwlock
*/
@ -53,6 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
* set.
*/
for (r = atomic_read(&rwb->readers); r < 0;) {
/* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
return 1;
}
@ -162,6 +169,8 @@ static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
/*
* rwb->readers can only hit 0 when a writer is waiting for the
* active readers to leave the critical section.
*
* dec_and_test() is fully ordered, provides RELEASE.
*/
if (unlikely(atomic_dec_and_test(&rwb->readers)))
__rwbase_read_unlock(rwb, state);
@ -172,7 +181,11 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
{
struct rt_mutex_base *rtm = &rwb->rtmutex;
atomic_add(READER_BIAS - bias, &rwb->readers);
/*
* _release() is needed in case that reader is in fast path, pairing
* with atomic_try_cmpxchg() in rwbase_read_trylock(), provides RELEASE
*/
(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
rwbase_rtmutex_unlock(rtm);
}
@ -196,6 +209,23 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}
static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
{
/* Can do without CAS because we're serialized by wait_lock. */
lockdep_assert_held(&rwb->rtmutex.wait_lock);
/*
* _acquire is needed in case the reader is in the fast path, pairing
* with rwbase_read_unlock(), provides ACQUIRE.
*/
if (!atomic_read_acquire(&rwb->readers)) {
atomic_set(&rwb->readers, WRITER_BIAS);
return 1;
}
return 0;
}
static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
unsigned int state)
{
@ -210,34 +240,30 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
atomic_sub(READER_BIAS, &rwb->readers);
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
/*
* set_current_state() for rw_semaphore
* current_save_and_set_rtlock_wait_state() for rwlock
*/
rwbase_set_and_save_current_state(state);
if (__rwbase_write_trylock(rwb))
goto out_unlock;
/* Block until all readers have left the critical section. */
for (; atomic_read(&rwb->readers);) {
rwbase_set_and_save_current_state(state);
for (;;) {
/* Optimized out for rwlocks */
if (rwbase_signal_pending_state(state, current)) {
__set_current_state(TASK_RUNNING);
rwbase_restore_current_state();
__rwbase_write_unlock(rwb, 0, flags);
return -EINTR;
}
if (__rwbase_write_trylock(rwb))
break;
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
/*
* Schedule and wait for the readers to leave the critical
* section. The last reader leaving it wakes the waiter.
*/
if (atomic_read(&rwb->readers) != 0)
rwbase_schedule();
set_current_state(state);
rwbase_schedule();
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
}
atomic_set(&rwb->readers, WRITER_BIAS);
set_current_state(state);
}
rwbase_restore_current_state();
out_unlock:
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
return 0;
}
@ -253,8 +279,7 @@ static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
atomic_sub(READER_BIAS, &rwb->readers);
raw_spin_lock_irqsave(&rtm->wait_lock, flags);
if (!atomic_read(&rwb->readers)) {
atomic_set(&rwb->readers, WRITER_BIAS);
if (__rwbase_write_trylock(rwb)) {
raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
return 1;
}
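The pairing those new comments describe can be sketched outside the kernel. A user-space C11 analogue (simplified: a plain negative count stands in for the READER_BIAS scheme; only the acquire/release choreography carries over):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int readers;  /* >= 0: reader count; < 0: a writer holds it */

static bool reader_trylock(void)
{
	int r = atomic_load_explicit(&readers, memory_order_relaxed);

	while (r >= 0) {
		/* ACQUIRE on success: the reader's critical section cannot
		 * be reordered before this point */
		if (atomic_compare_exchange_weak_explicit(&readers, &r, r + 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;
	}
	return false;
}

static void reader_unlock(void)
{
	/* RELEASE: pairs with a writer's acquire read of the counter */
	atomic_fetch_sub_explicit(&readers, 1, memory_order_release);
}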

View File

@ -134,4 +134,47 @@ void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL_GPL(pci_iomap_wc);
/*
* pci_iounmap() somewhat illogically comes from lib/iomap.c for the
* CONFIG_GENERIC_IOMAP case, because that's the code that knows about
* the different IOMAP ranges.
*
* But if the architecture does not use the generic iomap code, and if
* it has _not_ defined its own private pci_iounmap function, we define
* it here.
*
* NOTE! This default implementation assumes that if the architecture
* supports ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
* be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
* and does not need unmapping with 'ioport_unmap()'.
*
* If you have different rules for your architecture, you need to
* implement your own pci_iounmap() that knows the rules for where
* and how IO vs MEM get mapped.
*
* This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
* from legacy <asm-generic/io.h> header file behavior. In particular,
* it would seem to make sense to do the iounmap(p) for the non-IO-space
* case here regardless, but that's not what the old header file code
* did. Probably incorrectly, but this is meant to be bug-for-bug
* compatible.
*/
#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
uintptr_t start = (uintptr_t) PCI_IOBASE;
uintptr_t addr = (uintptr_t) p;
if (addr >= start && addr < start + IO_SPACE_LIMIT)
return;
iounmap(p);
#endif
}
EXPORT_SYMBOL(pci_iounmap);
#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
#endif /* CONFIG_PCI */
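For reference, the call pair this helper completes, as a driver would use it. A minimal sketch (BAR number and register offset are hypothetical; device enabling and error handling are elided):

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;

	regs = pci_iomap(pdev, 0, 0);   /* maxlen 0: map all of BAR 0 */
	if (!regs)
		return -ENOMEM;

	pr_info("demo: reg 0x10 = %#x\n", ioread32(regs + 0x10));

	pci_iounmap(pdev, regs);        /* undoes either an IO or a MEM mapping */
	return 0;
}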

View File

@ -29,7 +29,12 @@ CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
else
CLANG_FLAGS += -fintegrated-as
endif
# By default, clang only warns when it encounters an unknown warning flag or
# certain optimization flags it knows it has not implemented.
# Make it behave more like gcc by erroring when these flags are encountered
# so they can be implemented or wrapped in cc-option.
CLANG_FLAGS += -Werror=unknown-warning-option
CLANG_FLAGS += -Werror=ignored-optimization-argument
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_AFLAGS += $(CLANG_FLAGS)
export CLANG_FLAGS

View File

@ -13,7 +13,7 @@
# Stage 2 is handled by this file and does the following
# 1) Find all modules listed in modules.order
# 2) modpost is then used to
# 3) create one <module>.mod.c file pr. module
# 3) create one <module>.mod.c file per module
# 4) create one Module.symvers file with CRC for all exported symbols
# Step 3 is used to place certain information in the module's ELF

View File

@ -34,7 +34,6 @@ REGEX_SOURCE_SYMBOL = re.compile(SOURCE_SYMBOL)
REGEX_KCONFIG_DEF = re.compile(DEF)
REGEX_KCONFIG_EXPR = re.compile(EXPR)
REGEX_KCONFIG_STMT = re.compile(STMT)
REGEX_KCONFIG_HELP = re.compile(r"^\s+help\s*$")
REGEX_FILTER_SYMBOLS = re.compile(r"[A-Za-z0-9]$")
REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
REGEX_QUOTES = re.compile("(\"(.*?)\")")
@ -102,6 +101,9 @@ def parse_options():
"continue.")
if args.commit:
if args.commit.startswith('HEAD'):
sys.exit("The --commit option can't use the HEAD ref")
args.find = False
if args.ignore:
@ -432,7 +434,6 @@ def parse_kconfig_file(kfile):
lines = []
defined = []
references = []
skip = False
if not os.path.exists(kfile):
return defined, references
@ -448,12 +449,6 @@ def parse_kconfig_file(kfile):
if REGEX_KCONFIG_DEF.match(line):
symbol_def = REGEX_KCONFIG_DEF.findall(line)
defined.append(symbol_def[0])
skip = False
elif REGEX_KCONFIG_HELP.match(line):
skip = True
elif skip:
# ignore content of help messages
pass
elif REGEX_KCONFIG_STMT.match(line):
line = REGEX_QUOTES.sub("", line)
symbols = get_symbols_in_line(line)

View File

@ -13,6 +13,7 @@ import logging
import os
import re
import subprocess
import sys
_DEFAULT_OUTPUT = 'compile_commands.json'
_DEFAULT_LOG_LEVEL = 'WARNING'

View File

@ -43,7 +43,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
free(evsel);
}
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
#define FD(e, x, y) ((int *) xyarray__entry(e->fd, x, y))
#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@ -54,7 +54,10 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
int cpu, thread;
for (cpu = 0; cpu < ncpus; cpu++) {
for (thread = 0; thread < nthreads; thread++) {
FD(evsel, cpu, thread) = -1;
int *fd = FD(evsel, cpu, thread);
if (fd)
*fd = -1;
}
}
}
@ -80,7 +83,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *group_fd)
{
struct perf_evsel *leader = evsel->leader;
int fd;
int *fd;
if (evsel == leader) {
*group_fd = -1;
@ -95,10 +98,10 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread, int *grou
return -ENOTCONN;
fd = FD(leader, cpu, thread);
if (fd == -1)
if (fd == NULL || *fd == -1)
return -EBADF;
*group_fd = fd;
*group_fd = *fd;
return 0;
}
@ -138,7 +141,11 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
for (cpu = 0; cpu < cpus->nr; cpu++) {
for (thread = 0; thread < threads->nr; thread++) {
int fd, group_fd;
int fd, group_fd, *evsel_fd;
evsel_fd = FD(evsel, cpu, thread);
if (evsel_fd == NULL)
return -EINVAL;
err = get_group_fd(evsel, cpu, thread, &group_fd);
if (err < 0)
@ -151,7 +158,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
if (fd < 0)
return -errno;
FD(evsel, cpu, thread) = fd;
*evsel_fd = fd;
}
}
@ -163,9 +170,12 @@ static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
int thread;
for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
if (FD(evsel, cpu, thread) >= 0)
close(FD(evsel, cpu, thread));
FD(evsel, cpu, thread) = -1;
int *fd = FD(evsel, cpu, thread);
if (fd && *fd >= 0) {
close(*fd);
*fd = -1;
}
}
}
@ -209,13 +219,12 @@ void perf_evsel__munmap(struct perf_evsel *evsel)
for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
int fd = FD(evsel, cpu, thread);
struct perf_mmap *map = MMAP(evsel, cpu, thread);
int *fd = FD(evsel, cpu, thread);
if (fd < 0)
if (fd == NULL || *fd < 0)
continue;
perf_mmap__munmap(map);
perf_mmap__munmap(MMAP(evsel, cpu, thread));
}
}
@ -239,15 +248,16 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
int fd = FD(evsel, cpu, thread);
struct perf_mmap *map = MMAP(evsel, cpu, thread);
int *fd = FD(evsel, cpu, thread);
struct perf_mmap *map;
if (fd < 0)
if (fd == NULL || *fd < 0)
continue;
map = MMAP(evsel, cpu, thread);
perf_mmap__init(map, NULL, false, NULL);
ret = perf_mmap__mmap(map, &mp, fd, cpu);
ret = perf_mmap__mmap(map, &mp, *fd, cpu);
if (ret) {
perf_evsel__munmap(evsel);
return ret;
@ -260,7 +270,9 @@ int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
{
if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
int *fd = FD(evsel, cpu, thread);
if (fd == NULL || *fd < 0 || MMAP(evsel, cpu, thread) == NULL)
return NULL;
return MMAP(evsel, cpu, thread)->base;
@ -295,17 +307,18 @@ int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
struct perf_counts_values *count)
{
size_t size = perf_evsel__read_size(evsel);
int *fd = FD(evsel, cpu, thread);
memset(count, 0, sizeof(*count));
if (FD(evsel, cpu, thread) < 0)
if (fd == NULL || *fd < 0)
return -EINVAL;
if (MMAP(evsel, cpu, thread) &&
!perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
return 0;
if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
if (readn(*fd, count->values, size) <= 0)
return -errno;
return 0;
@ -318,8 +331,13 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
int thread;
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
int fd = FD(evsel, cpu, thread),
err = ioctl(fd, ioc, arg);
int err;
int *fd = FD(evsel, cpu, thread);
if (fd == NULL || *fd < 0)
return -1;
err = ioctl(*fd, ioc, arg);
if (err)
return err;

View File

@ -368,16 +368,6 @@ static inline int output_type(unsigned int type)
return OUTPUT_TYPE_OTHER;
}
static inline unsigned int attr_type(unsigned int type)
{
switch (type) {
case OUTPUT_TYPE_SYNTH:
return PERF_TYPE_SYNTH;
default:
return type;
}
}
static bool output_set_by_user(void)
{
int j;
@ -556,6 +546,18 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
}
static struct evsel *find_first_output_type(struct evlist *evlist,
unsigned int type)
{
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (output_type(evsel->core.attr.type) == (int)type)
return evsel;
}
return NULL;
}
/*
* verify all user requested events exist and the samples
* have the expected data
@ -567,7 +569,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
struct evsel *evsel;
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
evsel = perf_session__find_first_evtype(session, attr_type(j));
evsel = find_first_output_type(session->evlist, j);
/*
* even if fields is set to 0 (ie., show nothing) event must

View File

@ -757,25 +757,40 @@ void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
}
void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
unsigned int row, bool arrow_down)
unsigned int row, int diff, bool arrow_down)
{
unsigned int end_row;
int end_row;
if (row >= browser->top_idx)
end_row = row - browser->top_idx;
else
if (diff <= 0)
return;
SLsmg_set_char_set(1);
if (arrow_down) {
if (row + diff <= browser->top_idx)
return;
end_row = row + diff - browser->top_idx;
ui_browser__gotorc(browser, end_row, column - 1);
SLsmg_write_char(SLSMG_ULCORN_CHAR);
ui_browser__gotorc(browser, end_row, column);
SLsmg_draw_hline(2);
ui_browser__gotorc(browser, end_row + 1, column - 1);
SLsmg_write_char(SLSMG_LTEE_CHAR);
while (--end_row >= 0 && end_row > (int)(row - browser->top_idx)) {
ui_browser__gotorc(browser, end_row, column - 1);
SLsmg_draw_vline(1);
}
end_row = (int)(row - browser->top_idx);
if (end_row >= 0) {
ui_browser__gotorc(browser, end_row, column - 1);
SLsmg_write_char(SLSMG_ULCORN_CHAR);
ui_browser__gotorc(browser, end_row, column);
SLsmg_draw_hline(2);
}
} else {
if (row < browser->top_idx)
return;
end_row = row - browser->top_idx;
ui_browser__gotorc(browser, end_row, column - 1);
SLsmg_write_char(SLSMG_LTEE_CHAR);
ui_browser__gotorc(browser, end_row, column);

View File

@ -51,7 +51,7 @@ void ui_browser__write_graph(struct ui_browser *browser, int graph);
void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
u64 start, u64 end);
void ui_browser__mark_fused(struct ui_browser *browser, unsigned int column,
unsigned int row, bool arrow_down);
unsigned int row, int diff, bool arrow_down);
void __ui_browser__show_title(struct ui_browser *browser, const char *title);
void ui_browser__show_title(struct ui_browser *browser, const char *title);
int ui_browser__show(struct ui_browser *browser, const char *title,

View File

@ -125,13 +125,20 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
ab->selection = al;
}
static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
{
struct disasm_line *pos = list_prev_entry(cursor, al.node);
const char *name;
int diff = 1;
while (pos && pos->al.offset == -1) {
pos = list_prev_entry(pos, al.node);
if (!ab->opts->hide_src_code)
diff++;
}
if (!pos)
return false;
return 0;
if (ins__is_lock(&pos->ins))
name = pos->ops.locked.ins.name;
@ -139,9 +146,11 @@ static bool is_fused(struct annotate_browser *ab, struct disasm_line *cursor)
name = pos->ins.name;
if (!name || !cursor->ins.name)
return false;
return 0;
return ins__is_fused(ab->arch, name, cursor->ins.name);
if (ins__is_fused(ab->arch, name, cursor->ins.name))
return diff;
return 0;
}
static void annotate_browser__draw_current_jump(struct ui_browser *browser)
@ -155,6 +164,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
struct annotation *notes = symbol__annotation(sym);
u8 pcnt_width = annotation__pcnt_width(notes);
int width;
int diff = 0;
/* PLT symbols contain external offsets */
if (strstr(sym->name, "@plt"))
@ -205,11 +215,11 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
pcnt_width + 2 + notes->widths.addr + width,
from, to);
if (is_fused(ab, cursor)) {
diff = is_fused(ab, cursor);
if (diff > 0) {
ui_browser__mark_fused(browser,
pcnt_width + 3 + notes->widths.addr + width,
from - 1,
to > from);
from - diff, diff, to > from);
}
}

View File

@ -24,7 +24,10 @@
struct btf * __weak btf__load_from_kernel_by_id(__u32 id)
{
struct btf *btf;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
int err = btf__get_from_id(id, &btf);
#pragma GCC diagnostic pop
return err ? ERR_PTR(err) : btf;
}

View File

@ -2149,6 +2149,7 @@ static int add_callchain_ip(struct thread *thread,
al.filtered = 0;
al.sym = NULL;
al.srcline = NULL;
if (!cpumode) {
thread__find_cpumode_addr_location(thread, ip, &al);
} else {

View File

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <ppc-asm.h>
#include <basic_asm.h>
#include <asm/unistd.h>
.text
@ -26,3 +26,38 @@ FUNC_START(getppid_tm_suspended)
1:
li r3, -1
blr
.macro scv level
.long (0x44000001 | (\level) << 5)
.endm
FUNC_START(getppid_scv_tm_active)
PUSH_BASIC_STACK(0)
tbegin.
beq 1f
li r0, __NR_getppid
scv 0
tend.
POP_BASIC_STACK(0)
blr
1:
li r3, -1
POP_BASIC_STACK(0)
blr
FUNC_START(getppid_scv_tm_suspended)
PUSH_BASIC_STACK(0)
tbegin.
beq 1f
li r0, __NR_getppid
tsuspend.
scv 0
tresume.
tend.
POP_BASIC_STACK(0)
blr
1:
li r3, -1
POP_BASIC_STACK(0)
blr

View File

@ -19,23 +19,36 @@
#include "utils.h"
#include "tm.h"
#ifndef PPC_FEATURE2_SCV
#define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */
#endif
extern int getppid_tm_active(void);
extern int getppid_tm_suspended(void);
extern int getppid_scv_tm_active(void);
extern int getppid_scv_tm_suspended(void);
unsigned retries = 0;
#define TEST_DURATION 10 /* seconds */
pid_t getppid_tm(bool suspend)
pid_t getppid_tm(bool scv, bool suspend)
{
int i;
pid_t pid;
for (i = 0; i < TM_RETRIES; i++) {
if (suspend)
pid = getppid_tm_suspended();
else
pid = getppid_tm_active();
if (suspend) {
if (scv)
pid = getppid_scv_tm_suspended();
else
pid = getppid_tm_suspended();
} else {
if (scv)
pid = getppid_scv_tm_active();
else
pid = getppid_tm_active();
}
if (pid >= 0)
return pid;
@ -82,15 +95,24 @@ int tm_syscall(void)
* Test a syscall within a suspended transaction and verify
* that it succeeds.
*/
FAIL_IF(getppid_tm(true) == -1); /* Should succeed. */
FAIL_IF(getppid_tm(false, true) == -1); /* Should succeed. */
/*
* Test a syscall within an active transaction and verify that
* it fails with the correct failure code.
*/
FAIL_IF(getppid_tm(false) != -1); /* Should fail... */
FAIL_IF(getppid_tm(false, false) != -1); /* Should fail... */
FAIL_IF(!failure_is_persistent()); /* ...persistently... */
FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */
/* Now do it all again with scv if it is available. */
if (have_hwcap2(PPC_FEATURE2_SCV)) {
FAIL_IF(getppid_tm(true, true) == -1); /* Should succeed. */
FAIL_IF(getppid_tm(true, false) != -1); /* Should fail... */
FAIL_IF(!failure_is_persistent()); /* ...persistently... */
FAIL_IF(!failure_is_syscall()); /* ...with code syscall. */
}
gettimeofday(&now, 0);
}