Merge tag 'v5.17.7' into 5.17
This is the 5.17.7 stable release

* tag 'v5.17.7': (141 commits)
  Linux 5.17.7
  PCI: aardvark: Update comment about link going down after link-up
  PCI: aardvark: Drop __maybe_unused from advk_pcie_disable_phy()
  PCI: aardvark: Don't mask irq when mapping
  PCI: aardvark: Remove irq_mask_ack() callback for INTx interrupts
  PCI: aardvark: Use separate INTA interrupt for emulated root bridge
  PCI: aardvark: Fix support for PME requester on emulated bridge
  PCI: aardvark: Add support for PME interrupts
  PCI: aardvark: Optimize writing PCI_EXP_RTCTL_PMEIE and PCI_EXP_RTSTA_PME on emulated bridge
  PCI: aardvark: Add support for ERR interrupt on emulated bridge
  PCI: aardvark: Enable MSI-X support
  PCI: aardvark: Fix setting MSI address
  PCI: aardvark: Add support for masking MSI interrupts
  PCI: aardvark: Refactor unmasking summary MSI interrupt
  PCI: aardvark: Use dev_fwnode() instead of of_node_to_fwnode(dev->of_node)
  PCI: aardvark: Make msi_domain_info structure a static driver structure
  PCI: aardvark: Make MSI irq_chip structures static driver structures
  PCI: aardvark: Check return value of generic_handle_domain_irq() when processing INTx IRQ
  PCI: aardvark: Rewrite IRQ code to chained IRQ handler
  PCI: aardvark: Replace custom PCIE_CORE_INT_* macros with PCI_INTERRUPT_*
  ...

Signed-off-by: KenHV <[email protected]>
KenHV committed May 14, 2022
2 parents dfccb38 + 7651bb7 commit 96f252f
Showing 121 changed files with 1,273 additions and 662 deletions.
3 changes: 0 additions & 3 deletions Documentation/devicetree/bindings/pci/apple,pcie.yaml
@@ -142,7 +142,6 @@ examples:
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
reset-gpios = <&pinctrl_ap 152 0>;
max-link-speed = <2>;
#address-cells = <3>;
#size-cells = <2>;
@@ -153,7 +152,6 @@ examples:
device_type = "pci";
reg = <0x800 0x0 0x0 0x0 0x0>;
reset-gpios = <&pinctrl_ap 153 0>;
max-link-speed = <2>;
#address-cells = <3>;
#size-cells = <2>;
@@ -164,7 +162,6 @@ examples:
device_type = "pci";
reg = <0x1000 0x0 0x0 0x0 0x0>;
reset-gpios = <&pinctrl_ap 33 0>;
max-link-speed = <1>;
#address-cells = <3>;
#size-cells = <2>;
2 changes: 1 addition & 1 deletion Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 17
SUBLEVEL = 6
SUBLEVEL = 7
EXTRAVERSION = -Kensur
NAME = Superb Owl

8 changes: 4 additions & 4 deletions arch/mips/include/asm/timex.h
@@ -40,9 +40,9 @@
typedef unsigned int cycles_t;

/*
* On R4000/R4400 before version 5.0 an erratum exists such that if the
* cycle counter is read in the exact moment that it is matching the
* compare register, no interrupt will be generated.
* On R4000/R4400 an erratum exists such that if the cycle counter is
* read in the exact moment that it is matching the compare register,
* no interrupt will be generated.
*
* There is a suggested workaround and also the erratum can't strike if
* the compare interrupt isn't being used as the clock source device.
@@ -63,7 +63,7 @@ static inline int can_use_mips_counter(unsigned int prid)
if (!__builtin_constant_p(cpu_has_counter))
asm volatile("" : "=m" (cpu_data[0].options));
if (likely(cpu_has_counter &&
prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))))
prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15))))
return 1;
else
return 0;
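For context on the new comparison above: PRID_REV_ENCODE_44() packs a 4-bit major and a 4-bit minor revision into the low byte of the PRID, so a check against revision 15.15 can never match any R4000/R4400 part. A minimal standalone sketch follows; the macro and the PRID_IMP_R4000 value mirror what arch/mips/include/asm/cpu.h defines, but treat the exact constants here as assumptions for the demo.

/*
 * Illustration of the revision check in can_use_mips_counter() above.
 * PRID_IMP_R4000 and PRID_REV_ENCODE_44() mimic the definitions in
 * arch/mips/include/asm/cpu.h; treat them as assumptions in this
 * standalone demo.
 */
#include <stdio.h>

#define PRID_IMP_R4000                  0x0400
#define PRID_REV_ENCODE_44(ver, rev)    ((ver) << 4 | (rev))

static int counter_usable(unsigned int prid)
{
        /* Old check: accept R4000/R4400 from revision 5.0 onwards. */
        int old_ok = prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0));
        /* New check: 15.15 is the largest encodable revision, so every
         * R4000/R4400 PRID fails and only later cores can pass. */
        int new_ok = prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15));

        printf("prid=0x%04x old=%d new=%d\n", prid, old_ok, new_ok);
        return new_ok;
}

int main(void)
{
        counter_usable(PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0));   /* R4000 rev 5.0 */
        counter_usable(PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15)); /* highest R4k rev */
        return 0;
}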
11 changes: 3 additions & 8 deletions arch/mips/kernel/time.c
@@ -141,15 +141,10 @@ static __init int cpu_has_mfc0_count_bug(void)
case CPU_R4400MC:
/*
* The published errata for the R4400 up to 3.0 say the CPU
* has the mfc0 from count bug.
* has the mfc0 from count bug. This seems the last version
* produced.
*/
if ((current_cpu_data.processor_id & 0xff) <= 0x30)
return 1;

/*
* we assume newer revisions are ok
*/
return 0;
return 1;
}

return 0;
3 changes: 1 addition & 2 deletions arch/parisc/kernel/processor.c
@@ -418,8 +418,7 @@ show_cpuinfo (struct seq_file *m, void *v)
}
seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);

seq_printf(m, "model\t\t: %s\n"
"model name\t: %s\n",
seq_printf(m, "model\t\t: %s - %s\n",
boot_cpu_data.pdc.sys_model_name,
cpuinfo->dev ?
cpuinfo->dev->name : "Unknown");
2 changes: 2 additions & 0 deletions arch/parisc/kernel/setup.c
@@ -161,6 +161,8 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_PA11
dma_ops_init();
#endif

clear_sched_clock_stable();
}

/*
6 changes: 1 addition & 5 deletions arch/parisc/kernel/time.c
@@ -249,13 +249,9 @@ void __init time_init(void)
static int __init init_cr16_clocksource(void)
{
/*
* The cr16 interval timers are not syncronized across CPUs, even if
* they share the same socket.
* The cr16 interval timers are not synchronized across CPUs.
*/
if (num_online_cpus() > 1 && !running_on_qemu) {
/* mark sched_clock unstable */
clear_sched_clock_stable();

clocksource_cr16.name = "cr16_unstable";
clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
clocksource_cr16.rating = 0;
21 changes: 19 additions & 2 deletions arch/riscv/mm/init.c
@@ -206,8 +206,25 @@ static void __init setup_bootmem(void)
* early_init_fdt_reserve_self() since __pa() does
* not work for DTB pointers that are fixmap addresses
*/
if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) {
/*
* In case the DTB is not located in a memory region we won't
* be able to locate it later on via the linear mapping and
* get a segfault when accessing it via __va(dtb_early_pa).
* To avoid this situation copy DTB to a memory region.
* Note that memblock_phys_alloc will also reserve DTB region.
*/
if (!memblock_is_memory(dtb_early_pa)) {
size_t fdt_size = fdt_totalsize(dtb_early_va);
phys_addr_t new_dtb_early_pa = memblock_phys_alloc(fdt_size, PAGE_SIZE);
void *new_dtb_early_va = early_memremap(new_dtb_early_pa, fdt_size);

memcpy(new_dtb_early_va, dtb_early_va, fdt_size);
early_memunmap(new_dtb_early_va, fdt_size);
_dtb_early_pa = new_dtb_early_pa;
} else
memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
}

early_init_fdt_scan_reserved_mem();
dma_contiguous_reserve(dma32_phys_limit);
67 changes: 26 additions & 41 deletions arch/x86/kernel/fpu/core.c
@@ -41,60 +41,45 @@ struct fpu_state_config fpu_user_cfg __ro_after_init;
*/
struct fpstate init_fpstate __ro_after_init;

/*
* Track whether the kernel is using the FPU state
* currently.
*
* This flag is used:
*
* - by IRQ context code to potentially use the FPU
* if it's unused.
*
* - to debug kernel_fpu_begin()/end() correctness
*/
/* Track in-kernel FPU usage */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
* Track which context is using the FPU on the CPU:
*/
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static bool kernel_fpu_disabled(void)
{
return this_cpu_read(in_kernel_fpu);
}

static bool interrupted_kernel_fpu_idle(void)
{
return !kernel_fpu_disabled();
}

/*
* Were we in user mode (or vm86 mode) when we were
* interrupted?
*
* Doing kernel_fpu_begin/end() is ok if we are running
* in an interrupt context from user mode - we'll just
* save the FPU state as required.
*/
static bool interrupted_user_mode(void)
{
struct pt_regs *regs = get_irq_regs();
return regs && user_mode(regs);
}

/*
* Can we use the FPU in kernel mode with the
* whole "kernel_fpu_begin/end()" sequence?
*
* It's always ok in process context (ie "not interrupt")
* but it is sometimes ok even from an irq.
*/
bool irq_fpu_usable(void)
{
return !in_interrupt() ||
interrupted_user_mode() ||
interrupted_kernel_fpu_idle();
if (WARN_ON_ONCE(in_nmi()))
return false;

/* In kernel FPU usage already active? */
if (this_cpu_read(in_kernel_fpu))
return false;

/*
* When not in NMI or hard interrupt context, FPU can be used in:
*
* - Task context except from within fpregs_lock()'ed critical
* regions.
*
* - Soft interrupt processing context which cannot happen
* while in a fpregs_lock()'ed critical region.
*/
if (!in_hardirq())
return true;

/*
* In hard interrupt context it's safe when soft interrupts
* are enabled, which means the interrupt did not hit in
* a fpregs_lock()'ed critical region.
*/
return !softirq_count();
}
EXPORT_SYMBOL(irq_fpu_usable);

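For context, irq_fpu_usable() is the gate callers check before a kernel_fpu_begin()/kernel_fpu_end() section. A minimal sketch of that caller pattern follows; it is illustrative only and not part of this commit, and the memset() body merely stands in for a real SIMD routine.

/*
 * Sketch of the usual caller pattern around kernel-mode FPU use.
 * irq_fpu_usable() and kernel_fpu_begin()/kernel_fpu_end() are the
 * real <asm/fpu/api.h> interfaces; the memset() calls are placeholders
 * for an actual SIMD implementation.
 */
#include <linux/string.h>
#include <linux/types.h>
#include <asm/fpu/api.h>

static void zero_block(void *dst, size_t len)
{
        if (irq_fpu_usable()) {
                kernel_fpu_begin();     /* FPU/SIMD registers may be used from here */
                memset(dst, 0, len);    /* placeholder for a SIMD routine */
                kernel_fpu_end();
        } else {
                /* NMI, nested kernel-FPU use, or a fpregs_lock()'ed region */
                memset(dst, 0, len);
        }
}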
13 changes: 13 additions & 0 deletions arch/x86/kernel/kvm.c
@@ -69,6 +69,7 @@ static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __align
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

static int has_guest_poll = 0;
/*
* No need for any "IO delay" on KVM
*/
@@ -706,14 +707,26 @@ static int kvm_cpu_down_prepare(unsigned int cpu)

static int kvm_suspend(void)
{
u64 val = 0;

kvm_guest_cpu_offline(false);

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
rdmsrl(MSR_KVM_POLL_CONTROL, val);
has_guest_poll = !(val & 1);
#endif
return 0;
}

static void kvm_resume(void)
{
kvm_cpu_online(raw_smp_processor_id());

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
wrmsrl(MSR_KVM_POLL_CONTROL, 0);
#endif
}

static struct syscore_ops kvm_syscore_ops = {
5 changes: 5 additions & 0 deletions arch/x86/kvm/cpuid.c
@@ -865,6 +865,11 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
union cpuid10_eax eax;
union cpuid10_edx edx;

if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
}

perf_get_x86_pmu_capability(&cap);

/*
10 changes: 5 additions & 5 deletions arch/x86/kvm/lapic.c
@@ -113,7 +113,8 @@ static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
(kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
@@ -2125,10 +2126,9 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
break;

case APIC_SELF_IPI:
if (apic_x2apic_mode(apic)) {
kvm_lapic_reg_write(apic, APIC_ICR,
APIC_DEST_SELF | (val & APIC_VECTOR_MASK));
} else
if (apic_x2apic_mode(apic))
kvm_apic_send_ipi(apic, APIC_DEST_SELF | (val & APIC_VECTOR_MASK), 0);
else
ret = 1;
break;
default:
2 changes: 2 additions & 0 deletions arch/x86/kvm/mmu/mmu.c
@@ -3239,6 +3239,8 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
return;

sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
if (WARN_ON(!sp))
return;

if (is_tdp_mmu_page(sp))
kvm_tdp_mmu_put_root(kvm, sp, false);
28 changes: 25 additions & 3 deletions arch/x86/kvm/svm/pmu.c
@@ -45,6 +45,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

/* duplicated from amd_f17h_perfmon_event_map. */
static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
[2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
[3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
[6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
[7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

/* amd_pmc_perf_hw_id depends on these being the same size */
static_assert(ARRAY_SIZE(amd_event_mapping) ==
ARRAY_SIZE(amd_f17h_event_mapping));

static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
@@ -140,6 +156,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,

static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
struct kvm_event_hw_type_mapping *event_mapping;
u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
int i;
@@ -148,15 +165,20 @@ static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
if (WARN_ON(pmc_is_fixed(pmc)))
return PERF_COUNT_HW_MAX;

if (guest_cpuid_family(pmc->vcpu) >= 0x17)
event_mapping = amd_f17h_event_mapping;
else
event_mapping = amd_event_mapping;

for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
if (amd_event_mapping[i].eventsel == event_select
&& amd_event_mapping[i].unit_mask == unit_mask)
if (event_mapping[i].eventsel == event_select
&& event_mapping[i].unit_mask == unit_mask)
break;

if (i == ARRAY_SIZE(amd_event_mapping))
return PERF_COUNT_HW_MAX;

return amd_event_mapping[i].event_type;
return event_mapping[i].event_type;
}

/* check if a PMC is enabled by comparing it against global_ctrl bits. Because