Acrux: r4, Natsuki
Signed-off-by: nysascape <[email protected]>
nysascape committed Aug 23, 2019
2 parents d8f4808 + 82b3d8c commit 1dfc5e1
Showing 30 changed files with 273 additions and 59 deletions.
7 changes: 4 additions & 3 deletions Documentation/kernel-parameters.txt
@@ -2235,6 +2235,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			improves system performance, but it may also
 			expose users to several CPU vulnerabilities.
 			Equivalent to: nopti [X86]
+				       nospectre_v1 [X86]
 				       nospectre_v2 [X86]
 				       spectre_v2_user=off [X86]
 				       spec_store_bypass_disable=off [X86]
@@ -2568,9 +2569,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 	nohugeiomap	[KNL,x86] Disable kernel huge I/O mappings.
 
-	nospectre_v1	[PPC] Disable mitigations for Spectre Variant 1 (bounds
-			check bypass). With this option data leaks are possible
-			in the system.
+	nospectre_v1	[X86,PPC] Disable mitigations for Spectre Variant 1
+			(bounds check bypass). With this option data leaks are
+			possible in the system.
 
 	nospectre_v2	[X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
 			(indirect branch prediction) vulnerability. System may
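The state selected by nospectre_v1 is observable from userspace through sysfs. A minimal sketch, assuming a kernel with this series applied; the file read here is the one served by cpu_show_common() in the bugs.c change below, and booting with nospectre_v1 should flip the output from the "Mitigation: ..." string to the "Vulnerable: ..." string in spectre_v1_strings[]:

	/* Print the kernel's reported Spectre v1 state via sysfs. */
	#include <stdio.h>

	int main(void)
	{
		char buf[256];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v1", "r");

		if (!f) {
			perror("spectre_v1 sysfs");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("spectre_v1: %s", buf); /* buf already ends in '\n' */
		fclose(f);
		return 0;
	}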
2 changes: 1 addition & 1 deletion Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 188
+SUBLEVEL = 189
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
7 changes: 4 additions & 3 deletions arch/arm64/include/asm/cpufeature.h
@@ -49,9 +49,10 @@ extern const char *machine_name;
 
 /* CPU feature register tracking */
 enum ftr_type {
-	FTR_EXACT,	/* Use a predefined safe value */
-	FTR_LOWER_SAFE,	/* Smaller value is safe */
-	FTR_HIGHER_SAFE,/* Bigger value is safe */
+	FTR_EXACT,			/* Use a predefined safe value */
+	FTR_LOWER_SAFE,			/* Smaller value is safe */
+	FTR_HIGHER_SAFE,		/* Bigger value is safe */
+	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
 };
 
 #define FTR_STRICT	true	/* SANITY check strict matching required */
14 changes: 10 additions & 4 deletions arch/arm64/kernel/cpufeature.c
@@ -139,10 +139,12 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
 };
 
 static struct arm64_ftr_bits ftr_ctr[] = {
-	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),		/* RAO */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
-	U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
-	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),		/* RES1 */
+	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 30, 1, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1),	/* DIC */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1),	/* IDC */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 24, 4, 0),	/* CWG */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 20, 4, 0),	/* ERG */
 	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
 	/*
 	 * Linux can handle differing I-cache policies. Userspace JITs will
@@ -353,6 +355,10 @@ static s64 arm64_ftr_safe_value(struct arm64_ftr_bits *ftrp, s64 new, s64 cur)
 	case FTR_LOWER_SAFE:
 		ret = new < cur ? new : cur;
 		break;
+	case FTR_HIGHER_OR_ZERO_SAFE:
+		if (!cur || !new)
+			break;
+		/* Fallthrough */
 	case FTR_HIGHER_SAFE:
 		ret = new > cur ? new : cur;
 		break;
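The new FTR_HIGHER_OR_ZERO_SAFE rule matters for CTR_EL0's CWG and ERG fields, where a value of 0 means "not reported" and must win over any concrete value (ret starts at 0 in the kernel function, so the early break yields 0). A standalone C sketch of the switch above, with register bit extraction omitted and the function name ours:

	#include <stdio.h>

	enum ftr_type {
		FTR_EXACT,
		FTR_LOWER_SAFE,
		FTR_HIGHER_SAFE,
		FTR_HIGHER_OR_ZERO_SAFE,
	};

	/* Mirror of arm64_ftr_safe_value() for the non-FTR_EXACT cases:
	 * pick the value that is safe to advertise when two CPUs disagree. */
	static long safe_value(enum ftr_type type, long new, long cur)
	{
		switch (type) {
		case FTR_LOWER_SAFE:
			return new < cur ? new : cur;
		case FTR_HIGHER_OR_ZERO_SAFE:
			/* 0 means "value not reported"; it dominates. */
			if (!cur || !new)
				return 0;
			/* fallthrough */
		case FTR_HIGHER_SAFE:
			return new > cur ? new : cur;
		default:
			return cur; /* FTR_EXACT uses a predefined safe value */
		}
	}

	int main(void)
	{
		/* CWG field: one CPU reports 4, another reports 0 (unknown). */
		printf("%ld\n", safe_value(FTR_HIGHER_OR_ZERO_SAFE, 0, 4)); /* 0 */
		printf("%ld\n", safe_value(FTR_HIGHER_OR_ZERO_SAFE, 3, 4)); /* 4 */
		return 0;
	}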
19 changes: 19 additions & 0 deletions arch/x86/entry/calling.h
@@ -1,3 +1,5 @@
+#include <asm/cpufeatures.h>
+
 /*
  x86 function call convention, 64-bit:
@@ -199,6 +201,23 @@ For 32-bit we have the following conventions - kernel is built with
 	.byte 0xf1
 .endm
 
+/*
+ * Mitigate Spectre v1 for conditional swapgs code paths.
+ *
+ * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
+ * prevent a speculative swapgs when coming from kernel space.
+ *
+ * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
+ * to prevent the swapgs from getting speculatively skipped when coming from
+ * user space.
+ */
+.macro FENCE_SWAPGS_USER_ENTRY
+	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
+.endm
+.macro FENCE_SWAPGS_KERNEL_ENTRY
+	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
+.endm
+
 #else /* CONFIG_X86_64 */
 
 /*
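Once alternatives are applied, each macro above is either nothing or a single lfence, depending on whether bugs.c forced the corresponding feature bit. An illustrative C model of the two entry paths, not kernel code; the flag and function names here are ours, and the real paths are the entry_64.S hunks below:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for the X86_FEATURE_FENCE_SWAPGS_* bits that
	 * spectre_v1_select_mitigation() may force on at boot. */
	static bool feature_fence_swapgs_user   = true;
	static bool feature_fence_swapgs_kernel = true;

	static void swapgs(void) { puts("swapgs"); } /* stub for the instruction */

	/* Models ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER */
	static void fence_swapgs_user_entry(void)
	{
		if (feature_fence_swapgs_user)
			puts("lfence"); /* blocks a speculative swapgs from kernel space */
	}

	/* Models ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL */
	static void fence_swapgs_kernel_entry(void)
	{
		if (feature_fence_swapgs_kernel)
			puts("lfence"); /* blocks user entry speculatively skipping swapgs */
	}

	/* A conditional-swapgs entry point: both sides of the branch get a
	 * fence, so neither outcome can be reached speculatively with an
	 * attacker-influenced GS base. */
	static void kernel_entry(bool from_user)
	{
		if (from_user) {
			swapgs();                  /* switch to kernel GS */
			fence_swapgs_user_entry();
		} else {
			fence_swapgs_kernel_entry();
		}
		/* ... per-CPU data accesses via GS follow ... */
	}

	int main(void)
	{
		kernel_entry(true);
		kernel_entry(false);
		return 0;
	}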
25 changes: 21 additions & 4 deletions arch/x86/entry/entry_64.S
@@ -551,6 +551,7 @@ END(irq_entries_start)
 	 * tracking that we're in kernel mode.
 	 */
 	SWAPGS
+	FENCE_SWAPGS_USER_ENTRY
 	SWITCH_KERNEL_CR3
 
 	/*
@@ -566,8 +567,10 @@ END(irq_entries_start)
 #ifdef CONFIG_CONTEXT_TRACKING
 	call enter_from_user_mode
 #endif
-
+	jmp	2f
 1:
+	FENCE_SWAPGS_KERNEL_ENTRY
+2:
 	/*
 	 * Save previous stack pointer, optionally switch to interrupt stack.
 	 * irq_count is used to check if a CPU is already on an interrupt stack
@@ -1083,6 +1086,13 @@ ENTRY(paranoid_entry)
 	movq	%rax, %cr3
 2:
 #endif
+	/*
+	 * The above doesn't do an unconditional CR3 write, even in the PTI
+	 * case. So do an lfence to prevent GS speculation, regardless of
+	 * whether PTI is enabled.
+	 */
+	FENCE_SWAPGS_KERNEL_ENTRY
+
 	ret
 END(paranoid_entry)

@@ -1139,12 +1149,12 @@ ENTRY(error_entry)
 	testb	$3, CS+8(%rsp)
 	jz	.Lerror_kernelspace
 
-.Lerror_entry_from_usermode_swapgs:
 	/*
 	 * We entered from user mode or we're pretending to have entered
 	 * from user mode due to an IRET fault.
 	 */
 	SWAPGS
+	FENCE_SWAPGS_USER_ENTRY
 
 .Lerror_entry_from_usermode_after_swapgs:
 	/*
@@ -1158,6 +1168,8 @@ ENTRY(error_entry)
 #endif
 	ret
 
+.Lerror_entry_done_lfence:
+	FENCE_SWAPGS_KERNEL_ENTRY
 .Lerror_entry_done:
 	TRACE_IRQS_OFF
 	ret
@@ -1176,14 +1188,16 @@ ENTRY(error_entry)
 	cmpq	%rax, RIP+8(%rsp)
 	je	.Lbstep_iret
 	cmpq	$gs_change, RIP+8(%rsp)
-	jne	.Lerror_entry_done
+	jne	.Lerror_entry_done_lfence
 
 	/*
 	 * hack: gs_change can fail with user gsbase. If this happens, fix up
 	 * gsbase and proceed. We'll fix up the exception and land in
 	 * gs_change's error handler with kernel gsbase.
 	 */
-	jmp	.Lerror_entry_from_usermode_swapgs
+	SWAPGS
+	FENCE_SWAPGS_USER_ENTRY
+	jmp	.Lerror_entry_done
 
 .Lbstep_iret:
 	/* Fix truncated RIP */
@@ -1196,6 +1210,7 @@ ENTRY(error_entry)
 	 * Switch to kernel gsbase:
 	 */
 	SWAPGS
+	FENCE_SWAPGS_USER_ENTRY
 
 	/*
 	 * Pretend that the exception came from user mode: set up pt_regs
@@ -1292,6 +1307,7 @@ ENTRY(nmi)
 	 * to switch CR3 here.
 	 */
 	cld
+	FENCE_SWAPGS_USER_ENTRY
 	movq	%rsp, %rdx
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 	pushq	5*8(%rdx)	/* pt_regs->ss */
@@ -1580,6 +1596,7 @@ end_repeat_nmi:
 	movq	%rax, %cr3
 2:
 #endif
+	FENCE_SWAPGS_KERNEL_ENTRY
 
 	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
 	call	do_nmi
10 changes: 6 additions & 4 deletions arch/x86/include/asm/cpufeatures.h
@@ -192,17 +192,17 @@
 #define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
 
+#define X86_FEATURE_FENCE_SWAPGS_USER	( 7*32+10) /* "" LFENCE in user entry SWAPGS path */
+#define X86_FEATURE_FENCE_SWAPGS_KERNEL	( 7*32+11) /* "" LFENCE in kernel entry SWAPGS path */
+
 #define X86_FEATURE_RETPOLINE	( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
 #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
-
 #define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* "" Fill RSB on context switches */
 
 #define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
 #define X86_FEATURE_SSBD	( 7*32+17) /* Speculative Store Bypass Disable */
 
-/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
-#define X86_FEATURE_KAISER	( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
-#define X86_FEATURE_RSB_CTXSW	( 7*32+19) /* "" Fill RSB on context switches */
 
 #define X86_FEATURE_USE_IBPB	( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
 #define X86_FEATURE_USE_IBRS_FW	( 7*32+22) /* "" Use IBRS during runtime firmware calls */
@@ -215,6 +215,7 @@
 #define X86_FEATURE_ZEN			( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
 #define X86_FEATURE_L1TF_PTEINV		( 7*32+29) /* "" L1TF workaround PTE inversion */
 #define X86_FEATURE_IBRS_ENHANCED	( 7*32+30) /* Enhanced IBRS */
+#define X86_FEATURE_KAISER		( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW		( 8*32+ 0) /* Intel TPR Shadow */
@@ -338,5 +339,6 @@
 #define X86_BUG_L1TF		X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
 #define X86_BUG_MDS		X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
 #define X86_BUG_MSBDS_ONLY	X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
+#define X86_BUG_SWAPGS		X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
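Each define packs a (word, bit) pair into one integer: ( 7*32+10) is bit 10 of feature word 7 in the kernel's per-CPU capability bitmap. A minimal sketch of that encoding; the bitmap array and helper names here are illustrative, not the kernel's:

	#include <stdio.h>
	#include <stdint.h>

	#define X86_FEATURE_FENCE_SWAPGS_USER   ( 7*32+10)
	#define X86_FEATURE_FENCE_SWAPGS_KERNEL ( 7*32+11)

	/* Illustrative capability bitmap: one 32-bit word per feature word. */
	static uint32_t caps[20];

	static void set_cap(int f) { caps[f / 32] |= 1u << (f % 32); }
	static int  has_cap(int f) { return !!(caps[f / 32] & (1u << (f % 32))); }

	int main(void)
	{
		set_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		printf("user: %d, kernel: %d\n",
		       has_cap(X86_FEATURE_FENCE_SWAPGS_USER),    /* 0 */
		       has_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL)); /* 1 */
		return 0;
	}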
105 changes: 96 additions & 9 deletions arch/x86/kernel/cpu/bugs.c
@@ -30,6 +30,7 @@
 #include <asm/intel-family.h>
 #include <asm/e820.h>
 
+static void __init spectre_v1_select_mitigation(void);
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
@@ -87,17 +88,11 @@ void __init check_bugs(void)
 	if (boot_cpu_has(X86_FEATURE_STIBP))
 		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
 
-	/* Select the proper spectre mitigation before patching alternatives */
+	/* Select the proper CPU mitigations before patching alternatives: */
+	spectre_v1_select_mitigation();
 	spectre_v2_select_mitigation();
-
-	/*
-	 * Select proper mitigation for any exposure to the Speculative Store
-	 * Bypass vulnerability.
-	 */
 	ssb_select_mitigation();
-
 	l1tf_select_mitigation();
-
 	mds_select_mitigation();
 
 	arch_smt_update();
@@ -251,6 +246,98 @@ static int __init mds_cmdline(char *str)
 }
 early_param("mds", mds_cmdline);
 
+#undef pr_fmt
+#define pr_fmt(fmt)	"Spectre V1 : " fmt
+
+enum spectre_v1_mitigation {
+	SPECTRE_V1_MITIGATION_NONE,
+	SPECTRE_V1_MITIGATION_AUTO,
+};
+
+static enum spectre_v1_mitigation spectre_v1_mitigation =
+	SPECTRE_V1_MITIGATION_AUTO;
+
+static const char * const spectre_v1_strings[] = {
+	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
+	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
+};
+
+/*
+ * Does SMAP provide full mitigation against speculative kernel access to
+ * userspace?
+ */
+static bool smap_works_speculatively(void)
+{
+	if (!boot_cpu_has(X86_FEATURE_SMAP))
+		return false;
+
+	/*
+	 * On CPUs which are vulnerable to Meltdown, SMAP does not
+	 * prevent speculative access to user data in the L1 cache.
+	 * Consider SMAP to be non-functional as a mitigation on these
+	 * CPUs.
+	 */
+	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
+		return false;
+
+	return true;
+}
+
+static void __init spectre_v1_select_mitigation(void)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
+		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+		return;
+	}
+
+	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
+		/*
+		 * With Spectre v1, a user can speculatively control either
+		 * path of a conditional swapgs with a user-controlled GS
+		 * value. The mitigation is to add lfences to both code paths.
+		 *
+		 * If FSGSBASE is enabled, the user can put a kernel address in
+		 * GS, in which case SMAP provides no protection.
+		 *
+		 * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
+		 *   FSGSBASE enablement patches have been merged. ]
+		 *
+		 * If FSGSBASE is disabled, the user can only put a user space
+		 * address in GS. That makes an attack harder, but still
+		 * possible if there's no SMAP protection.
+		 */
+		if (!smap_works_speculatively()) {
+			/*
+			 * Mitigation can be provided from SWAPGS itself or
+			 * PTI as the CR3 write in the Meltdown mitigation
+			 * is serializing.
+			 *
+			 * If neither is there, mitigate with an LFENCE to
+			 * stop speculation through swapgs.
+			 */
+			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
+			    !boot_cpu_has(X86_FEATURE_KAISER))
+				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
+
+			/*
+			 * Enable lfences in the kernel entry (non-swapgs)
+			 * paths, to prevent user entry from speculatively
+			 * skipping swapgs.
+			 */
+			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
+		}
+	}
+
+	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+}
+
+static int __init nospectre_v1_cmdline(char *str)
+{
+	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+	return 0;
+}
+early_param("nospectre_v1", nospectre_v1_cmdline);
+
 #undef pr_fmt
 #define pr_fmt(fmt)	"Spectre V2 : " fmt
 
@@ -1154,7 +1241,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 		break;
 
 	case X86_BUG_SPECTRE_V1:
-		return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+		return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
 
 	case X86_BUG_SPECTRE_V2:
 		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
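The net effect of spectre_v1_select_mitigation() can be summarized as a truth table over the four conditions it consults. A small sketch, with bool flags standing in for the boot_cpu_has()/boot_cpu_has_bug() checks; the flag and function names are ours:

	#include <stdbool.h>
	#include <stdio.h>

	/* Re-derivation of which fences spectre_v1_select_mitigation()
	 * forces on, given the four inputs it consults. */
	static void fences(bool smap, bool meltdown, bool swapgs_bug, bool kaiser)
	{
		/* SMAP only counts if the CPU isn't Meltdown-vulnerable. */
		bool smap_works = smap && !meltdown;
		bool user = false, kernel = false;

		if (!smap_works) {
			/* The user fence is needed only if SWAPGS speculates
			 * and PTI's serializing CR3 write isn't already there. */
			user   = swapgs_bug && !kaiser;
			kernel = true;
		}
		printf("smap=%d meltdown=%d swapgs_bug=%d kaiser=%d -> user=%d kernel=%d\n",
		       smap, meltdown, swapgs_bug, kaiser, user, kernel);
	}

	int main(void)
	{
		fences(true,  false, true,  false); /* SMAP works: no fences */
		fences(false, false, true,  false); /* both fences forced */
		fences(false, false, true,  true);  /* PTI covers the user path */
		fences(false, false, false, false); /* no SWAPGS bug: kernel fence only */
		return 0;
	}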