mirror of
https://github.com/torvalds/linux.git
synced 2025-12-07 20:06:24 +00:00
Patch series "kasan: unify kasan_enabled() and remove arch-specific
implementations", v6.
This patch series addresses the fragmentation in KASAN initialization
across architectures by introducing a unified approach that eliminates
duplicate static keys and arch-specific kasan_arch_is_ready()
implementations.
The core issue is that different architectures have inconsistent approaches
to KASAN readiness tracking:
- PowerPC, LoongArch, and UML each implement their own kasan_arch_is_ready()
- Only HW_TAGS mode had a unified static key (kasan_flag_enabled)
- Generic and SW_TAGS modes relied on arch-specific solutions
or always-on behavior
This patch (of 2):
Introduce CONFIG_ARCH_DEFER_KASAN to identify architectures [1] that need
to defer KASAN initialization until shadow memory is properly set up, and
unify the static key infrastructure across all KASAN modes.
[1] PowerPC, UML, and LoongArch select ARCH_DEFER_KASAN.
The core issue is that different architectures have inconsistent approaches
to KASAN readiness tracking:
- PowerPC, LoongArch, and UML each implement their own
kasan_arch_is_ready()
- Only HW_TAGS mode had a unified static key (kasan_flag_enabled)
- Generic and SW_TAGS modes relied on arch-specific solutions or always-on
behavior
This patch addresses the fragmentation in KASAN initialization across
architectures by introducing a unified approach that eliminates duplicate
static keys and arch-specific kasan_arch_is_ready() implementations.
Let's replace kasan_arch_is_ready() with existing kasan_enabled() check,
which examines the static key being enabled if arch selects
ARCH_DEFER_KASAN or has HW_TAGS mode support. For other arch,
kasan_enabled() checks the enablement during compile time.
Now KASAN users can use a single kasan_enabled() check everywhere.
Link: https://lkml.kernel.org/r/20250810125746.1105476-1-snovitoll@gmail.com
Link: https://lkml.kernel.org/r/20250810125746.1105476-2-snovitoll@gmail.com
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217049
Signed-off-by: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com> #powerpc
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: David Gow <davidgow@google.com>
Cc: Dmitriy Vyukov <dvyukov@google.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@loongson.cn>
Cc: Marco Elver <elver@google.com>
Cc: Qing Zhang <zhangqing@loongson.cn>
Cc: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
88 lines
3.2 KiB
C
88 lines
3.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * KASAN (Kernel Address Sanitizer) support: shadow-memory layout and
 * address<->shadow translation helpers for the direct-mapped (XKPRANGE)
 * and vmalloc (XKVRANGE) segments.
 * NOTE(review): segment bases (CACHE_BASE etc.) and cpu_vabits come from
 * <asm/addrspace.h>; this looks like the LoongArch layout — confirm.
 */
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H

#ifndef __ASSEMBLER__

#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/pgtable.h>

/* One shadow byte tracks 1 << 3 = 8 bytes of kernel address space. */
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)

/* Bit position of the 16-bit segment selector within a 64-bit address. */
#define XRANGE_SHIFT (48)

/* Valid address length */
#define XRANGE_SHADOW_SHIFT min(cpu_vabits, VA_BITS)
/* Used for taking out the valid address */
#define XRANGE_SHADOW_MASK GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0)
/* One segment whole address space size */
#define XRANGE_SIZE (XRANGE_SHADOW_MASK + 1)

/* 64-bit segment value. */
#define XKPRANGE_UC_SEG (0x8000)
#define XKPRANGE_CC_SEG (0x9000)
#define XKPRANGE_WC_SEG (0xa000)
#define XKVRANGE_VC_SEG (0xffff)

/*
 * Each segment gets its own slice of the shadow region; the slices are
 * laid out back to back: each *_KASAN_OFFSET below is the previous
 * segment's *_SHADOW_END.
 */

/* Cached */
#define XKPRANGE_CC_START CACHE_BASE
#define XKPRANGE_CC_SIZE XRANGE_SIZE
#define XKPRANGE_CC_KASAN_OFFSET (0)
#define XKPRANGE_CC_SHADOW_SIZE (XKPRANGE_CC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
#define XKPRANGE_CC_SHADOW_END (XKPRANGE_CC_KASAN_OFFSET + XKPRANGE_CC_SHADOW_SIZE)

/* UnCached */
#define XKPRANGE_UC_START UNCACHE_BASE
#define XKPRANGE_UC_SIZE XRANGE_SIZE
#define XKPRANGE_UC_KASAN_OFFSET XKPRANGE_CC_SHADOW_END
#define XKPRANGE_UC_SHADOW_SIZE (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
#define XKPRANGE_UC_SHADOW_END (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE)

/* WriteCombine */
#define XKPRANGE_WC_START WRITECOMBINE_BASE
#define XKPRANGE_WC_SIZE XRANGE_SIZE
#define XKPRANGE_WC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
#define XKPRANGE_WC_SHADOW_SIZE (XKPRANGE_WC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
#define XKPRANGE_WC_SHADOW_END (XKPRANGE_WC_KASAN_OFFSET + XKPRANGE_WC_SHADOW_SIZE)

/* VMALLOC (Cached or UnCached) */
#define XKVRANGE_VC_START MODULES_VADDR
#define XKVRANGE_VC_SIZE round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE)
#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_WC_SHADOW_END
#define XKVRANGE_VC_SHADOW_SIZE (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
#define XKVRANGE_VC_SHADOW_END (XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE)

/* KAsan shadow memory start right after vmalloc. */
#define KASAN_SHADOW_START round_up(KFENCE_AREA_END, PGDIR_SIZE)
#define KASAN_SHADOW_SIZE (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
#define KASAN_SHADOW_END (round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE) - 1)

/* Absolute shadow base for each segment: region start + per-segment offset. */
#define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
#define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
#define XKPRANGE_WC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_WC_KASAN_OFFSET)
#define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)

/* Zero-filled page backing the shadow before real shadow pages exist. */
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];

/*
 * Arch-specific address->shadow translation; the #define-same-name idiom
 * tells the generic KASAN code not to use its default implementation.
 */
#define kasan_mem_to_shadow kasan_mem_to_shadow
void *kasan_mem_to_shadow(const void *addr);

/* Inverse translation: shadow address back to the kernel address it covers. */
#define kasan_shadow_to_mem kasan_shadow_to_mem
const void *kasan_shadow_to_mem(const void *shadow_addr);
#define addr_has_metadata addr_has_metadata
|
|
static __always_inline bool addr_has_metadata(const void *addr)
|
|
{
|
|
return (kasan_mem_to_shadow((void *)addr) != NULL);
|
|
}
|
|
|
|
void kasan_init(void);
|
|
asmlinkage void kasan_early_init(void);
|
|
|
|
#endif
|
|
#endif
|