lkl_linux/include/linux/init.h
Sami Tolvanen cf68fffb66 add support for Clang CFI
This change adds support for Clang’s forward-edge Control Flow
Integrity (CFI) checking. With CONFIG_CFI_CLANG, the compiler
injects a runtime check before each indirect function call to ensure
the target is a valid function with the correct static type. This
restricts possible call targets and makes it more difficult for
an attacker to exploit bugs that allow the modification of stored
function pointers. For more details, see:

  https://clang.llvm.org/docs/ControlFlowIntegrity.html
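
For illustration, a minimal sketch (all names hypothetical) of the kind
of call the check rejects:

  typedef int (*hook_fn)(int);            /* expected static type */

  static long wrong_hook(void *arg)       /* different signature   */
  {
          return 0;
  }

  static int run_hook(hook_fn fn)
  {
          return fn(42);  /* indirect call: with CONFIG_CFI_CLANG the
                           * target must be an address-taken function
                           * whose static type matches hook_fn */
  }

  /* run_hook((hook_fn)wrong_hook) compiles, but the injected check
   * catches the type mismatch at runtime. */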

Clang requires CONFIG_LTO_CLANG to be enabled with CFI to gain
visibility to possible call targets. Kernel modules are supported
with Clang’s cross-DSO CFI mode, which allows checking between
independently compiled components.

With CFI enabled, the compiler injects a __cfi_check() function into
the kernel and each module for validating local call targets. For
cross-module calls that cannot be validated locally, the compiler
calls the global __cfi_slowpath_diag() function, which determines
the target module and calls the correct __cfi_check() function. This
patch includes a slowpath implementation that uses __module_address()
to resolve call targets, and with CONFIG_CFI_CLANG_SHADOW enabled, a
shadow map that speeds up module look-ups by ~3x.
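
Roughly, the slowpath looks like the sketch below (simplified; the
cfi_check_fn type, the mod->cfi_check field and the RCU details follow
from the description above but the exact names are assumptions, and the
CONFIG_CFI_CLANG_SHADOW fast path is omitted):

  void __cfi_slowpath_diag(uint64_t id, void *ptr, void *diag)
  {
          cfi_check_fn check = __cfi_check;   /* the kernel's own checker */
          struct module *mod;

          rcu_read_lock_sched();
          mod = __module_address((unsigned long)ptr);
          if (mod)
                  check = mod->cfi_check;     /* target module's checker */
          rcu_read_unlock_sched();

          check(id, ptr, diag);
  }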

Clang implements indirect call checking using jump tables and
offers two methods of generating them. With canonical jump tables,
the compiler renames each address-taken function to <function>.cfi
and points the original symbol to a jump table entry, which passes
__cfi_check() validation. This isn’t compatible with stand-alone
assembly code, which the compiler doesn’t instrument, and would
cause indirect calls into assembly code to fail. Therefore, we
default to using non-canonical jump tables instead, where the compiler
generates a local jump table entry <function>.cfi_jt for each
address-taken function, and replaces all references to the function
with the address of the jump table entry.

Note that because non-canonical jump table addresses are local
to each component, they break cross-module function address
equality. Specifically, the address of a global function will be
different in each module, as it's replaced with the address of a local
jump table entry. If this address is passed to a different module,
it won’t match the address of the same function taken there. This
may break code that relies on comparing addresses passed from other
components.
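
For example (all function names are hypothetical):

  /* module A */
  extern void shared_handler(void);

  void module_a_init(void)
  {
          register_handler(shared_handler);  /* passes the address of
                                              * module A's local
                                              * shared_handler.cfi_jt */
  }

  /* module B */
  void notify(void (*handler)(void))
  {
          if (handler == shared_handler) /* compares against module B's
                                          * own jump table entry, so this
                                          * is false even for the same
                                          * function */
                  handle_direct();
  }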

CFI checking can be disabled in a function with the __nocfi attribute.
Additionally, CFI can be disabled for an entire compilation unit by
filtering out CC_FLAGS_CFI.
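
For example (function and object file names hypothetical):

  static void __nocfi call_firmware_entry(void (*entry)(unsigned long),
                                          unsigned long arg)
  {
          entry(arg);   /* target lives in un-instrumented firmware,
                         * so skip the CFI check for this call */
  }

An entire object file can be opted out in the Makefile with the usual
Kbuild idiom, e.g. CFLAGS_REMOVE_firmware_glue.o += $(CC_FLAGS_CFI).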

By default, CFI failures result in a kernel panic to stop a potential
exploit. CONFIG_CFI_PERMISSIVE enables a permissive mode, where the
kernel prints out a rate-limited warning instead, and allows execution
to continue. This option is helpful for locating type mismatches, but
should only be enabled during development.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20210408182843.1754385-2-samitolvanen@google.com
2021-04-08 16:04:20 -07:00

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INIT_H
#define _LINUX_INIT_H
#include <linux/compiler.h>
#include <linux/types.h>
/* Built-in __init functions needn't be compiled with retpoline */
#if defined(__noretpoline) && !defined(MODULE)
#define __noinitretpoline __noretpoline
#else
#define __noinitretpoline
#endif
/* These macros are used to mark some functions or
* initialized data (doesn't apply to uninitialized data)
* as `initialization' functions. The kernel can take this
 * as a hint that the function is used only during the initialization
 * phase and free up the used memory resources afterwards.
*
* Usage:
* For functions:
*
* You should add __init immediately before the function name, like:
*
* static void __init initme(int x, int y)
* {
* extern int z; z = x * y;
* }
*
* If the function has a prototype somewhere, you can also add
* __init between closing brace of the prototype and semicolon:
*
* extern int initialize_foobar_device(int, int, int) __init;
*
* For initialized data:
* You should insert __initdata or __initconst between the variable name
* and equal sign followed by value, e.g.:
*
* static int init_variable __initdata = 0;
* static const char linux_logo[] __initconst = { 0x32, 0x36, ... };
*
* Don't forget to initialize data not at file scope, i.e. within a function,
* as gcc otherwise puts the data into the bss section and not into the init
* section.
*/
/* These are for everybody (although not all archs will actually
discard it in modules) */
#define __init __section(".init.text") __cold __latent_entropy __noinitretpoline __nocfi
#define __initdata __section(".init.data")
#define __initconst __section(".init.rodata")
#define __exitdata __section(".exit.data")
#define __exit_call __used __section(".exitcall.exit")
/*
 * modpost checks for section mismatches during the kernel build.
* A section mismatch happens when there are references from a
* code or data section to an init section (both code or data).
* The init sections are (for most archs) discarded by the kernel
* when early init has completed so all such references are potential bugs.
* For exit sections the same issue exists.
*
* The following markers are used for the cases where the reference to
* the *init / *exit section (code or data) is valid and will teach
 * modpost not to issue a warning. The intended semantics are that code
 * or data tagged __ref* can reference code or data from an init section
 * without producing a warning (of course, no warning does not mean the
 * code is correct, so optimally document why the __ref is needed and
 * why it's OK).
 *
 * The markers follow the same syntax rules as __init / __initdata.
*/
#define __ref __section(".ref.text") noinline
#define __refdata __section(".ref.data")
#define __refconst __section(".ref.rodata")
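/*
 * Illustrative sketch (hypothetical names): a function that has been
 * audited to call into init code only while that code is still present
 * can be tagged __ref to suppress the modpost warning:
 *
 *	static int __init setup_one_node(int nid) { ... }
 *
 *	static int __ref node_bringup(int nid)
 *	{
 *		return setup_one_node(nid);	// safe: only reached during
 *						// early boot, before init
 *						// memory is discarded
 *	}
 */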
#ifdef MODULE
#define __exitused
#else
#define __exitused __used
#endif
#define __exit __section(".exit.text") __exitused __cold notrace
/* Used for MEMORY_HOTPLUG */
#define __meminit __section(".meminit.text") __cold notrace \
__latent_entropy
#define __meminitdata __section(".meminit.data")
#define __meminitconst __section(".meminit.rodata")
#define __memexit __section(".memexit.text") __exitused __cold notrace
#define __memexitdata __section(".memexit.data")
#define __memexitconst __section(".memexit.rodata")
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
#define __INIT .section ".init.text","ax"
#define __FINIT .previous
#define __INITDATA .section ".init.data","aw",%progbits
#define __INITRODATA .section ".init.rodata","a",%progbits
#define __FINITDATA .previous
#define __MEMINIT .section ".meminit.text", "ax"
#define __MEMINITDATA .section ".meminit.data", "aw"
#define __MEMINITRODATA .section ".meminit.rodata", "a"
/* silence warnings when references are OK */
#define __REF .section ".ref.text", "ax"
#define __REFDATA .section ".ref.data", "aw"
#define __REFCONST .section ".ref.rodata", "a"
#ifndef __ASSEMBLY__
/*
 * Used for initialization calls.
*/
typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void);
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
typedef int initcall_entry_t;
static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
{
return offset_to_ptr(entry);
}
#else
typedef initcall_t initcall_entry_t;
static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
{
return *entry;
}
#endif
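/*
 * Sketch of the PREL32 scheme above: instead of a full pointer, each
 * table entry stores a 32-bit offset from the entry itself to the
 * initcall, so the tables need no load-time relocation and stay small
 * on 64-bit kernels. offset_to_ptr() (linux/compiler.h) undoes this,
 * roughly:
 *
 *	fn = (initcall_t)((unsigned long)entry + *entry);
 */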
extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
/* Used for constructor calls. */
typedef void (*ctor_fn_t)(void);
struct file_system_type;
/* Defined in init/main.c */
extern int do_one_initcall(initcall_t fn);
extern char __initdata boot_command_line[];
extern char *saved_command_line;
extern unsigned int reset_devices;
/* used by init/main.c */
void setup_arch(char **);
void prepare_namespace(void);
void __init init_rootfs(void);
extern struct file_system_type rootfs_fs_type;
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
extern bool rodata_enabled;
#endif
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void);
#endif
extern void (*late_time_init)(void);
extern bool initcall_debug;
#endif
#ifndef MODULE
#ifndef __ASSEMBLY__
/*
* initcalls are now grouped by functionality into separate
* subsections. Ordering inside the subsections is determined
* by link order.
* For backwards compatibility, initcall() puts the call in
* the device init subsection.
*
* The `id' arg to __define_initcall() is needed so that multiple initcalls
* can point at the same handler without causing duplicate-symbol build errors.
*
* Initcalls are run by placing pointers in initcall sections that the
* kernel iterates at runtime. The linker can do dead code / data elimination
* and remove that completely, so the initcall sections have to be marked
* as KEEP() in the linker script.
*/
/* Format: <modname>__<counter>_<line>_<fn> */
#define __initcall_id(fn) \
__PASTE(__KBUILD_MODNAME, \
__PASTE(__, \
__PASTE(__COUNTER__, \
__PASTE(_, \
__PASTE(__LINE__, \
__PASTE(_, fn))))))
/* Format: __<prefix>__<iid><id> */
#define __initcall_name(prefix, __iid, id) \
__PASTE(__, \
__PASTE(prefix, \
__PASTE(__, \
__PASTE(__iid, id))))
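/*
 * Worked example (hypothetical module "foo"; the counter and line values
 * are illustrative): for an initcall foo_init in a unit built with
 * __KBUILD_MODNAME expanding to kmod_foo, __initcall_id(foo_init) pastes
 * to something like
 *
 *	kmod_foo__123_456_foo_init
 *
 * and __initcall_name(initcall, <that id>, 6) then yields the unique
 * symbol name
 *
 *	__initcall__kmod_foo__123_456_foo_init6
 */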
#ifdef CONFIG_LTO_CLANG
/*
* With LTO, the compiler doesn't necessarily obey link order for
* initcalls. In order to preserve the correct order, we add each
* variable into its own section and generate a linker script (in
* scripts/link-vmlinux.sh) to specify the order of the sections.
*/
#define __initcall_section(__sec, __iid) \
#__sec ".init.." #__iid
/*
* With LTO, the compiler can rename static functions to avoid
* global naming collisions. We use a global stub function for
* initcalls to create a stable symbol name whose address can be
* taken in inline assembly when PREL32 relocations are used.
*/
#define __initcall_stub(fn, __iid, id) \
__initcall_name(initstub, __iid, id)
#define __define_initcall_stub(__stub, fn) \
int __init __stub(void); \
int __init __stub(void) \
{ \
return fn(); \
} \
__ADDRESSABLE(__stub)
#else
#define __initcall_section(__sec, __iid) \
#__sec ".init"
#define __initcall_stub(fn, __iid, id) fn
#define __define_initcall_stub(__stub, fn) \
__ADDRESSABLE(fn)
#endif
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
#define ____define_initcall(fn, __stub, __name, __sec) \
__define_initcall_stub(__stub, fn) \
asm(".section \"" __sec "\", \"a\" \n" \
__stringify(__name) ": \n" \
".long " __stringify(__stub) " - . \n" \
".previous \n");
#else
#define ____define_initcall(fn, __unused, __name, __sec) \
static initcall_t __name __used \
__attribute__((__section__(__sec))) = fn;
#endif
#define __unique_initcall(fn, id, __sec, __iid) \
____define_initcall(fn, \
__initcall_stub(fn, __iid, id), \
__initcall_name(initcall, __iid, id), \
__initcall_section(__sec, __iid))
#define ___define_initcall(fn, id, __sec) \
__unique_initcall(fn, id, __sec, __initcall_id(fn))
#define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id)
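/*
 * Putting the pieces together (hypothetical foo_init; symbol name as in
 * the example above): without CONFIG_LTO_CLANG and without
 * CONFIG_HAVE_ARCH_PREL32_RELOCATIONS, device_initcall(foo_init) boils
 * down to roughly
 *
 *	static initcall_t __initcall__kmod_foo__123_456_foo_init6 __used
 *		__attribute__((__section__(".initcall6.init"))) = foo_init;
 *
 * i.e. a pointer to foo_init placed in the .initcall6.init section. With
 * PREL32 relocations the entry is instead emitted from inline asm as a
 * 32-bit offset to the function (or to its LTO stub).
 */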
/*
* Early initcalls run before initializing SMP.
*
* Only for built-in code, not modules.
*/
#define early_initcall(fn) __define_initcall(fn, early)
/*
* A "pure" initcall has no dependencies on anything else, and purely
* initializes variables that couldn't be statically initialized.
*
* This only exists for built-in code, not for modules.
* Keep main.c:initcall_level_names[] in sync.
*/
#define pure_initcall(fn) __define_initcall(fn, 0)
#define core_initcall(fn) __define_initcall(fn, 1)
#define core_initcall_sync(fn) __define_initcall(fn, 1s)
#define postcore_initcall(fn) __define_initcall(fn, 2)
#define postcore_initcall_sync(fn) __define_initcall(fn, 2s)
#define arch_initcall(fn) __define_initcall(fn, 3)
#define arch_initcall_sync(fn) __define_initcall(fn, 3s)
#define subsys_initcall(fn) __define_initcall(fn, 4)
#define subsys_initcall_sync(fn) __define_initcall(fn, 4s)
#define fs_initcall(fn) __define_initcall(fn, 5)
#define fs_initcall_sync(fn) __define_initcall(fn, 5s)
#define rootfs_initcall(fn) __define_initcall(fn, rootfs)
#define device_initcall(fn) __define_initcall(fn, 6)
#define device_initcall_sync(fn) __define_initcall(fn, 6s)
#define late_initcall(fn) __define_initcall(fn, 7)
#define late_initcall_sync(fn) __define_initcall(fn, 7s)
#define __initcall(fn) device_initcall(fn)
#define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn
#define console_initcall(fn) ___define_initcall(fn, con, .con_initcall)
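/*
 * Typical use (hypothetical driver): pick the level that matches the
 * code's dependencies; most built-in drivers simply use the device level:
 *
 *	static int __init foo_driver_init(void)
 *	{
 *		return platform_driver_register(&foo_driver);
 *	}
 *	device_initcall(foo_driver_init);
 *
 * Tristate drivers use module_init() instead, which maps to
 * device_initcall() when the driver is built in.
 */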
struct obs_kernel_param {
const char *str;
int (*setup_func)(char *);
int early;
};
/*
* Only for really core code. See moduleparam.h for the normal way.
*
* Force the alignment so the compiler doesn't space elements of the
* obs_kernel_param "array" too far apart in .init.setup.
*/
#define __setup_param(str, unique_id, fn, early) \
static const char __setup_str_##unique_id[] __initconst \
__aligned(1) = str; \
static struct obs_kernel_param __setup_##unique_id \
__used __section(".init.setup") \
__aligned(__alignof__(struct obs_kernel_param)) \
= { __setup_str_##unique_id, fn, early }
#define __setup(str, fn) \
__setup_param(str, fn, fn, 0)
/*
* NOTE: fn is as per module_param, not __setup!
* Emits warning if fn returns non-zero.
*/
#define early_param(str, fn) \
__setup_param(str, fn, fn, 1)
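/*
 * Illustrative use (hypothetical parameter): handle a "foo_debug" token
 * on the kernel command line very early in boot:
 *
 *	static bool foo_debug __initdata;
 *
 *	static int __init foo_debug_setup(char *str)
 *	{
 *		foo_debug = true;
 *		return 0;
 *	}
 *	early_param("foo_debug", foo_debug_setup);
 *
 * A plain __setup("foo_debug=", fn) handler, by contrast, runs during
 * normal parameter parsing and returns 1 to indicate the option was
 * consumed.
 */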
#define early_param_on_off(str_on, str_off, var, config) \
\
int var = IS_ENABLED(config); \
\
static int __init parse_##var##_on(char *arg) \
{ \
var = 1; \
return 0; \
} \
early_param(str_on, parse_##var##_on); \
\
static int __init parse_##var##_off(char *arg) \
{ \
var = 0; \
return 0; \
} \
early_param(str_off, parse_##var##_off)
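/*
 * Illustrative use (hypothetical names): give "foo_feature" a
 * compile-time default from CONFIG_FOO_FEATURE_DEFAULT and let the
 * command line flip it either way:
 *
 *	early_param_on_off("foo_feature", "nofoo_feature",
 *			   foo_feature_enabled, CONFIG_FOO_FEATURE_DEFAULT);
 *
 * This defines 'int foo_feature_enabled' initialized via IS_ENABLED(),
 * plus two early parameters that set it to 1 or 0 respectively.
 */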
/* Relies on boot_command_line being set */
void __init parse_early_param(void);
void __init parse_early_options(char *cmdline);
#endif /* __ASSEMBLY__ */
#else /* MODULE */
#define __setup_param(str, unique_id, fn) /* nothing */
#define __setup(str, func) /* nothing */
#endif
/* Data marked not to be saved by software suspend */
#define __nosavedata __section(".data..nosave")
#ifdef MODULE
#define __exit_p(x) x
#else
#define __exit_p(x) NULL
#endif
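/*
 * Illustrative use (hypothetical driver): __exit_p() lets a driver
 * reference its __exit routine only when it can actually be unloaded,
 * i.e. when built as a module; built-in code gets a NULL pointer and the
 * __exit function can be discarded:
 *
 *	static int __exit foo_remove(struct platform_device *pdev) { ... }
 *
 *	static struct platform_driver foo_driver = {
 *		.remove	= __exit_p(foo_remove),
 *		...
 *	};
 */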
#endif /* _LINUX_INIT_H */