mirror of
https://github.com/lkl/linux.git
synced 2025-12-19 16:13:19 +09:00
mm: kmsan: maintain KMSAN metadata for page operations
Insert KMSAN hooks that make the necessary bookkeeping changes: - poison page shadow and origins in alloc_pages()/free_page(); - clear page shadow and origins in clear_page(), copy_user_highpage(); - copy page metadata in copy_highpage(), wp_page_copy(); - handle vmap()/vunmap()/iounmap(); Link: https://lkml.kernel.org/r/20220915150417.722975-15-glider@google.com Signed-off-by: Alexander Potapenko <glider@google.com> Cc: Alexander Viro <viro@zeniv.linux.org.uk> Cc: Alexei Starovoitov <ast@kernel.org> Cc: Andrey Konovalov <andreyknvl@gmail.com> Cc: Andrey Konovalov <andreyknvl@google.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Borislav Petkov <bp@alien8.de> Cc: Christoph Hellwig <hch@lst.de> Cc: Christoph Lameter <cl@linux.com> Cc: David Rientjes <rientjes@google.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Eric Biggers <ebiggers@google.com> Cc: Eric Biggers <ebiggers@kernel.org> Cc: Eric Dumazet <edumazet@google.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Herbert Xu <herbert@gondor.apana.org.au> Cc: Ilya Leoshkevich <iii@linux.ibm.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Kees Cook <keescook@chromium.org> Cc: Marco Elver <elver@google.com> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Michael S. Tsirkin <mst@redhat.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Petr Mladek <pmladek@suse.com> Cc: Stephen Rothwell <sfr@canb.auug.org.au> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Vegard Nossum <vegard.nossum@oracle.com> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
committed by
Andrew Morton
parent
d596b04f59
commit
b073d7f8ae
145
include/linux/kmsan.h
Normal file
145
include/linux/kmsan.h
Normal file
@@ -0,0 +1,145 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* KMSAN API for subsystems.
|
||||
*
|
||||
* Copyright (C) 2017-2022 Google LLC
|
||||
* Author: Alexander Potapenko <glider@google.com>
|
||||
*
|
||||
*/
|
||||
#ifndef _LINUX_KMSAN_H
|
||||
#define _LINUX_KMSAN_H
|
||||
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/kmsan-checks.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct page;
|
||||
|
||||
#ifdef CONFIG_KMSAN
|
||||
|
||||
/**
 * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
 * @page: struct page pointer returned by alloc_pages().
 * @order: order of allocated struct page.
 * @flags: GFP flags used by alloc_pages()
 *
 * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
 * @flags contain __GFP_ZERO (in which case the memory is already zeroed
 * and therefore initialized).
 */
void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
|
||||
|
||||
/**
 * kmsan_free_page() - Notify KMSAN about a free_pages() call.
 * @page: struct page pointer passed to free_pages().
 * @order: order of deallocated struct page.
 *
 * KMSAN marks freed memory as uninitialized, so that subsequent
 * use-after-free accesses are reported.
 */
void kmsan_free_page(struct page *page, unsigned int order);
|
||||
|
||||
/**
 * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
 * @dst: destination page.
 * @src: source page.
 *
 * KMSAN copies the contents of metadata pages for @src into the metadata pages
 * for @dst. If @dst has no associated metadata pages, nothing happens.
 * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
 */
void kmsan_copy_page_meta(struct page *dst, struct page *src);
|
||||
|
||||
/**
|
||||
* kmsan_map_kernel_range_noflush() - Notify KMSAN about a vmap.
|
||||
* @start: start of vmapped range.
|
||||
* @end: end of vmapped range.
|
||||
* @prot: page protection flags used for vmap.
|
||||
* @pages: array of pages.
|
||||
* @page_shift: page_shift passed to vmap_range_noflush().
|
||||
*
|
||||
* KMSAN maps shadow and origin pages of @pages into contiguous ranges in
|
||||
* vmalloc metadata address range.
|
||||
*/
|
||||
void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
|
||||
pgprot_t prot, struct page **pages,
|
||||
unsigned int page_shift);
|
||||
|
||||
/**
 * kmsan_vunmap_range_noflush() - Notify KMSAN about a vunmap.
 * @start: start of vunmapped range.
 * @end: end of vunmapped range.
 *
 * KMSAN unmaps the contiguous metadata ranges created by
 * kmsan_vmap_pages_range_noflush().
 */
void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
|
||||
|
||||
/**
 * kmsan_ioremap_page_range() - Notify KMSAN about an ioremap_page_range() call.
 * @addr: range start.
 * @end: range end.
 * @phys_addr: physical range start.
 * @prot: page protection flags used for ioremap_page_range().
 * @page_shift: page_shift argument passed to vmap_range_noflush().
 *
 * KMSAN creates new metadata pages for the physical pages mapped into the
 * virtual memory.
 */
void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
			      phys_addr_t phys_addr, pgprot_t prot,
			      unsigned int page_shift);
|
||||
|
||||
/**
 * kmsan_iounmap_page_range() - Notify KMSAN about an iounmap_page_range() call.
 * @start: range start.
 * @end: range end.
 *
 * KMSAN unmaps the metadata pages for the given range and, unlike for
 * vunmap_page_range(), also deallocates them.
 */
void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
|
||||
|
||||
#else
|
||||
|
||||
static inline int kmsan_alloc_page(struct page *page, unsigned int order,
|
||||
gfp_t flags)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* No-op stub: KMSAN bookkeeping is compiled out when CONFIG_KMSAN=n. */
static inline void kmsan_free_page(struct page *page, unsigned int order)
{
}
|
||||
|
||||
/* No-op stub: there is no page metadata to copy when CONFIG_KMSAN=n. */
static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
}
|
||||
|
||||
/* No-op stub: no shadow/origin mappings are maintained when CONFIG_KMSAN=n. */
static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
						  unsigned long end,
						  pgprot_t prot,
						  struct page **pages,
						  unsigned int page_shift)
{
}
|
||||
|
||||
/* No-op stub: nothing to unmap when CONFIG_KMSAN=n. */
static inline void kmsan_vunmap_range_noflush(unsigned long start,
					      unsigned long end)
{
}
|
||||
|
||||
/*
 * No-op stub: no metadata pages are created when CONFIG_KMSAN=n.
 * NOTE(review): the first parameter is named 'start' here but 'addr' in the
 * CONFIG_KMSAN=y declaration — harmless, but inconsistent.
 */
static inline void kmsan_ioremap_page_range(unsigned long start,
					    unsigned long end,
					    phys_addr_t phys_addr,
					    pgprot_t prot,
					    unsigned int page_shift)
{
}
|
||||
|
||||
/* No-op stub: no metadata pages to unmap or free when CONFIG_KMSAN=n. */
static inline void kmsan_iounmap_page_range(unsigned long start,
					    unsigned long end)
{
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_KMSAN_H */
|
||||
Reference in New Issue
Block a user