Mirror of https://github.com/lkl/linux.git, synced 2025-12-19 08:03:01 +09:00
Merge tag 'mm-stable-2023-06-24-19-15' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull mm updates from Andrew Morton:

 - Yosry Ahmed brought back some cgroup v1 stats in OOM logs
 - Yosry has also eliminated cgroup's atomic rstat flushing
 - Nhat Pham adds the new cachestat() syscall. It provides userspace with the ability to query pagecache status - a similar concept to mincore() but more powerful and with improved usability
 - Mel Gorman provides more optimizations for compaction, reducing the prevalence of page rescanning
 - Lorenzo Stoakes has done some maintenance work on the get_user_pages() interface
 - Liam Howlett continues with cleanups and maintenance work on the maple tree code. Peng Zhang also does some work on maple tree
 - Johannes Weiner has done some cleanup work on the compaction code
 - David Hildenbrand has contributed additional selftests for get_user_pages()
 - Thomas Gleixner has contributed some maintenance and optimization work for the vmalloc code
 - Baolin Wang has provided some compaction cleanups
 - SeongJae Park continues maintenance work on the DAMON code
 - Huang Ying has done some maintenance on the swap code's usage of device refcounting
 - Christoph Hellwig has some cleanups for the filemap/directio code
 - Ryan Roberts provides two patch series which yield some rationalization of the kernel's access to pte entries - use the provided APIs rather than open-coding accesses
 - Lorenzo Stoakes has some fixes to the interaction between pagecache and directio access to file mappings
 - John Hubbard has a series of fixes to the MM selftesting code
 - ZhangPeng continues the folio conversion campaign
 - Hugh Dickins has been working on the pagetable handling code, mainly with a view to reducing the load on the mmap_lock
 - Catalin Marinas has reduced the arm64 kmalloc() minimum alignment from 128 to 8
 - Domenico Cerasuolo has improved the zswap reclaim mechanism by reorganizing the LRU management
 - Matthew Wilcox provides some fixups to make gfs2 work better with the buffer_head code
 - Vishal Moola also has done some folio conversion work
 - Matthew Wilcox has removed the remnants of the pagevec code - their functionality is migrated over to struct folio_batch

* tag 'mm-stable-2023-06-24-19-15' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (380 commits)
  mm/hugetlb: remove hugetlb_set_page_subpool()
  mm: nommu: correct the range of mmap_sem_read_lock in task_mem()
  hugetlb: revert use of page_cache_next_miss()
  Revert "page cache: fix page_cache_next/prev_miss off by one"
  mm/vmscan: fix root proactive reclaim unthrottling unbalanced node
  mm: memcg: rename and document global_reclaim()
  mm: kill [add|del]_page_to_lru_list()
  mm: compaction: convert to use a folio in isolate_migratepages_block()
  mm: zswap: fix double invalidate with exclusive loads
  mm: remove unnecessary pagevec includes
  mm: remove references to pagevec
  mm: rename invalidate_mapping_pagevec to mapping_try_invalidate
  mm: remove struct pagevec
  net: convert sunrpc from pagevec to folio_batch
  i915: convert i915_gpu_error to use a folio_batch
  pagevec: rename fbatch_count()
  mm: remove check_move_unevictable_pages()
  drm: convert drm_gem_put_pages() to use a folio_batch
  i915: convert shmem_sg_free_table() to use a folio_batch
  scatterlist: add sg_set_folio()
  ...
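As a quick illustration of the cachestat() syscall highlighted above, here is a minimal userspace sketch (not part of this merge) that queries pagecache residency for a whole file. It assumes the uapi definitions added in this cycle (struct cachestat_range and struct cachestat in <linux/mman.h>) and a syscall number of 451 where the headers do not yet define __NR_cachestat; a zero-length range is taken to mean "to the end of the file".

/* cachestat-demo.c - print pagecache residency counters for a file. */
#include <fcntl.h>
#include <linux/mman.h>         /* struct cachestat, struct cachestat_range */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_cachestat
#define __NR_cachestat 451      /* assumed number for this merge window */
#endif

int main(int argc, char **argv)
{
        struct cachestat_range range = { 0, 0 };  /* off = 0, len = 0: whole file */
        struct cachestat cs;
        int fd;

        if (argc < 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (syscall(__NR_cachestat, fd, &range, &cs, 0)) {
                perror("cachestat");
                return 1;
        }
        printf("cached %llu dirty %llu writeback %llu evicted %llu recently_evicted %llu\n",
               (unsigned long long)cs.nr_cache,
               (unsigned long long)cs.nr_dirty,
               (unsigned long long)cs.nr_writeback,
               (unsigned long long)cs.nr_evicted,
               (unsigned long long)cs.nr_recently_evicted);
        close(fd);
        return 0;
}

Unlike mincore(), the result is a set of per-range counters rather than a per-page bitmap, and the file does not need to be mapped first.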
@@ -725,7 +725,6 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,

#else /* CONFIG_PER_VMA_LOCK */

static inline void vma_init_lock(struct vm_area_struct *vma) {}
static inline bool vma_start_read(struct vm_area_struct *vma)
                { return false; }
static inline void vma_end_read(struct vm_area_struct *vma) {}
@@ -866,11 +865,24 @@ static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
        return mas_find(&vmi->mas, ULONG_MAX);
}

static inline
struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
{
        return mas_next_range(&vmi->mas, ULONG_MAX);
}

static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
{
        return mas_prev(&vmi->mas, 0);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
        return mas_prev_range(&vmi->mas, 0);
}

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
        return vmi->mas.index;
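For context, the vma_iter_next_range() and vma_iter_prev_range() helpers added above sit beside the existing maple-tree-backed VMA iterator that vma_next()/vma_prev() already wrap. A minimal kernel-side sketch (not part of this diff) of how a caller typically walks an address space with that iterator:

#include <linux/mm.h>

/* Count writable VMAs in @mm; the caller must be allowed to sleep. */
static unsigned long count_writable_vmas(struct mm_struct *mm)
{
        VMA_ITERATOR(vmi, mm, 0);
        struct vm_area_struct *vma;
        unsigned long nr = 0;

        mmap_read_lock(mm);
        for_each_vma(vmi, vma)
                if (vma->vm_flags & VM_WRITE)
                        nr++;
        mmap_read_unlock(mm);

        return nr;
}

The new *_range() variants are thin wrappers around mas_next_range()/mas_prev_range(), stepping by maple-tree range instead of going through mas_find()/mas_prev().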
@@ -1208,17 +1220,6 @@ enum compound_dtor_id {
#endif
        NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS];

static inline void set_compound_page_dtor(struct page *page,
                enum compound_dtor_id compound_dtor)
{
        struct folio *folio = (struct folio *)page;

        VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
        VM_BUG_ON_PAGE(!PageHead(page), page);
        folio->_folio_dtor = compound_dtor;
}

static inline void folio_set_compound_dtor(struct folio *folio,
                enum compound_dtor_id compound_dtor)
@@ -1229,16 +1230,6 @@ static inline void folio_set_compound_dtor(struct folio *folio,

void destroy_large_folio(struct folio *folio);

static inline void set_compound_order(struct page *page, unsigned int order)
{
        struct folio *folio = (struct folio *)page;

        folio->_folio_order = order;
#ifdef CONFIG_64BIT
        folio->_folio_nr_pages = 1U << order;
#endif
}

/* Returns the number of bytes in this potentially compound page. */
static inline unsigned long page_size(struct page *page)
{
@@ -1932,39 +1923,35 @@ static inline bool is_zero_folio(const struct folio *folio)
        return is_zero_page(&folio->page);
}

/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */
/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin folios */
#ifdef CONFIG_MIGRATION
static inline bool is_longterm_pinnable_page(struct page *page)
static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
#ifdef CONFIG_CMA
        int mt = get_pageblock_migratetype(page);
        int mt = folio_migratetype(folio);

        if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
                return false;
#endif
        /* The zero page can be "pinned" but gets special handling. */
        if (is_zero_page(page))
        if (is_zero_folio(folio))
                return true;

        /* Coherent device memory must always allow eviction. */
        if (is_device_coherent_page(page))
        if (folio_is_device_coherent(folio))
                return false;

        /* Otherwise, non-movable zone pages can be pinned. */
        return !is_zone_movable_page(page);
        /* Otherwise, non-movable zone folios can be pinned. */
        return !folio_is_zone_movable(folio);

}
#else
static inline bool is_longterm_pinnable_page(struct page *page)
static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
        return true;
}
#endif

static inline bool folio_is_longterm_pinnable(struct folio *folio)
{
        return is_longterm_pinnable_page(&folio->page);
}

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
        page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
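folio_is_longterm_pinnable() above becomes the primary predicate and the old page-based wrapper goes away. A hedged sketch (assumed caller, not from this patch) of the kind of user it serves: a FOLL_LONGTERM pin path counting folios that would first have to be migrated out of CMA or ZONE_MOVABLE, roughly along the lines of check_and_migrate_movable_pages() in mm/gup.c.

#include <linux/mm.h>

/* How many of these pinned pages live in memory we must not pin long term? */
static unsigned long count_longterm_unpinnable(struct page **pages,
                unsigned long nr_pages)
{
        unsigned long i, unpinnable = 0;

        for (i = 0; i < nr_pages; i++)
                if (!folio_is_longterm_pinnable(page_folio(pages[i])))
                        unpinnable++;

        return unpinnable;
}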
@@ -2375,6 +2362,9 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
        unmap_mapping_range(mapping, holebegin, holelen, 0);
}

static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
                unsigned long addr);

extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
                void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
@@ -2383,19 +2373,42 @@ extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
                void *buf, int len, unsigned int gup_flags);

long get_user_pages_remote(struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *locked);
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                int *locked);
long pin_user_pages_remote(struct mm_struct *mm,
                unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *locked);
                int *locked);

static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
                unsigned long addr,
                int gup_flags,
                struct vm_area_struct **vmap)
{
        struct page *page;
        struct vm_area_struct *vma;
        int got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);

        if (got < 0)
                return ERR_PTR(got);
        if (got == 0)
                return NULL;

        vma = vma_lookup(mm, addr);
        if (WARN_ON_ONCE(!vma)) {
                put_page(page);
                return ERR_PTR(-EINVAL);
        }

        *vmap = vma;
        return page;
}

long get_user_pages(unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas);
                unsigned int gup_flags, struct page **pages);
long pin_user_pages(unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas);
                unsigned int gup_flags, struct page **pages);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
                struct page **pages, unsigned int gup_flags);
long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
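The hunk above drops the **vmas argument from the remote GUP calls and adds get_user_page_vma_remote() for callers that still need the VMA back. A sketch (assumed caller, not from this patch) of reading one byte from another process's address space with the reworked API; the mmap_lock is held around the call since the helper passes a NULL locked pointer to get_user_pages_remote():

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>

/* Read one byte at @addr in @mm; may sleep, takes the mmap_lock itself. */
static int read_remote_byte(struct mm_struct *mm, unsigned long addr, u8 *out)
{
        struct vm_area_struct *vma;
        struct page *page;
        void *kaddr;
        int ret = 0;

        mmap_read_lock(mm);
        page = get_user_page_vma_remote(mm, addr, 0, &vma);
        if (IS_ERR_OR_NULL(page)) {
                ret = page ? PTR_ERR(page) : -EFAULT;
                goto out_unlock;
        }

        pr_debug("%#lx lies in VMA [%#lx, %#lx)\n",
                 addr, vma->vm_start, vma->vm_end);

        kaddr = kmap_local_page(page);
        *out = *(u8 *)(kaddr + offset_in_page(addr));
        kunmap_local(kaddr);
        put_page(page);

out_unlock:
        mmap_read_unlock(mm);
        return ret;
}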
@@ -2445,6 +2458,7 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
#define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \
                MM_CP_UFFD_WP_RESOLVE)

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
@@ -2810,14 +2824,25 @@ static inline void pgtable_pte_page_dtor(struct page *page)
        dec_lruvec_page_state(page, NR_PAGETABLE);
}

#define pte_offset_map_lock(mm, pmd, address, ptlp) \
({ \
        spinlock_t *__ptl = pte_lockptr(mm, pmd); \
        pte_t *__pte = pte_offset_map(pmd, address); \
        *(ptlp) = __ptl; \
        spin_lock(__ptl); \
        __pte; \
})
pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
{
        return __pte_offset_map(pmd, addr, NULL);
}

pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
                unsigned long addr, spinlock_t **ptlp);
static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
                unsigned long addr, spinlock_t **ptlp)
{
        pte_t *pte;

        __cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp));
        return pte;
}

pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
                unsigned long addr, spinlock_t **ptlp);

#define pte_unmap_unlock(pte, ptl) do { \
        spin_unlock(ptl); \
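With Hugh Dickins' rework above, pte_offset_map_lock() is no longer a macro that cannot fail: __pte_offset_map_lock() may find that the page table it was aiming at has been freed or replaced, in which case it is expected to return NULL with no lock taken. A sketch (assumed caller, not from this patch) of the pattern callers now follow:

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Return 1 if a present PTE maps @addr, 0 if not, -EAGAIN if no page table. */
static int probe_pte_present(struct mm_struct *mm, pmd_t *pmd,
                unsigned long addr)
{
        spinlock_t *ptl;
        pte_t *pte;
        int ret;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return -EAGAIN;         /* pmd cleared or changed under us */

        ret = pte_present(ptep_get(pte)) ? 1 : 0;
        pte_unmap_unlock(pte, ptl);
        return ret;
}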
@@ -2938,7 +2963,8 @@ extern unsigned long free_reserved_area(void *start, void *end,

extern void adjust_managed_page_count(struct page *page, long count);

extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
extern void reserve_bootmem_region(phys_addr_t start,
                phys_addr_t end, int nid);

/* Free the reserved page into the buddy system, so it gets managed. */
static inline void free_reserved_page(struct page *page)
@@ -3017,12 +3043,6 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn);
#endif

extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_range(unsigned long, int, unsigned long,
                unsigned long, unsigned long, enum meminit_context,
                struct vmem_altmap *, int migratetype);
extern void setup_per_zone_wmarks(void);
extern void calculate_min_free_kbytes(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);

@@ -3043,11 +3063,6 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);

extern void setup_per_cpu_pageset(void);

/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_boost_factor;
extern int watermark_scale_factor;

/* nommu.c */
extern atomic_long_t mmap_pages_allocated;
extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
@@ -3494,9 +3509,58 @@ static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
        if (debug_pagealloc_enabled_static())
                __kernel_map_pages(page, numpages, 0);
}

extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static inline unsigned int debug_guardpage_minorder(void)
{
        return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
        return static_branch_unlikely(&_debug_guardpage_enabled);
}

static inline bool page_is_guard(struct page *page)
{
        if (!debug_guardpage_enabled())
                return false;

        return PageGuard(page);
}

bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
                int migratetype);
static inline bool set_page_guard(struct zone *zone, struct page *page,
                unsigned int order, int migratetype)
{
        if (!debug_guardpage_enabled())
                return false;
        return __set_page_guard(zone, page, order, migratetype);
}

void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order,
                int migratetype);
static inline void clear_page_guard(struct zone *zone, struct page *page,
                unsigned int order, int migratetype)
{
        if (!debug_guardpage_enabled())
                return;
        __clear_page_guard(zone, page, order, migratetype);
}

#else /* CONFIG_DEBUG_PAGEALLOC */
static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
static inline bool set_page_guard(struct zone *zone, struct page *page,
                unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
                unsigned int order, int migratetype) {}
#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef __HAVE_ARCH_GATE_AREA
@@ -3609,6 +3673,10 @@ extern void shake_page(struct page *p);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(unsigned long pfn, int flags);
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Sysfs entries for memory failure handling statistics.
 */
extern const struct attribute_group memory_failure_attr_group;
extern void memory_failure_queue(unsigned long pfn, int flags);
extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                bool *migratable_cleared);
@@ -3701,11 +3769,6 @@ enum mf_action_page_type {
        MF_MSG_UNKNOWN,
};

/*
 * Sysfs entries for memory failure handling statistics.
 */
extern const struct attribute_group memory_failure_attr_group;

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
extern void clear_huge_page(struct page *page,
                unsigned long addr_hint,
@@ -3735,33 +3798,6 @@ static inline bool vma_is_special_huge(const struct vm_area_struct *vma)

#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static inline unsigned int debug_guardpage_minorder(void)
{
        return _debug_guardpage_minorder;
}

static inline bool debug_guardpage_enabled(void)
{
        return static_branch_unlikely(&_debug_guardpage_enabled);
}

static inline bool page_is_guard(struct page *page)
{
        if (!debug_guardpage_enabled())
                return false;

        return PageGuard(page);
}
#else
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
static inline bool debug_guardpage_enabled(void) { return false; }
static inline bool page_is_guard(struct page *page) { return false; }
#endif /* CONFIG_DEBUG_PAGEALLOC */

#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void);
#else