[PATCH OpenHarmony-5.10 0/2] mm: add cma reuse feature

Heesub Shin (1):
  mm: redirect page allocation to CMA

lijiawei (1):
  mm: add MIGRATE_CMA to pcp lists

 include/linux/gfp.h     | 17 ++++++++++++++---
 include/linux/highmem.h |  4 +++-
 include/linux/mmzone.h  | 11 ++++++++++-
 mm/Kconfig              | 10 ++++++++++
 mm/compaction.c         |  2 +-
 mm/page_alloc.c         | 36 +++++++++++++++++++++++++++++++++---
 6 files changed, 71 insertions(+), 9 deletions(-)

-- 
2.25.1

From: Heesub Shin <heesub.shin@samsung.com>

maillist inclusion
category: feature
issue: #I4LUG4
CVE: NA

Reference: https://lkml.org/lkml/2020/11/2/646

Signed-off-by: lijiawei <lijiawei49@huawei.com>

-------------------------------------

cma: redirect page allocation to CMA

CMA pages are designed to be used as a fallback for movable allocations
and cannot be used for non-movable allocations. If CMA pages are
utilized poorly, non-movable allocations may end up getting starved if
all regular movable pages are allocated and the only pages left are
CMA. Always using CMA pages first creates unacceptable performance
problems. As a midway alternative, use CMA pages for certain userspace
allocations. Userspace pages can be migrated or dropped quickly, which
gives decent utilization.

Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Heesub Shin <heesub.shin@samsung.com>
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
[cgoldswo@codeaurora.org: Place in bugfixes]
Signed-off-by: Chris Goldsworthy <cgoldswo@codeaurora.org>
Reported-by: kernel test robot <rong.a.chen@intel.com>
---
 include/linux/gfp.h     | 15 ++++++++
 include/linux/highmem.h |  4 +-
 include/linux/mmzone.h  |  4 ++
 mm/page_alloc.c         | 84 +++++++++++++++++++++++++----------------
 4 files changed, 74 insertions(+), 33 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index c603237e006c..e80b7d2f5b38 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -39,11 +39,21 @@ struct vm_area_struct;
 #define ___GFP_HARDWALL 0x100000u
 #define ___GFP_THISNODE 0x200000u
 #define ___GFP_ACCOUNT 0x400000u
+#ifdef CONFIG_CMA
+#define ___GFP_CMA 0x800000u
+#else
+#define ___GFP_CMA 0
+#endif
 #ifdef CONFIG_LOCKDEP
+#ifdef CONFIG_CMA
+#define ___GFP_NOLOCKDEP 0x1000000u
+#else
 #define ___GFP_NOLOCKDEP 0x800000u
+#endif
 #else
 #define ___GFP_NOLOCKDEP 0
 #endif
+
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -57,6 +67,7 @@ struct vm_area_struct;
 #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
 #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
 #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
+#define __GFP_CMA ((__force gfp_t)___GFP_CMA)
 #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 
 /**
@@ -224,7 +235,11 @@ struct vm_area_struct;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
+#ifdef CONFIG_CMA
+#define __GFP_BITS_SHIFT (24 + IS_ENABLED(CONFIG_LOCKDEP))
+#else
 #define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
+#endif
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 14e6202ce47f..97241457d97b 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -274,7 +274,9 @@ static inline struct page *
 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                                         unsigned long vaddr)
 {
-        return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+        return __alloc_zeroed_user_highpage(
+                __GFP_MOVABLE | __GFP_CMA, vma,
+                vaddr);
 }
 
 static inline void clear_highpage(struct page *page)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 63b550403317..b6bcef45bc1d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -451,6 +451,10 @@ struct zone {
         struct pglist_data *zone_pgdat;
         struct per_cpu_pageset __percpu *pageset;
 
+#ifdef CONFIG_CMA
+        bool cma_alloc;
+#endif
+
 #ifndef CONFIG_SPARSEMEM
         /*
          * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6d189b69a9e1..7250a0b59861 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2844,36 +2844,34 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
         struct page *page;
 
-        if (IS_ENABLED(CONFIG_CMA)) {
-                /*
-                 * Balance movable allocations between regular and CMA areas by
-                 * allocating from CMA when over half of the zone's free memory
-                 * is in the CMA area.
-                 */
-                if (alloc_flags & ALLOC_CMA &&
-                    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-                    zone_page_state(zone, NR_FREE_PAGES) / 2) {
-                        page = __rmqueue_cma_fallback(zone, order);
-                        if (page)
-                                goto out;
-                }
-        }
 retry:
         page = __rmqueue_smallest(zone, order, migratetype);
-        if (unlikely(!page)) {
-                if (alloc_flags & ALLOC_CMA)
-                        page = __rmqueue_cma_fallback(zone, order);
-
-                if (!page && __rmqueue_fallback(zone, order, migratetype,
-                                                                alloc_flags))
-                        goto retry;
-        }
+        if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype,
+                                                  alloc_flags))
+                goto retry;
 out:
         if (page)
                 trace_mm_page_alloc_zone_locked(page, order, migratetype);
         return page;
 }
 
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
+                                  int migratetype,
+                                  unsigned int alloc_flags)
+{
+        struct page *page = 0;
+
+#ifdef CONFIG_CMA
+        if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
+                page = __rmqueue_cma_fallback(zone, order);
+        else
+#endif
+                page = __rmqueue_smallest(zone, order, migratetype);
+
+        trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
+        return page;
+}
+
 /*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency.  Add them to the supplied list.
@@ -2881,14 +2879,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
                         unsigned long count, struct list_head *list,
-                        int migratetype, unsigned int alloc_flags)
+                        int migratetype, unsigned int alloc_flags, int cma)
 {
         int i, alloced = 0;
 
         spin_lock(&zone->lock);
         for (i = 0; i < count; ++i) {
-                struct page *page = __rmqueue(zone, order, migratetype,
-                                                                alloc_flags);
+                struct page *page = NULL;
+
+                if (cma)
+                        page = __rmqueue_cma(zone, order, migratetype,
+                                             alloc_flags);
+                else
+                        page = __rmqueue(zone, order, migratetype, alloc_flags);
+
                 if (unlikely(page == NULL))
                         break;
 
@@ -3374,7 +3378,8 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
 
 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
                         unsigned int alloc_flags,
                         struct per_cpu_pages *pcp,
-                        struct list_head *list)
+                        struct list_head *list,
+                        gfp_t gfp_flags)
 {
         struct page *page;
 
@@ -3382,7 +3387,8 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
                 if (list_empty(list)) {
                         pcp->count += rmqueue_bulk(zone, 0,
                                         pcp->batch, list,
-                                        migratetype, alloc_flags);
+                                        migratetype, alloc_flags,
+                                        gfp_flags && __GFP_CMA);
                         if (unlikely(list_empty(list)))
                                 return NULL;
                 }
@@ -3408,7 +3414,8 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
         local_irq_save(flags);
         pcp = &this_cpu_ptr(zone->pageset)->pcp;
         list = &pcp->lists[migratetype];
-        page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
+        page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list,
+                                 gfp_flags);
         if (page) {
                 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
                 zone_statistics(preferred_zone, zone);
@@ -3434,7 +3441,7 @@ struct page *rmqueue(struct zone *preferred_zone,
                  * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
                  * we need to skip it when CMA area isn't allowed.
                  */
-                if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
+                if (!IS_ENABLED(CONFIG_CMA) || gfp_flags & __GFP_CMA ||
                                 migratetype != MIGRATE_MOVABLE) {
                         page = rmqueue_pcplist(preferred_zone, zone,
                                         gfp_flags, migratetype, alloc_flags);
                         goto out;
                 }
@@ -3462,8 +3469,14 @@ struct page *rmqueue(struct zone *preferred_zone,
                         if (page)
                                 trace_mm_page_alloc_zone_locked(page, order, migratetype);
                 }
-                if (!page)
-                        page = __rmqueue(zone, order, migratetype, alloc_flags);
+                if (!page) {
+                        if (gfp_flags & __GFP_CMA)
+                                page = __rmqueue_cma(zone, order, migratetype,
+                                                     alloc_flags);
+                        else
+                                page = __rmqueue(zone, order, migratetype,
+                                                 alloc_flags);
+                }
         } while (page && check_new_pages(page, order));
         spin_unlock(&zone->lock);
         if (!page)
@@ -3776,7 +3789,8 @@ static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
         unsigned int pflags = current->flags;
 
         if (!(pflags & PF_MEMALLOC_NOCMA) &&
-                        gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+                        gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE &&
+                        gfp_mask & __GFP_CMA)
                 alloc_flags |= ALLOC_CMA;
 
 #endif
@@ -8533,6 +8547,9 @@ int alloc_contig_range(unsigned long start, unsigned long end,
         if (ret)
                 return ret;
 
+#ifdef CONFIG_CMA
+        cc.zone->cma_alloc = 1;
+#endif
         /*
          * In case of -EBUSY, we'd like to know which page causes problem.
          * So, just fall through. test_pages_isolated() has a tracepoint
@@ -8614,6 +8631,9 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 done:
         undo_isolate_page_range(pfn_max_align_down(start),
                                 pfn_max_align_up(end), migratetype);
+#ifdef CONFIG_CMA
+        cc.zone->cma_alloc = 0;
+#endif
         return ret;
 }
 EXPORT_SYMBOL(alloc_contig_range);
-- 
2.25.1
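
To make the policy above easier to see outside the allocator, here is a minimal user-space sketch of the decision this patch wires into rmqueue()/rmqueue_bulk(): only movable allocations that carry the new __GFP_CMA hint are steered to the CMA free area, and the redirect is suspended while alloc_contig_range() has marked the zone via cma_alloc. Only the flag values mirror the gfp.h hunk above; pick_free_pool(), the pool enum, and the cma_alloc_busy parameter are invented names for illustration and are not kernel interfaces.

/*
 * User-space model of the allocation redirect in this patch.
 * The ___GFP_* values mirror the include/linux/gfp.h hunk; the two
 * "pools" and pick_free_pool() are illustrative stand-ins for the CMA
 * and regular buddy freelists, not kernel code.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define ___GFP_MOVABLE   0x08u
#define ___GFP_CMA       0x800000u      /* new bit added by this patch */
#define ___GFP_NOLOCKDEP 0x1000000u     /* shifted up to make room */
#define __GFP_BITS_SHIFT 24             /* 24 + IS_ENABLED(CONFIG_LOCKDEP) */

enum pool { POOL_REGULAR, POOL_CMA };

/* Simplified __rmqueue_cma()/rmqueue_bulk() decision: only movable
 * allocations that explicitly carry __GFP_CMA may be served from CMA,
 * and not while alloc_contig_range() is working on the zone. */
static enum pool pick_free_pool(unsigned int gfp, bool movable,
                                bool cma_alloc_busy)
{
        if ((gfp & ___GFP_CMA) && movable && !cma_alloc_busy)
                return POOL_CMA;
        return POOL_REGULAR;
}

int main(void)
{
        /* The new flag must stay inside the gfp mask and clear of NOLOCKDEP. */
        assert(___GFP_CMA < (1u << __GFP_BITS_SHIFT));
        assert((___GFP_CMA & ___GFP_NOLOCKDEP) == 0);

        /* Anonymous userspace page: movable + __GFP_CMA -> CMA first. */
        printf("user page   -> %s\n",
               pick_free_pool(___GFP_MOVABLE | ___GFP_CMA, true, false) == POOL_CMA
               ? "CMA" : "regular");

        /* Movable allocation without the hint stays out of CMA. */
        printf("kernel page -> %s\n",
               pick_free_pool(___GFP_MOVABLE, true, false) == POOL_CMA
               ? "CMA" : "regular");
        return 0;
}

Compiled and run, it reports the userspace page being served from CMA while the untagged movable allocation stays on the regular freelists, which is the utilization/starvation trade-off described in the commit message.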

From: lijiawei <lijiawei49@huawei.com>

ohos inclusion
category: feature
issue: #I4LUG4
CVE: NA

Signed-off-by: lijiawei <lijiawei49@huawei.com>
---
 include/linux/gfp.h    |  24 ++++-----
 include/linux/mmzone.h |  15 ++++--
 mm/Kconfig             |  10 ++++
 mm/compaction.c        |   2 +-
 mm/page_alloc.c        | 118 ++++++++++++++++++++++-------------------
 5 files changed, 95 insertions(+), 74 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index e80b7d2f5b38..4de43f1c5178 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -39,21 +39,12 @@ struct vm_area_struct;
 #define ___GFP_HARDWALL 0x100000u
 #define ___GFP_THISNODE 0x200000u
 #define ___GFP_ACCOUNT 0x400000u
-#ifdef CONFIG_CMA
 #define ___GFP_CMA 0x800000u
-#else
-#define ___GFP_CMA 0
-#endif
 #ifdef CONFIG_LOCKDEP
-#ifdef CONFIG_CMA
 #define ___GFP_NOLOCKDEP 0x1000000u
 #else
-#define ___GFP_NOLOCKDEP 0x800000u
-#endif
-#else
 #define ___GFP_NOLOCKDEP 0
 #endif
-
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -235,11 +226,7 @@ struct vm_area_struct;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#ifdef CONFIG_CMA
 #define __GFP_BITS_SHIFT (24 + IS_ENABLED(CONFIG_LOCKDEP))
-#else
-#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
-#endif
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
@@ -331,6 +318,8 @@ struct vm_area_struct;
 
 static inline int gfp_migratetype(const gfp_t gfp_flags)
 {
+        unsigned int ret_mt = 0;
+
         VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
         BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
         BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
@@ -339,7 +328,14 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
                 return MIGRATE_UNMOVABLE;
 
         /* Group based on mobility */
-        return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+        ret_mt = (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+
+#ifdef CONFIG_CMA_REUSE
+        if (ret_mt == MIGRATE_MOVABLE && (gfp_flags & __GFP_CMA))
+                return MIGRATE_CMA;
+#endif
+
+        return ret_mt;
 }
 #undef GFP_MOVABLE_MASK
 #undef GFP_MOVABLE_SHIFT
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b6bcef45bc1d..3ac2799dcb4a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -42,9 +42,12 @@ enum migratetype {
         MIGRATE_UNMOVABLE,
         MIGRATE_MOVABLE,
         MIGRATE_RECLAIMABLE,
+#ifdef CONFIG_CMA_REUSE
+        MIGRATE_CMA,
+#endif
         MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
         MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
-#ifdef CONFIG_CMA
+#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
         /*
          * MIGRATE_CMA migration type is designed to mimic the way
          * ZONE_MOVABLE works. Only movable pages can be allocated
@@ -77,6 +80,12 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
 # define is_migrate_cma_page(_page) false
 #endif
 
+#ifdef CONFIG_CMA_REUSE
+# define get_cma_migratetype() MIGRATE_CMA
+#else
+# define get_cma_migratetype() MIGRATE_MOVABLE
+#endif
+
 static inline bool is_migrate_movable(int mt)
 {
         return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
 }
@@ -451,10 +460,6 @@ struct zone {
         struct pglist_data *zone_pgdat;
         struct per_cpu_pageset __percpu *pageset;
 
-#ifdef CONFIG_CMA
-        bool cma_alloc;
-#endif
-
 #ifndef CONFIG_SPARSEMEM
         /*
          * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
diff --git a/mm/Kconfig b/mm/Kconfig
index 9d606d258ab4..acfc5e88ac05 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -528,6 +528,16 @@ config CMA_AREAS
 
           If unsure, leave the default value "7" in UMA and "19" in NUMA.
 
+config CMA_REUSE
+        bool "CMA reuse feature"
+        depends on CMA
+        help
+          If enabled, it will add MIGRATE_CMA to pcp lists and movable
+          allocations with __GFP_CMA flag will use cma areas prior to
+          movable areas.
+
+          It improves the utilization ratio of cma areas.
+
 config MEM_SOFT_DIRTY
         bool "Track memory changes"
         depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
diff --git a/mm/compaction.c b/mm/compaction.c
index dba424447473..22e6a6e21df8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2021,7 +2021,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 
 #ifdef CONFIG_CMA
                 /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
-                if (migratetype == MIGRATE_MOVABLE &&
+                if (migratetype == get_cma_migratetype() &&
                         !free_area_empty(area, MIGRATE_CMA))
                         return COMPACT_SUCCESS;
 #endif
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7250a0b59861..83c0146cb59e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -324,8 +324,11 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
         "Unmovable",
         "Movable",
         "Reclaimable",
+#ifdef CONFIG_CMA_REUSE
+        "CMA",
+#endif
         "HighAtomic",
-#ifdef CONFIG_CMA
+#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
         "CMA",
 #endif
 #ifdef CONFIG_MEMORY_ISOLATION
@@ -2834,6 +2837,27 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 
 }
 
+static __always_inline struct page *
+__rmqueue_with_cma_reuse(struct zone *zone, unsigned int order,
+                         int migratetype, unsigned int alloc_flags)
+{
+        struct page *page = NULL;
+retry:
+        page = __rmqueue_smallest(zone, order, migratetype);
+
+        if (unlikely(!page) && is_migrate_cma(migratetype)) {
+                migratetype = MIGRATE_MOVABLE;
+                alloc_flags &= ~ALLOC_CMA;
+                page = __rmqueue_smallest(zone, order, migratetype);
+        }
+
+        if (unlikely(!page) &&
+            __rmqueue_fallback(zone, order, migratetype, alloc_flags))
+                goto retry;
+
+        return page;
+}
+
 /*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
@@ -2844,34 +2868,41 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
         struct page *page;
 
+#ifdef CONFIG_CMA_REUSE
+        page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags);
+        goto out;
+#endif
+
+        if (IS_ENABLED(CONFIG_CMA)) {
+                /*
+                 * Balance movable allocations between regular and CMA areas by
+                 * allocating from CMA when over half of the zone's free memory
+                 * is in the CMA area.
+                 */
+                if (alloc_flags & ALLOC_CMA &&
+                    zone_page_state(zone, NR_FREE_CMA_PAGES) >
+                    zone_page_state(zone, NR_FREE_PAGES) / 2) {
+                        page = __rmqueue_cma_fallback(zone, order);
+                        if (page)
+                                goto out;
+                }
+        }
 retry:
         page = __rmqueue_smallest(zone, order, migratetype);
-        if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype,
-                                                  alloc_flags))
-                goto retry;
+        if (unlikely(!page)) {
+                if (alloc_flags & ALLOC_CMA)
+                        page = __rmqueue_cma_fallback(zone, order);
+
+                if (!page && __rmqueue_fallback(zone, order, migratetype,
+                                                                alloc_flags))
+                        goto retry;
+        }
 out:
         if (page)
                 trace_mm_page_alloc_zone_locked(page, order, migratetype);
         return page;
 }
 
-static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
-                                  int migratetype,
-                                  unsigned int alloc_flags)
-{
-        struct page *page = 0;
-
-#ifdef CONFIG_CMA
-        if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
-                page = __rmqueue_cma_fallback(zone, order);
-        else
-#endif
-                page = __rmqueue_smallest(zone, order, migratetype);
-
-        trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
-        return page;
-}
-
 /*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency.  Add them to the supplied list.
@@ -2879,20 +2910,14 @@ static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
                         unsigned long count, struct list_head *list,
-                        int migratetype, unsigned int alloc_flags, int cma)
+                        int migratetype, unsigned int alloc_flags)
 {
         int i, alloced = 0;
 
         spin_lock(&zone->lock);
         for (i = 0; i < count; ++i) {
-                struct page *page = NULL;
-
-                if (cma)
-                        page = __rmqueue_cma(zone, order, migratetype,
-                                             alloc_flags);
-                else
-                        page = __rmqueue(zone, order, migratetype, alloc_flags);
-
+                struct page *page = __rmqueue(zone, order, migratetype,
+                                                                alloc_flags);
                 if (unlikely(page == NULL))
                         break;
 
@@ -3378,8 +3403,7 @@ static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
 
 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
                         unsigned int alloc_flags,
                         struct per_cpu_pages *pcp,
-                        struct list_head *list,
-                        gfp_t gfp_flags)
+                        struct list_head *list)
 {
         struct page *page;
 
@@ -3387,8 +3411,7 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
                 if (list_empty(list)) {
                         pcp->count += rmqueue_bulk(zone, 0,
                                         pcp->batch, list,
-                                        migratetype, alloc_flags,
-                                        gfp_flags && __GFP_CMA);
+                                        migratetype, alloc_flags);
                         if (unlikely(list_empty(list)))
                                 return NULL;
                 }
@@ -3414,8 +3437,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
         local_irq_save(flags);
         pcp = &this_cpu_ptr(zone->pageset)->pcp;
         list = &pcp->lists[migratetype];
-        page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list,
-                                 gfp_flags);
+        page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
         if (page) {
                 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
                 zone_statistics(preferred_zone, zone);
@@ -3441,8 +3463,9 @@ struct page *rmqueue(struct zone *preferred_zone,
                  * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
                  * we need to skip it when CMA area isn't allowed.
                  */
-                if (!IS_ENABLED(CONFIG_CMA) || gfp_flags & __GFP_CMA ||
-                                migratetype != MIGRATE_MOVABLE) {
+                if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
+                                migratetype != MIGRATE_MOVABLE ||
+                                IS_ENABLED(CONFIG_CMA_REUSE)) {
                         page = rmqueue_pcplist(preferred_zone, zone,
                                         gfp_flags, migratetype, alloc_flags);
                         goto out;
                 }
@@ -3469,14 +3492,8 @@ struct page *rmqueue(struct zone *preferred_zone,
                         if (page)
                                 trace_mm_page_alloc_zone_locked(page, order, migratetype);
                 }
-                if (!page) {
-                        if (gfp_flags & __GFP_CMA)
-                                page = __rmqueue_cma(zone, order, migratetype,
-                                                     alloc_flags);
-                        else
-                                page = __rmqueue(zone, order, migratetype,
-                                                 alloc_flags);
-                }
+                if (!page)
+                        page = __rmqueue(zone, order, migratetype, alloc_flags);
         } while (page && check_new_pages(page, order));
         spin_unlock(&zone->lock);
         if (!page)
@@ -3789,8 +3806,7 @@ static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
         unsigned int pflags = current->flags;
 
         if (!(pflags & PF_MEMALLOC_NOCMA) &&
-                        gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE &&
-                        gfp_mask & __GFP_CMA)
+                        gfp_migratetype(gfp_mask) == get_cma_migratetype())
                 alloc_flags |= ALLOC_CMA;
 
 #endif
@@ -8547,9 +8563,6 @@ int alloc_contig_range(unsigned long start, unsigned long end,
         if (ret)
                 return ret;
 
-#ifdef CONFIG_CMA
-        cc.zone->cma_alloc = 1;
-#endif
         /*
          * In case of -EBUSY, we'd like to know which page causes problem.
          * So, just fall through. test_pages_isolated() has a tracepoint
@@ -8631,9 +8644,6 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 done:
         undo_isolate_page_range(pfn_max_align_down(start),
                                 pfn_max_align_up(end), migratetype);
-#ifdef CONFIG_CMA
-        cc.zone->cma_alloc = 0;
-#endif
         return ret;
 }
 EXPORT_SYMBOL(alloc_contig_range);
-- 
2.25.1
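
For reference, the following user-space sketch mirrors the CONFIG_CMA_REUSE mapping introduced above: with MIGRATE_CMA placed ahead of MIGRATE_PCPTYPES it gets its own per-cpu list, and gfp_migratetype() sends movable allocations tagged with __GFP_CMA to it. The enum layout and the ret_mt logic follow the mmzone.h and gfp.h hunks; the ___GFP_MOVABLE/___GFP_RECLAIMABLE values and GFP_MOVABLE_SHIFT are the mainline 5.10 constants this series builds on, and main() is only a demonstration harness, not kernel code.

/*
 * User-space model of the CONFIG_CMA_REUSE migratetype mapping.
 * Enum order and the __GFP_CMA check follow the hunks in this patch;
 * everything else is a simplified illustration.
 */
#include <stdio.h>

#define ___GFP_MOVABLE     0x08u
#define ___GFP_RECLAIMABLE 0x10u
#define ___GFP_CMA         0x800000u
#define GFP_MOVABLE_MASK   (___GFP_MOVABLE | ___GFP_RECLAIMABLE)
#define GFP_MOVABLE_SHIFT  3

/* With CONFIG_CMA_REUSE, MIGRATE_CMA sits before MIGRATE_PCPTYPES,
 * so it receives its own per-cpu (pcp) freelist. */
enum migratetype {
        MIGRATE_UNMOVABLE,
        MIGRATE_MOVABLE,
        MIGRATE_RECLAIMABLE,
        MIGRATE_CMA,
        MIGRATE_PCPTYPES,       /* number of migratetypes with pcp lists */
};

static int gfp_migratetype(unsigned int gfp_flags)
{
        int ret_mt = (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;

        /* The behavioural change of this patch: movable + __GFP_CMA lands
         * on the dedicated MIGRATE_CMA pcp list instead of MIGRATE_MOVABLE. */
        if (ret_mt == MIGRATE_MOVABLE && (gfp_flags & ___GFP_CMA))
                return MIGRATE_CMA;

        return ret_mt;
}

int main(void)
{
        printf("movable             -> %d (MIGRATE_MOVABLE)\n",
               gfp_migratetype(___GFP_MOVABLE));
        printf("movable | __GFP_CMA -> %d (MIGRATE_CMA)\n",
               gfp_migratetype(___GFP_MOVABLE | ___GFP_CMA));
        printf("reclaimable         -> %d (MIGRATE_RECLAIMABLE)\n",
               gfp_migratetype(___GFP_RECLAIMABLE));
        printf("pcp list count      -> %d\n", MIGRATE_PCPTYPES);
        return 0;
}

Running it shows MIGRATE_PCPTYPES growing from 3 to 4, which is what lets __GFP_CMA allocations be batched on their own pcp list instead of mixing CMA pages into the MIGRATE_MOVABLE list.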