diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 665f06675c834e45f9624b1a990d658f91eb99f3..827a7b7e3d27dd89a9b12558a60192b0fb638c1e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -15,6 +15,8 @@ struct vm_area_struct;
 
 static inline int gfp_migratetype(const gfp_t gfp_flags)
 {
+	unsigned int ret_mt = 0;
+
 	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
 	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
 	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
@@ -26,7 +28,15 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
 		return MIGRATE_UNMOVABLE;
 
 	/* Group based on mobility */
-	return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+	ret_mt = (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+
+#ifdef CONFIG_CMA_REUSE
+	/* Steer movable allocations that allow CMA reuse to the CMA lists. */
+	if (ret_mt == MIGRATE_MOVABLE && (gfp_flags & __GFP_CMA))
+		return MIGRATE_CMA;
+#endif
+
+	return ret_mt;
 }
 #undef GFP_MOVABLE_MASK
 #undef GFP_MOVABLE_SHIFT
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 6583a58670c571050ad410e0dcf0718b6477292b..16de62fc891c39b8dac3156039e23e7f2668588b 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -46,6 +46,7 @@ typedef unsigned int __bitwise gfp_t;
 #define ___GFP_THISNODE		0x200000u
 #define ___GFP_ACCOUNT		0x400000u
 #define ___GFP_ZEROTAGS		0x800000u
+#define ___GFP_CMA		0x4000000u	/* first free bit above ___GFP_SKIP_KASAN */
 #ifdef CONFIG_KASAN_HW_TAGS
 #define ___GFP_SKIP_ZERO	0x1000000u
 #define ___GFP_SKIP_KASAN	0x2000000u
@@ -54,7 +55,7 @@ typedef unsigned int __bitwise gfp_t;
 #define ___GFP_SKIP_KASAN	0
 #endif
 #ifdef CONFIG_LOCKDEP
-#define ___GFP_NOLOCKDEP	0x4000000u
+#define ___GFP_NOLOCKDEP	0x8000000u
 #else
 #define ___GFP_NOLOCKDEP	0
 #endif
@@ -71,6 +72,7 @@ typedef unsigned int __bitwise gfp_t;
 #define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
 #define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
 #define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
+#define __GFP_CMA	((__force gfp_t)___GFP_CMA)	/* Use CMA areas first */
 #define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 
 /**
@@ -249,7 +251,7 @@ typedef unsigned int __bitwise gfp_t;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 0f62786269d0c1e789d4266ecf6d7d866fd546db..58be39d6b951ac761fd3e6272795dd476a345af5 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,8 +47,11 @@ enum migratetype {
 	MIGRATE_MOVABLE,
 	MIGRATE_RECLAIMABLE,
+#ifdef CONFIG_CMA_REUSE
+	MIGRATE_CMA,	/* below MIGRATE_PCPTYPES so CMA pages get pcp lists */
+#endif
 	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
 	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
-#ifdef CONFIG_CMA
+#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
 	/*
 	 * MIGRATE_CMA migration type is designed to mimic the way
 	 * ZONE_MOVABLE works.  Only movable pages can be allocated
@@ -78,6 +81,12 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
 # define is_migrate_cma_page(_page) false
 #endif
 
+#ifdef CONFIG_CMA_REUSE
+# define get_cma_migratetype() MIGRATE_CMA
+#else
+# define get_cma_migratetype() MIGRATE_MOVABLE
+#endif
+
 static inline bool is_migrate_movable(int mt)
 {
 	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
diff --git a/mm/Kconfig b/mm/Kconfig
index 264a2df5ecf5b91a2883e4594bd7707219ca309c..414d96ee780cf843c695e6248f4b0ca57f81016f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -941,6 +941,16 @@ config CMA_AREAS
 
 	  If unsure, leave the default value "7" in UMA and "19" in NUMA.
 
+config CMA_REUSE
+	bool "CMA reuse feature"
+	depends on CMA
+	help
+	  If enabled, MIGRATE_CMA is added to the pcp lists, and movable
+	  allocations that carry the __GFP_CMA flag are served from CMA
+	  areas before falling back to regular movable areas.
+
+	  This improves the utilization of CMA areas.
+
 config MEM_SOFT_DIRTY
 	bool "Track memory changes"
 	depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
diff --git a/mm/compaction.c b/mm/compaction.c
index 38c8d216c6a3bffd9d75fd430981558c66614750..080aaf0f80a7bae9c5525a2aa3337d5127e2ec14 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2235,7 +2235,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 
 #ifdef CONFIG_CMA
 		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
-		if (migratetype == MIGRATE_MOVABLE &&
+		if (migratetype == get_cma_migratetype() &&
 		    !free_area_empty(area, MIGRATE_CMA))
 			return COMPACT_SUCCESS;
 #endif
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index afed33fd876128ac3b53db3e8da303b353c4c991..923f296f9f3c89bb64d9d98e78c929ec659ed991 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -275,8 +275,11 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
 	"Unmovable",
 	"Movable",
 	"Reclaimable",
+#ifdef CONFIG_CMA_REUSE
+	"CMA",
+#endif
 	"HighAtomic",
-#ifdef CONFIG_CMA
+#if defined(CONFIG_CMA) && !defined(CONFIG_CMA_REUSE)
 	"CMA",
 #endif
 #ifdef CONFIG_MEMORY_ISOLATION
@@ -2078,6 +2081,28 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 
 }
 
+static __always_inline struct page *
+__rmqueue_with_cma_reuse(struct zone *zone, unsigned int order,
+			 int migratetype, unsigned int alloc_flags)
+{
+	struct page *page = NULL;
+retry:
+	page = __rmqueue_smallest(zone, order, migratetype);
+
+	/* CMA lists exhausted: retry as a plain movable allocation. */
+	if (unlikely(!page) && is_migrate_cma(migratetype)) {
+		migratetype = MIGRATE_MOVABLE;
+		alloc_flags &= ~ALLOC_CMA;
+		page = __rmqueue_smallest(zone, order, migratetype);
+	}
+
+	if (unlikely(!page) &&
+	    __rmqueue_fallback(zone, order, migratetype, alloc_flags))
+		goto retry;
+
+	return page;
+}
+
 /*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
@@ -2088,6 +2113,11 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
 	struct page *page;
 
+#ifdef CONFIG_CMA_REUSE
+	page = __rmqueue_with_cma_reuse(zone, order, migratetype, alloc_flags);
+	goto out;
+#endif
+
 	if (IS_ENABLED(CONFIG_CMA)) {
 		/*
 		 * Balance movable allocations between regular and CMA areas by
@@ -2112,6 +2142,12 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 				alloc_flags))
 			goto retry;
 	}
+#ifdef CONFIG_CMA_REUSE
+out:
+#endif
+	if (page)
+		trace_mm_page_alloc_zone_locked(page, order, migratetype,
+						migratetype == MIGRATE_MOVABLE);
 	return page;
 }
 
@@ -2773,7 +2809,13 @@ struct page *rmqueue(struct zone *preferred_zone,
 	 */
 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
 
-	if (likely(pcp_allowed_order(order))) {
+	/*
+	 * With CONFIG_CMA_REUSE the movable pcplists never hold CMA pages,
+	 * so the pcplists can also serve movable requests without ALLOC_CMA.
+	 */
+	if (likely(pcp_allowed_order(order)) &&
+	    (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
+	     migratetype != MIGRATE_MOVABLE || IS_ENABLED(CONFIG_CMA_REUSE))) {
 		page = rmqueue_pcplist(preferred_zone, zone, order,
 				       migratetype, alloc_flags);
 		if (likely(page))
@@ -3034,7 +3076,11 @@ static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
 						  unsigned int alloc_flags)
 {
 #ifdef CONFIG_CMA
-	if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+	unsigned int pflags = current->flags;
+
+	/* Pinned allocations must not land in CMA; see PF_MEMALLOC_PIN. */
+	if (!(pflags & PF_MEMALLOC_PIN) &&
+	    gfp_migratetype(gfp_mask) == get_cma_migratetype())
 		alloc_flags |= ALLOC_CMA;
 #endif
 	return alloc_flags;
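
Note: to sanity-check the routing this patch introduces without building a kernel,
the following standalone C program models the patched gfp_migratetype() with
CONFIG_CMA_REUSE enabled. It is only a sketch: the flag values and migratetype
enum are simplified userspace copies of the kernel definitions (___GFP_CMA uses
the bit chosen in gfp_types.h above), and kernel-only details such as the
page_group_by_mobility_disabled early return are omitted.

#include <assert.h>
#include <stdio.h>

/* Simplified copies of the kernel constants this patch touches. */
#define ___GFP_MOVABLE		0x08u
#define ___GFP_RECLAIMABLE	0x10u
#define ___GFP_CMA		0x4000000u
#define GFP_MOVABLE_MASK	(___GFP_RECLAIMABLE | ___GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT	3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_CMA,		/* CONFIG_CMA_REUSE layout: below MIGRATE_PCPTYPES */
	MIGRATE_PCPTYPES,
};

/* Models the patched gfp_migratetype() with CONFIG_CMA_REUSE enabled. */
static int gfp_migratetype(unsigned int gfp_flags)
{
	unsigned int ret_mt = (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;

	if (ret_mt == MIGRATE_MOVABLE && (gfp_flags & ___GFP_CMA))
		return MIGRATE_CMA;

	return ret_mt;
}

int main(void)
{
	/* MIGRATE_CMA sits below MIGRATE_PCPTYPES, so it gets its own pcp list. */
	assert(MIGRATE_CMA < MIGRATE_PCPTYPES);
	/* A plain movable allocation keeps its usual free lists. */
	assert(gfp_migratetype(___GFP_MOVABLE) == MIGRATE_MOVABLE);
	/* __GFP_CMA steers a movable allocation to the CMA free lists first. */
	assert(gfp_migratetype(___GFP_MOVABLE | ___GFP_CMA) == MIGRATE_CMA);
	/* __GFP_CMA alone has no effect on non-movable allocations. */
	assert(gfp_migratetype(___GFP_CMA) == MIGRATE_UNMOVABLE);
	printf("CMA-reuse migratetype routing behaves as expected\n");
	return 0;
}

The last assert captures the key invariant: __GFP_CMA only redirects requests
that are already movable, so unmovable and reclaimable allocations can never
land on the CMA free lists.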