mem_init\n---|---> memblock_free_all (this will put all unused low memory onto the freelists) \n---|---|--->free_low_memory_core_early \n---|---|---|--->__free_memory_core(start, end) \n---|---|---|---|--->__free_pages_memory(start_pfn, end_pfn) \n---|---|---|---|---|---> memblock_free_pages (pfn_to_page(start), start, order) \n---|---|---|---|---|---|---> __free_pages_core (page, order) \n---|---|---|---|---|---|---|--->__free_pages_ok (page, order, fpi_flags) \n---|---|---|---|---|---|---|---|--->free_one_page (page, order, fpi_flags)\n```\n\n\n\n\n\n\n```\n/**\n * memblock_free_all - release free pages to the buddy allocator\n *\n * Return: the number of pages actually released.\n */\nunsigned long __init memblock_free_all(void)\n{\n unsigned long pages;\n\n /* 涉及到两个关键结构体:\nstruct pglist_data *pgdat (typedef pg_data_t); \nstruct zone *z;\n*/\n reset_all_zones_managed_pages();\n pages = free_low_memory_core_early();\n totalram_pages_add(pages);\n\n return pages;\n}\n```\n\n在函数__free_pages_memory中,对memblock的free内存按照order进行释放\n\n\n\n```\nstatic void __init __free_pages_memory(unsigned long start, unsigned long end)\n{\n int order;\n\n while (start < end) {\n order = min(MAX_ORDER - 1UL, __ffs(start));\n\n while (start + (1UL << order) > end)\n order--;\n\n memblock_free_pages(pfn_to_page(start), start, order);\n\n start += (1UL << order);\n }\n}\n```\n\n随后调用到函数__free_pages_core,将zone管理的页进行增加,执行页释放和页合并的过程,并最终通过__free_one_page将所释放出来的全部页按照order统合进伙伴系统中(使用函数add_to_free_list_tail,即将统合好的pages放进对应的free_list链表中)\n\n\n\n```\nvoid __free_pages_core(struct page *page, unsigned int order)\n{\n unsigned int nr_pages = 1 << order;\n struct page *p = page;\n unsigned int loop;\n\n /* 部分删减 */\n\n /* 对当前页对应的zone所管理的page大小进行设置(增加) */\n atomic_long_add(nr_pages, &page_zone(page)->managed_pages);\n /*\n * Bypass PCP and place fresh pages right to the tail, primarily\n * relevant for memory onlining.\n */\n /* 这里涉及复杂的page合并和释放过程,下面具体分析 */\n __free_pages_ok(page, order, 
FPI_TO_TAIL);\n}\n\n/* Used for pages not on another list */\nstatic inline void add_to_free_list_tail(struct page *page, struct zone *zone, unsigned int order, int migratetype)\n{\n struct free_area *area = &zone->free_area[order];\n\n list_add_tail(&page->lru, &area->free_list[migratetype]);\n area->nr_free++; /* 维护当前order还有多少空余的计数器 */\n}\n```\n\n

\n\n__free_one_page的注释翻译:这是Buddy System分配器的释放函数。\n\nBuddy System的概念是维护一种直接映射的表(包含位值),用于不同\"次序\"的存储块内存。底层表包含最小可分配的存储单元(在这里是页面)的映射,而每个上层级别则描述下层级别的两个存储单元,因此被称为\"伙伴\"。在高层次上,所发生的就是将底层表中的表项标记为可用,并根据需要向上传递更改,加上一些与VM系统的其他部分交互所需的账户。\n\n在每个级别上,我们保留一个页面列表,这些页面是长度为 (1 << order) 的连续可用页面列表的头,并带有 PageBuddy 标志。页面的顺序存储在 page_private(page) 字段中。因此,当我们分配或释放一个页面时,我们可以推导出另一个页面的状态。也就是说,如果我们分配了一个小块,并且两个页面都是空闲的,那么剩余的区域必须被分割成块。如果释放了一个块,并且它的伙伴也是空闲的,那么这将触发合并成更大尺寸的块。\n\n从上图也可以看出,本设备只有一个node和一个zone就是node0,zone_normal\n\n **相关视频推荐** \n\n[](https://www.bilibili.com/video/BV1GT4y1t7Hs/)\n\n **免费学习地址:Linux C/C++开发(后端/音视频/游戏/嵌入式/高性能网络/存储/基础架构/安全)** \n\n需要C/C++ Linux服务器架构师学习资料加qun **579733396** 获取(资料包括 **C/C++,Linux,golang技术,Nginx,ZeroMQ,MySQL,Redis,fastdfs,MongoDB,ZK,流媒体,CDN,P2P,K8S,Docker,TCP/IP,协程,DPDK,ffmpeg** 等),免费\n

\n\n### 2.2 关键数据或结构体\n\n### 2.2.1 pglist_data(内存结点)\n\n在 NUMA 机器上,每个 NUMA 节点都有一个 pg_data_t 描述其内存布局。在 UMA 机器上,存在一个单独的 pglist_data,它描述了整个内存。\n\n而内存统计信息和页面替换数据结构是基于每个区域(zone)进行维护的\n\n\n\n```\ntypedef struct pglist_data {\n /* node_zones 仅包含此节点的区域(zone)。并非所有区域都可能已经被填充,\n 但它是完整的列表(比如我手中的设备只有zone normal)。\n 它被本节点或其他节点的 node_zonelists 引用 */\n //包含节点中各内存域(ZONE_DMA, ZONE_DMA32, ZONE_NORMAL...)的数据结构 \n //详见enum zone_type 中的定义(include\\linux\\mmzone.h)\n struct zone node_zones[MAX_NR_ZONES]; \n\n /* node_zonelists 包含对所有节点中所有区域(zone)的引用。\n 通常,前几个区域(zone)将引用本节点的 node_zones */\n //对于非NUMA的设备而言,实际上只指向一个指定的zone\n struct zonelist node_zonelists[MAX_ZONELISTS]; \n\n int nr_zones; /* 此节点中已填充区域(zone)的数量 */\n /* 部分删除 */\n unsigned long node_start_pfn; // 当前NUMA节点第一页帧逻辑编号。在UMA总是0.\n unsigned long node_present_pages; /* 结点中页帧的数目 */\n unsigned long node_spanned_pages; /* 结点以页帧为单位计算的长度,包含内存空洞 */\n int node_id; //全局结点ID,系统中的NUMA结点都从0开始编号\n wait_queue_head_t kswapd_wait; //交换守护进程的等待队列,在将页帧换出结点时会用到\n wait_queue_head_t pfmemalloc_wait;\n struct task_struct *kswapd; /* 指向负责该结点的交换守护进程的task_struc*/\n int kswapd_order;// 定义需要释放的区域的长度。\n \n /* 部分删除 */\n struct lruvec __lruvec; // lru缓存链表,这个东西也非常复杂,以后有空再详细分析\n\n} pg_data_t;\n```\n\n### 2.2.2 zone(内存域)\n\n\n\n```\nstruct zone {\n /* Read-mostly fields */\n\n /* zone watermarks, access with *_wmark_pages(zone) macros */\n unsigned long _watermark[NR_WMARK]; //记录着oom的水位线\n unsigned long watermark_boost; //水位线计算和该值相关\n\n /* 部分删除 */\n\n struct pglist_data *zone_pgdat; //指向该zone对应的内存节点\n struct per_cpu_pageset __percpu *pageset; //PCP技术,这里先不分析\n /*\n * Flags for a pageblock_nr_pages block. 
See pageblock-flags.h.\n * In SPARSEMEM, this map is stored in struct mem_section\n */\n unsigned long *pageblock_flags; //管理着位图信息,祥见pageblock-flags.h\n \n /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */\n unsigned long zone_start_pfn;\n\n /* `spanned_pages`是该区域所涵盖的总页面数,包括空洞,其计算公式为: \n * spanned_pages = zone_end_pfn - zone_start_pfn; \n * \n * `present_pages`是该区域内物理页面的数量,该数量不包括空洞,其计算公式为: \n * present_pages = spanned_pages - absent_pages(空洞中的页面数); \n * \n * `managed_pages` 是 buddy system 管理的 `present_pages` 页的数量,其计算公式为\n * (`reserved_pages` 包括由 bootmem 分配器分配的页面): \n * managed_pages = present_pages - reserved_pages; \n * \n * 因此,`present_pages` 可由内存热插拔或内存电源管理逻辑使用,\n * 通过检查(`present_pages - managed_pages`)来查找未管理的页面。\n * `managed_pages` 应该由页分配器和VM扫描器用于计算各种水印和阈值。 \n */\n atomic_long_t managed_pages;\n unsigned long spanned_pages;\n unsigned long present_pages;\n\n /* 部分删除 */\n /* free areas of different sizes */\n //用于管理该zone的伙伴系统信息。伙伴系统将基于这些信息管理该zone的物理内存。\n //该数组中每个数组项用于管理一个空闲内存页块链表,同一个链表中的内存页块的大小相同,\n //并且大小为2的数组下标次方页。MAX_ORDER定义了支持的最大的内存页块大小\n struct free_area free_area[MAX_ORDER]; \n\n /* 部分删除 */ \n} ____cacheline_internodealigned_in_smp;\n```\n\n关于zone,cat /proc/zoneinfo可以获得如下信息\n\n

\n\nmin: wmark_min(各水位线的具体含义见后文水位线检查部分的分析)\n\nlow: wmark_low\n\nhigh: wmark_high\n\nspanned: 该zone的全部页,包括空洞\n\npresent:该zone的实际内存页,去掉空洞\n\nmanaged:实际管理的内存,去掉了预留\n\n当我们修改min_free_kbytes:zoneinfo对应的水位也同步修改了\n\n

\n\n

\n\n

\n\n### 2.2.3 free_area\n\n\n\n```\nstruct free_area {\n //用于将具有该大小的内存页块连接起来。由于内存页块表示的是连续的物理页,\n //因而对于加入到链表中的每个内存页块来说,只需要将内存页块中的第一个页加入该链表即可。\n //因此这些链表连接的是每个内存页块中第一个内存页,使用了struct page中的\n //struct list_head成员lru。free_list数组元素的每一个对应一种属性的类型,\n //可用于不同的目地,但是它们的大小和组织方式相同\n struct list_head free_list[MIGRATE_TYPES];\n \n //内存页块的数目,对于0阶的表示以1页为单位计算,\n //对于1阶的以2页为单位计算,n阶的以2的n次方为单位计算\n unsigned long nr_free; \n};\n```\n\n其中migreatetype定义如下\n\n\n\n```\nenum migratetype {\n // 在内存中有固定位置, 不能移动到其他地方。核心内核分配的大多数内存属于该类别\n MIGRATE_UNMOVABLE, \n // 可以随意地移动 属于用户空间应用程序的页属于该类别.\n // 它们是通过页表映射的,如果它们复制到新位置,\n //页表项可以相应地更新,应用程序不会注意到任何事\n MIGRATE_MOVABLE, \n //不能直接移动, 但可以删除, 其内容可以从某些源重新生成。\n //例如,映射自文件的数据属于该类别。kwapd守护进程会根据可回收页访问的频繁程度,\n //周期性释放此类内存.页面回收本身就是一个复杂的过程. \n //内核会在可回收页占据了太多内存时进行回收,在内存短缺(即分配失败)时也可以发起页面回收\n MIGRATE_RECLAIMABLE, \n //the number of types on the pcp lists \n //用来表示每CPU页框高速缓存的数据结构中的链表的迁移类型数目\n MIGRATE_PCPTYPES, \n //在罕见的情况下,内核需要分配一个高阶的页面块而不能休眠.\n //如果向具有特定可移动性的列表请求分配内存失败,\n //这种紧急情况下可从MIGRATE_HIGHATOMIC中分配内存\n MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, \n#ifdef CONFIG_CMA\n /*\n * MIGRATE_CMA migration type is designed to mimic the way\n * ZONE_MOVABLE works. Only movable pages can be allocated\n * from MIGRATE_CMA pageblocks and page allocator never\n * implicitly change migration type of MIGRATE_CMA pageblock.\n *\n * The way to use it is to change migratetype of a range of\n * pageblocks to MIGRATE_CMA which can be done by\n * __free_pageblock_cma() function. What is important though\n * is that a range of pageblocks must be aligned to\n * MAX_ORDER_NR_PAGES should biggest page be bigger then\n * a single pageblock. Linux内核最新的连续内存分配器(CMA), \n * 用于避免预留大块内存导致系统可用内存减少而实现的,即当驱动不使用内存时,\n * 将其分配给用户使用,而需要时则通过回收或者迁移的方式将内存腾出来\n */\n MIGRATE_CMA,\n#endif\n#ifdef CONFIG_MEMORY_ISOLATION\n /* can\'t allocate from here 是一个特殊的虚拟区域, 用于跨越NUMA结点移动物理内存页. 
\n 在大型系统上, 它有益于将物理内存页移动到接近于使用该页最频繁的CPU */\n MIGRATE_ISOLATE, \n#endif\n MIGRATE_TYPES\n};\n```\n\n在分配内存时,都会带分配参数比如GPF_KERNEL等等,那么,一次内存分配从哪个zone分配了?这里就必需把mask转换成zone,gfp_mask(下面会简单介绍)低4位用于表示分配的zone\n\n\n\n```\n#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)\n\n/* Convert GFP flags to their corresponding migrate type */\n#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) =0x18\n#define GFP_MOVABLE_SHIFT 3 //低4位决定了使用哪一种migratetype\n\n/* 该函数就是gfp转migratetype */\nstatic inline int gfp_migratetype(const gfp_t gfp_flags)\n{\n /* 部分删除 */\n/* Group based on mobility */\n(gfp_flags & 0x18) >> 3\nreturn (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;\n以GFP_KERNEL为例 值为0x400|0x800|0x40|0x80 & 0x18 =0 \n也就是说 GFP_KERNEL 就是使用的 MIGRATE_UNMOVABLE\n}\n```\n\n### 2.2.4 gfp_mask\n\nGFP是get free page的缩写, 分配掩码包括两部分,内存域修饰符(占低4位)和内存分配标志(从第5位开始),如下图所示\n\n

\n\n内存域zone的几种类型:ZONE_DMA、ZONE_DMA32、ZONE_NORMAL、ZONE_HIGHMEM、ZONE_MOVABLE。与类型不同,内存域的修饰符只有___GFP_DMA、___GFP_HIGHMEM、___GFP_DMA32、___GFP_MOVABLE 4种,没有ZONE_NORMAL对应的修饰符,因为ZONE_NORMAL是默认的内存申请类型。如下所示,为内存修饰符的定义,划款的4个为内存域修饰符\n\n

\n\n内存域修饰符与伙伴系统分配器扫描内存域的顺序的关系,如下所示:\n\n

\n\n### 2.2.5 图示\n\n

\n\n

\n\n### 2.3 伙伴系统内存释放(伙伴整合)\n\n具体分析__free_pages_ok,函数__free_pages_ok位于mm\\page_alloc.c,源码如下:\n\n\n\n```\nstatic void __free_pages_ok(struct page *page, unsigned int order,\n fpi_t fpi_flags)\n{\n unsigned long flags;\n int migratetype;\n unsigned long pfn = page_to_pfn(page); //页转为页框号\n\n //这里最终会调用clear_page(汇编)\n if (!free_pages_prepare(page, order, true)) \n return;\n\n //获取对应页框的migratetype \n //该值会在memmap_init -> memmap_init_zone_range -> memmap_init_zone 的时候 \n //初始化为 MIGRATE_MOVABLE\n migratetype = get_pfnblock_migratetype(page, pfn);\n local_irq_save(flags);\n __count_vm_events(PGFREE, 1 << order);\n \n //对应调用 __free_one_page\n free_one_page(page_zone(page), page, pfn, order, migratetype,\n fpi_flags);\n local_irq_restore(flags);\n}\n```\n\n伙伴系统维护了直接映射表,其中包含各种“级别”的内存块(包含位值)。底层表包含内存可分配的最小单元(page)的位图,它的每个上层(父级)描述下层的一对单元(一对伙伴),因此是“buddies”的概念。在每个级别上,我们保留页面列表,其中包含长度为 (1<
lock 加锁保护。\n 为了记录页面的阶数,我们使用 page_private(page)。\n */\n if (!page_is_buddy(page, buddy, order)) //确保是伙伴系统页\n goto done_merging;\n /*\n * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,\n * merge with it and move up one order.\n */\n if (page_is_guard(buddy))\n clear_page_guard(zone, buddy, order, migratetype);\n else\n //对应内存域的对应order的free页自减\n del_page_from_free_list(buddy, zone, order); \n \n // 两个伙伴页框合并假设页框地址6的0阶伙伴是 6^(1<<0) = 7 那么 6&7 = 6\n combined_pfn = buddy_pfn & pfn; \n page = page + (combined_pfn - pfn); //合并之后的页地址是当前页+页框偏移\n pfn = combined_pfn; //页框赋值为当前合并后的页框\n order++; //阶++\n }\n \n /* 删除部分代码 */\ndone_merging:\n set_buddy_order(page, order);\n\n /* 删除部分代码 */\n // 这里伙伴页合并完了之后,在对应zone的对应order的free页++\n add_to_free_list(page, zone, order, migratetype); \n /* 删除部分代码 */\n}\n```\n\n### 2.3.1 伙伴算法:\n\n假设:*_mem_map 是至少连续到 MAX_ORDER 的。为了找到伙伴分配中匹配 buddy 和被组合成的页面 page 的 struct page 结构体。\n\n任何伙伴 B1 都有一个与之成对的 O 阶伙伴 B2 ,满足以下等式:\n\nB2 = B1 ^ (1< alloc_pages_node -> __alloc_pages_node -> __alloc_pages -> __alloc_pages_nodemask 源码如下:\n\n\n\n```\nstatic inline struct page *\n__alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid)\n{\n preferred_nid -> 对应nid 如果是UMA 则 nid只能是0\n return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL);\n}\n\n/*\n * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what\n * GFP flags are used before interrupts are enabled. Once interrupts are\n * enabled, it is set to __GFP_BITS_MASK while the system is running. During\n * hibernation, it is used by PM to avoid I/O during memory allocation while\n * devices are suspended.\n */\n//gfp_allowed_mask在早期引导期间设置为GFP_BOOT_MASK,以限制在中断启用之前使用哪些GFP标志。\n//一旦启用中断,它将设置为__GFP_BITS_MASK,而系统正在运行。\n//在休眠期间,它被用于PM,以避免在设备挂起期间的内存分配期间进行I / O操作。\nextern gfp_t gfp_allowed_mask;\n\n/*\n * This is the \'heart\' of the zoned buddy allocator. 
\n * (这是buddy 分配器的“核心”) kmalloc跟到最后 实际使用的也是该函数\n */\nstruct page * __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, \n int preferred_nid, nodemask_t *nodemask)\n{\n struct page *page;\n unsigned int alloc_flags = ALLOC_WMARK_LOW; //快速路径的水位基准是low\n gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */\n struct alloc_context ac = { };\n\n /* 删除部分数值检查 */\n\n gfp_mask &= gfp_allowed_mask; //gfp掩码检测\n alloc_mask = gfp_mask;\n\n /* 对关键参数进行检查,如果检查通过,则选定首选内存申请的zone */\n if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, \n &ac, &alloc_mask, &alloc_flags))\n return NULL;\n\n /* 删除部分*/\n\n /* First allocation attempt 函数详细分析见下面*/\n page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);\n if (likely(page))\n goto out;\n\n /*\n * Apply scoped allocation constraints. This is mainly about GFP_NOFS\n * resp. GFP_NOIO which has to be inherited for all allocation requests\n * from a particular context which has been marked by\n * memalloc_no{fs,io}_{save,restore}.\n */\n alloc_mask = current_gfp_context(gfp_mask);\n ac.spread_dirty_pages = false;\n\n /*\n * Restore the original nodemask if it was potentially replaced with\n * &cpuset_current_mems_allowed to optimize the fast-path attempt.\n */\n ac.nodemask = nodemask;\n\n /* 详细分析见下面 */\n page = __alloc_pages_slowpath(alloc_mask, order, &ac);\n\nout:\n if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&\n unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {\n __free_pages(page, order);\n page = NULL;\n }\n\n trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);\n\n return page;\n}\n```\n\n### 2.4.2 get_page_from_freelist\n\n伙伴系统内存申请优先使用该函数,尝试获取一个指定order的内存块\n\n\n\n```\n/*\n * get_page_from_freelist goes through the zonelist trying to allocate\n * a page.\n */\nstatic struct page *\nget_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, \n const struct alloc_context *ac)\n{\n struct zoneref *z;\n struct zone *zone;\n 
struct pglist_data *last_pgdat_dirty_limit = NULL;\n bool no_fallback; //如果需要避免内存碎片,则 no_fallback = true\n\nretry:\n /*\n * Scan zonelist, looking for a zone with enough free.\n * See also __cpuset_node_allowed() comment in kernel/cpuset.c.\n */\n no_fallback = alloc_flags & ALLOC_NOFRAGMENT; //是否需要避免内存碎片\nz = ac->preferred_zoneref;\n //开始遍历 zonelist,查找可以满足本次内存分配的物理内存区域 zone\n for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,\n ac->nodemask) {\n struct page *page; //指向分配成功之后的内存\n unsigned long mark; //内存分配过程中设定的水位线\n\n /* 删除部分代码,zone节点查找和各种校验 */\n\n // 获取本次内存分配需要考虑到的内存水位线,\n // 快速路径下是 WMARK_LOW, 慢速路径下是 WMARK_MIN\n mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);\n /* 内存是否有剩余,水位线判断,后面进行分析 */\n if (!zone_watermark_fast(zone, order, mark,\n ac->highest_zoneidx, alloc_flags,\n gfp_mask)) {\n int ret;\n\n /* Checked here to keep the fast path fast */\n BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);\n /* 如果是可以不校验水位线的,就选择这个zone进行page申请 */\n if (alloc_flags & ALLOC_NO_WATERMARKS)\n goto try_this_zone; \n /* 删除部分代码 */\n continue; /* 否则在找其他zone */\n }\n\ntry_this_zone:\n //这里就是伙伴系统的入口,rmqueue 函数中封装的就是伙伴系统的核心逻辑\n page = rmqueue(ac->preferred_zoneref->zone, zone, order,\n gfp_mask, alloc_flags, ac->migratetype);\n if (page) {\n /* 伙伴系统找到了一个page,对这个page执行一些其他操作,\n 比如kasan标记,poison该page,如果开启了CONFIG_PAGE_OWNER 还可以设置owner */\n prep_new_page(page, order, gfp_mask, alloc_flags);\n\n /*\n * If this is a high-order atomic allocation then check\n * if the pageblock should be reserved for the future\n */\n if (unlikely(order && (alloc_flags & ALLOC_HARDER)))\n reserve_highatomic_pageblock(page, zone, order);\n\n return page;\n } /* else删除 */\n }\n\n /* 删除 */\n\n return NULL;\n}\n```\n\n虽然 get_page_from_freelist 函数的代码比较冗长,但是其核心逻辑比较简单,主干框架就是通过 for_next_zone_zonelist_nodemask 来遍历当前 NUMA 节点以及备用节点的所有内存区域(zonelist),然后逐个通过 zone_watermark_fast 检查这些内存区域 zone 中的剩余空闲内存容量是否在指定的水位线 mark 之上。如果满足水位线的要求则直接调用 rmqueue 进入伙伴系统分配内存,分配成功之后通过 prep_new_page 初始化分配好的内存页 page。\n\n如果当前正在遍历的 
zone 中剩余空闲内存容量在指定的水位线 mark 之下,就需要通过 node_reclaim 触发内存回收,随后通过 zone_watermark_ok 检查经过内存回收之后,内核是否回收到了足够的内存以满足本次内存分配的需要。如果内存回收到了足够的内存则 zone_watermark_ok = true 随后跳转到 try_this_zone 分支在本内存区域 zone 中分配内存。否则继续遍历下一个 zone。\n\n### 2.4.2.1 水位线检查\n\n\n\n```\nstatic inline bool zone_watermark_fast(struct zone *z, unsigned int order,\n unsigned long mark, int highest_zoneidx,\n unsigned int alloc_flags, gfp_t gfp_mask)\n{\n long free_pages;\n // 获取当前内存区域中所有空闲的物理内存页\n free_pages = zone_page_state(z, NR_FREE_PAGES);\n\n // 快速检查分配阶 order = 0 情况下相关水位线,\n // 空闲内存需要刨除掉为 highatomic 预留的紧急内存\n if (!order) {\n long fast_free;\n \n /* 可供本次内存分配使用的符合要求的真实可用内存,\n 初始为 free_pages,free_pages 为空闲内存页的全集其中\n 也包括了不能为本次内存分配提供内存的空闲内存 */\n fast_free = free_pages;\n \n //计算真正可供内存分配的空闲页数量:空闲内存页全集 - 不能使用的空闲页\n fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);\n /* 如果可用的空闲内存页数量大于内存水位线与预留内存之和\n 那么表示物理内存区域中的可用空闲内存能够满足本次内存分配的需要 */\n if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])\n return true;\n }\n // 近一步检查内存区域伙伴系统中是否有足够的 order 阶的内存块可供分配\n if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, free_pages))\n return true;\n /* 部分删除 */\n\n return false;\n}\n```\n\n如果本次内存分配申请的是高阶内存块(order > 0),则会进入 __zone_watermark_ok 函数中,近一步判断伙伴系统中是否有足够的高阶内存块能够满足 order 阶的内存分配:\n\n\n\n```\nbool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,\n int highest_zoneidx, unsigned int alloc_flags,long free_pages)\n{\n // 保证内存分配顺利进行的最低水位线\n long min = mark;\n int o;\n const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));\n\n /* free_pages may go negative - that\'s OK */\n // 获取真正可用的剩余空闲内存页数量,需要把CMA(若非CMA内存申请)和HIGH预留的减掉\n free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);\n\n // 如果设置了 ALLOC_HIGH 则水位线降低二分之一,使内存分配更加激进一些\n if (alloc_flags & ALLOC_HIGH)\n min -= min / 2;\n\n if (unlikely(alloc_harder)) {\n // 在要进行 OOM 的情况下内存分配会比普通的 ALLOC_HARDER 策略更加激进一些,\n // 所以这里水位线会降低二分之一\n if (alloc_flags & ALLOC_OOM)\n min -= min / 2;\n else\n min -= min / 
4;\n }\n\n // 检查当前可用剩余内存是否在指定水位线之上。\n // 内存的分配必须保证可用剩余内存容量在指定水位线之上,否则不能进行内存分配\n if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])\n return false;\n\n // 流程走到这里,对应内存分配阶 order = 0 的情况下就已经 OK 了,\n // 剩余空闲内存在水位线之上,那么肯定能够分配一页出来\n if (!order)\n return true;\n\n // 但是对于 high-order 的内存分配,这里还需要近一步检查伙伴系统,\n // 根据伙伴系统内存分配的原理,这里需要检查高阶 free_list \n // 中是否有足够的空闲内存块可供分配\n for (o = order; o < MAX_ORDER; o++) {\n // 从当前分配阶 order 对应的 free_area 中检查是否有足够的内存块\n struct free_area *area = &z->free_area[o];\n int mt;\n \n // 如果当前 free_area 中的 nr_free = 0 表示对应 free_list \n // 中没有合适的空闲内存块,那么继续到高阶 free_area 中查找\n if (!area->nr_free)\n continue;\n // 检查 free_area 中UME 3种迁移类型 free_list 是否有足够的内存块\n for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {\n if (!free_area_empty(area, mt)) \n return true; //非空,代表有足够的内存块\n }\n\n /* 删除部分 */\n\n // 如果设置了 ALLOC_HARDER,则表示可以从 HIGHATOMIC \n // 区中的紧急预留内存中分配,检查对应 free_list\n if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))\n return true;\n }\n return false;\n}\n```\n\n当内存分配策略 alloc_flags 设置了 ALLOC_HARDER 时,水位线的要求会降低原来的四分之一,相当于放宽了内存分配的限制。比原来更加努力使内存分配成功。\n\n当内存分配策略 alloc_flags 设置了 ALLOC_HIGH 时,水位线的要求会降低原来的二分之一,相当于更近一步放宽了内存分配的限制。比原来更加激进些\n\n### 2.4.2.2 rmqueue\n\n快速分配:通过__rmqueue_smallest函数在指定的migratetype类型链表上进行扫描分配内存,成功则返回page\n\n慢速分配:如果(1)分配失败,就调用__rmqueue_fallback尝试在其他类型的链表进行分配(注意:若指定分配类型是MIGRATE_MOVABLE,当进入慢分配流程时要先调用__rmqueue_smallest在MIGRATE_CMA类型列表上进行分配,成功返回;若再失败最后才调用__rmqueue_fallback函数进行分配).\n\n\n\n```\n/*\n * Allocate a page from the given zone. 
Use pcplists for order-0 allocations.\n */\nstatic inline\nstruct page *rmqueue(struct zone *preferred_zone,\n struct zone *zone, unsigned int order,\n gfp_t gfp_flags, unsigned int alloc_flags,\n int migratetype)\n{\n unsigned long flags;\n struct page *page;\n\n /* 如果order == 0 意味着只申请一个page,那么优先从pcp中申请该page */\n if (likely(order == 0)) {\n /*\n * MIGRATE_MOVABLE pcplist could have the pages on CMA area and\n * we need to skip it when CMA area isn\'t allowed.\n */\n if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||\n migratetype != MIGRATE_MOVABLE) {\n /* 在pcp中申请不详细研究,本文不分析pcp */\n page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,\n migratetype, alloc_flags);\n goto out;\n }\n }\n\n /*\n * We most definitely don\'t want callers attempting to\n * allocate greater than order-1 page units with __GFP_NOFAIL.\n */\n WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));\n spin_lock_irqsave(&zone->lock, flags);\n\n do {\n page = NULL;\n /*\n * order-0 request can reach here when the pcplist is skipped\n * due to non-CMA allocation context. 
HIGHATOMIC area is\n * reserved for high-order atomic allocation, so order-0\n * request should skip it.\n */\n if (order > 0 && alloc_flags & ALLOC_HARDER) {\n /* 优先使用MIGRATE_HIGHATOMIC(实际=PCP)去申请看看 */\n page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);\n if (page)\n trace_mm_page_alloc_zone_locked(page, order, migratetype);\n }\n if (!page)\n page = __rmqueue(zone, order, migratetype, alloc_flags);\n /* 如果PCP的没申请到,使用正常的migratetype,\n 该函数最后也是调用的__rmqueue_smallest \n 只不过在该函数中还涉及更多的申请路线*/\n } while (page && check_new_pages(page, order));\n /* 这里对找到的page flag进行检查(page->flags & __PG_HWPOISON) */\n spin_unlock(&zone->lock);\n if (!page)\n goto failed;\n\n /* 删除部分代码 */\nout:\n /* Separate test+clear to avoid unnecessary atomics */\n /* 是否需要唤醒kswapd进行内存回收 函数__rmqueue的子函数会触发置位。\n 可以看出来,在快速路径会优先申请内存,\n 然后才根据情况唤醒kswapd执行内存回收 */\n if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {\n clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);\n wakeup_kswapd(zone, 0, 0, zone_idx(zone));\n }\n\n VM_BUG_ON_PAGE(page && bad_range(zone, page), page);\n return page;\n\nfailed:\n local_irq_restore(flags);\n return NULL;\n}\n\n/*\n * Do the hard work of removing an element from the buddy allocator.\n * Call me with the zone->lock already held.\n */\nstatic __always_inline struct page *\n__rmqueue(struct zone *zone, unsigned int order, int migratetype,\n unsigned int alloc_flags)\n{\n struct page *page;\n\n /* 删除部分 CMA相关的 */\nretry:\n /*\n *上面分配失败,那么就调用__rmqueue_fallback尝试从\n 其他类型的链表分配(MIGRATE_MOVABLE例外):\n \n *(1)若指定的迁移类型是MIGRATE_MOVABLE,先调用 __rmqueue_smallest\n 直接快速在MIGRATE_CMA类型列表中去查找,成功直接返回,\n 失败则调用__rmqueue_fallback在慢速的进行内存块分配\n \n *(2)根据static int fallbacks[MIRGRATE_TYPES][MIGRATE_TYPES-1] \n 这个fallback(退路)数据来顺序查找\n */\n page = __rmqueue_smallest(zone, order, migratetype);\n if (unlikely(!page)) {\n if (alloc_flags & ALLOC_CMA)\n page = __rmqueue_cma_fallback(zone, order);\n\n if (!page && __rmqueue_fallback(zone, order, migratetype,\n alloc_flags))\n goto retry;\n }\nout:\n 
if (page)\n trace_mm_page_alloc_zone_locked(page, order, migratetype);\n return page;\n}\n```\n\n### **__rmqueue_smallest** \n\n\n\n```\n/*\n * 遍历指定迁移类型的伙伴系统链表,从链表中移动最小数量的页面返回给调用者.\n * 这是伙伴系统的快速处理流程.\n *@zone: 在该管理区的伙伴系统中分配页面\n *@order: 要分配的页面数量阶.\n *@migratetype: 在该迁移类型的链表中获取页面\n */\nstatic __always_inline\nstruct page *__rmqueue_smallest(struct zone *zone, unsigned int order,\n int migratetype)\n{\n unsigned int current_order;\n struct free_area *area;\n struct page *page;\n\n /* Find a page of the appropriate size in the preferred list */\n //从指定的阶到最大阶进行遍历,直到找到一个可以分配的链表\n for (current_order = order; current_order < MAX_ORDER; ++current_order) {\n //找到该阶对应的空闲页面链表\n area = &(zone->free_area[current_order]);\n // 搜索该阶的空闲链中是否有指定迁移类型的空闲页块,\n // 没有就搜索下一阶链表(page是链表的第一个元素)\n page = get_page_from_free_area(area, migratetype);\n if (!page)\n continue;\n /* 如果找到了,就从当前order的链表中删除一个元素 */\n del_page_from_free_list(page, zone, current_order);\n \n /* 如果cur_order已经大于最开始传入的order,\n 就是说明已经有一个更大的伙伴被拆分开了,\n 那么就在后续层级的所有order中增加一个free值 */\n expand(zone, page, order, current_order, migratetype);\n set_pcppage_migratetype(page, migratetype);\n return page;\n }\n\n return NULL;\n}\n```\n\n### __rmqueue_fallback\n\n尝试从fallbacks备选迁移列表中搜索出一块大小为2^order个页的连续空闲页块,也就是说按照备选方案,U(UNMOVABLE) M(MOVABLE) E(RECLAIMABLE)之间是可以相互转换的\n\n\n\n```\n/*\n * This array describes the order lists are fallen back to when\n * the free lists for the desirable migrate type are depleted\n */\nstatic int fallbacks[MIGRATE_TYPES][3] = {\n //UNMOVEABLE的次选为可回收的和MOVABLE\n [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },\n \n //MOVABLE的次选为可回收的和UNMOVEABLE\n [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },\n [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },\n#ifdef CONFIG_CMA\n [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */\n#endif\n#ifdef CONFIG_MEMORY_ISOLATION\n [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used 
*/\n#endif\n};\n\nstatic __always_inline bool\n__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,\n unsigned int alloc_flags)\n{\n struct free_area *area;\n int current_order;\n int min_order = order;\n struct page *page;\n int fallback_mt;\n bool can_steal;\n\n /*\n * Do not steal pages from freelists belonging to other pageblocks\n * i.e. orders < pageblock_order. If there are no local zones free,\n * the zonelists will be reiterated without ALLOC_NOFRAGMENT.\n */\n if (alloc_flags & ALLOC_NOFRAGMENT)\n min_order = pageblock_order;\n\n /*\n *从最高阶搜索,这样可以尽量的将其他迁移列表中的大块分割,避免形成过多的碎片\n */\n for (current_order = MAX_ORDER - 1; current_order >= min_order;\n --current_order) {\n area = &(zone->free_area[current_order]);\n /*\n *在area内存区域中,遍历start_migratetype对应的备用数组,\n 看是否能在备选迁移类型的列表中找到一块满足要求的内存块\n (阶大于等于current_order小于等于Max_ORDER-1)\n \n *(1)函数返回-1表示未找到满足要求的内存块\n \n *(2)*can_steal为True表示需要先把该函数找到的后补空闲内存块\n 先迁移到指定的迁移类型列表上去(避免碎片化)\n */\n fallback_mt = find_suitable_fallback(area, current_order,\n start_migratetype, false, &can_steal);\n if (fallback_mt == -1)\n continue;\n\n /* 我们无法从页面块中窃取所有可用页面,并且请求的migrateype是可移动的。\n 在这种情况下,最好窃取并拆分最小的可用页面,而不是最大的可用页面。\n 因为即使下一个可移动分配落回与此不同的页面块,也不会导致永久碎片。*/\n if (!can_steal && start_migratetype == MIGRATE_MOVABLE\n && current_order > order)\n goto find_smallest;\n\n goto do_steal;\n }\n\n return false;\n\nfind_smallest:\n /* 使用当前小阶位在尝试找一下有没有可以作为退路的迁移类型 */\n for (current_order = order; current_order < MAX_ORDER;\n current_order++) {\n area = &(zone->free_area[current_order]);\n fallback_mt = find_suitable_fallback(area, current_order,\n start_migratetype, false, &can_steal);\n if (fallback_mt != -1)\n break;\n }\n\n /*\n * This should not happen - we already found a suitable fallback\n * when looking for the largest page.\n */\n VM_BUG_ON(current_order == MAX_ORDER);\n\ndo_steal:\n page = get_page_from_free_area(area, fallback_mt);\n\n steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,\n can_steal);\n\n 
trace_mm_page_alloc_extfrag(page, order, current_order,\n start_migratetype, fallback_mt);\n\n return true;\n\n}\n\n/*\n 此功能实现实际的”盗窃”行为。如果订单足够大,我们可以偷走整个页面块。\n 如果没有,我们首先将该页面块中的空闲页面移动到我们的migrateype,\n 并确定在具有兼容migrateype的页面块中有多少已经分配的页面。\n 如果至少有一半的页面是free的或compatible(兼容)的,\n 我们可以更改页面块本身的migrateype,这样将来释放的页面将被放在正确的free_list中。 */\nstatic void steal_suitable_fallback(struct zone *zone, struct page *page, \n unsigned int alloc_flags, int start_type, bool whole_block)\n{\n unsigned int current_order = buddy_order(page);\n int free_pages, movable_pages, alike_pages;\n int old_block_type;\n\n // 记录该page块当前的migrate type\n old_block_type = get_pageblock_migratetype(page);\n\n /*\n * This can happen due to races and we want to prevent broken\n * highatomic accounting.\n */\n if (is_migrate_highatomic(old_block_type))\n goto single_page;\n\n /* Take ownership for orders >= pageblock_order */\n //当要迁移的页被内核认定为大页,内核会将将超出的部分的迁移类型设为指定迁移类型\n if (current_order >= pageblock_order) {\n change_pageblock_range(page, current_order, start_type);\n goto single_page;\n }\n\n /*\n * 提高水印以增加回收压力,从而降低未来出现倒退的可能性。\n * 现在唤醒kswapd,因为节点可能会整体平衡,kswapd不会自然唤醒。 */\n if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))\n set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);\n\n /* We are not allowed to try stealing from the whole block */\n // 也就是传入的can_steal如果是1,就把整块都拿走(这样就避免了碎片化)\n if (!whole_block)\n goto single_page;\n //按照新的迁移类型做页块迁移\n free_pages = move_freepages_block(zone, page, start_type,\n &movable_pages);\n /*\n * Determine how many pages are compatible with our allocation.\n * For movable allocation, it\'s the number of movable pages which\n * we just obtained. For other types it\'s a bit more tricky.\n */\n if (start_type == MIGRATE_MOVABLE) {\n alike_pages = movable_pages;\n } else {\n /*\n * If we are falling back a RECLAIMABLE or UNMOVABLE allocation\n * to MOVABLE pageblock, consider all non-movable pages as\n * compatible. 
If it\'s UNMOVABLE falling back to RECLAIMABLE or\n * vice versa, be conservative since we can\'t distinguish the\n * exact migratetype of non-movable pages.\n */\n if (old_block_type == MIGRATE_MOVABLE)\n alike_pages = pageblock_nr_pages\n - (free_pages + movable_pages);\n else\n alike_pages = 0;\n }\n\n /* moving whole block can fail due to zone boundary conditions */\n if (!free_pages)\n goto single_page;\n\n /*\n * If a sufficient number of pages in the block are either free or of\n * comparable migratability as our allocation, claim the whole block.\n */\n if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||\n page_group_by_mobility_disabled)\n //对整块的page设置迁移类型\n set_pageblock_migratetype(page, start_type);\n\n return;\n\nsingle_page:\n //将“偷”过来的对应order的页加入到对应的迁移类型中\n move_to_free_list(page, zone, current_order, start_type);\n}\n```\n\n我们可以通过 cat /proc/pagetypeinfo 命令可以查看当前各个内存区域中的伙伴系统中不同页面迁移类型以及不同 order 尺寸的内存块个数\n\n
\n\n从这里也可以看出一个设备初始状态的内存大部分都是Movable,当某种迁移类型内存不足时,都是以内存块的方式,从Movable中“偷”出内存\n\n### 2.4.3 __alloc_pages_slowpath\n\n__alloc_pages_slowpath()是用于慢速页面分配,允许等待,内存压缩和内存回收等.需要注意的是慢速分配仍然要调用到get_page_from_freelist函数来进行内存的获取,慢速分配的大致过程概况如下:\n\n
\n\n\n\n```\nstatic inline struct page *\n__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,\n struct alloc_context *ac)\n{\n bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;\n const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;\n struct page *page = NULL;\n unsigned int alloc_flags;\n unsigned long did_some_progress;\n enum compact_priority compact_priority;\n enum compact_result compact_result;\n int compaction_retries;\n int no_progress_loops;\n unsigned int cpuset_mems_cookie;\n int reserve_flags;\n\n /*\n * We also sanity check to catch abuse of atomic reserves being used by\n * callers that are not in atomic context.\n */\n if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==\n (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))\n gfp_mask &= ~__GFP_ATOMIC;\n\nretry_cpuset:\n compaction_retries = 0;\n no_progress_loops = 0;\n compact_priority = DEF_COMPACT_PRIORITY;\n cpuset_mems_cookie = read_mems_allowed_begin();\n\n /*\n * 快速路径下,内存申请是延后执行kswapd的,但是在慢速路径,\n * 会先判断alloc_flag 是否需要执行内存回收,然后后面执行回收再申请page,\n * 该函数中会对flag置位ALLOC_WMARK_MIN标志(保守内存分配转向为激进内存分配)\n */\n alloc_flags = gfp_to_alloc_flags(gfp_mask); \n\n /*\n 重置nodemask和zonelist,因为可能在fast path中对值进行了更新\n */\n ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,\n ac->highest_zoneidx, ac->nodemask);\n if (!ac->preferred_zoneref->zone)\n goto nopage;\n \n /* 基本内核里申请内存的标志都会有内存回收。\n ALLOC_KSWAPD 的值和 __GFP_KSWAPD_RECLAIM相等 */\n if (alloc_flags & ALLOC_KSWAPD) \n wake_all_kswapds(order, gfp_mask, ac); /* 唤醒内核线程kswapd,后面分析 */\n\n /*\n 调整后重新利用get_page_from_freelist在重新进行内存分配\n */\n page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);\n if (page)\n goto got_pg;\n\n /*\n *前面没有分配到内存可能由于内存碎片的缘故,\n 调用函数__alloc_pages_direct_compact,尝试内存规整操作,\n 进行页的迁移,然后再尝试分配执行该操作需要同时满足如下条件:\n \n *(1)分配请求允许直接回收(gfp_mask & __GFP_DIRECT_RECLAI为真)\n \n *(2)内存分配的阶要大于3(PAGE_ALLOC_COSTLY_ORDER):\n 因为低阶内存块受内存碎片化影响较小,内存规整不能解决问题。\n 或者order>0,移动类型是不可移动的\n \n *(3)本次内存分配不能是无水线限制的内存分配,\n 
函数gfp_pfmemalloc_allowed(gfp_mask)返回false\n */ \n if (can_direct_reclaim &&\n (costly_order ||\n (order > 0 && ac->migratetype != MIGRATE_MOVABLE))\n && !gfp_pfmemalloc_allowed(gfp_mask)) {\n /* 这里有一套很复杂的页移动,压缩机制,先不具体分析 */\n page = __alloc_pages_direct_compact(gfp_mask, order,\n alloc_flags, ac,\n INIT_COMPACT_PRIORITY,\n &compact_result);\n if (page)\n goto got_pg;\n\n if (costly_order && (gfp_mask & __GFP_NORETRY)) {\n if (compact_result == COMPACT_SKIPPED ||\n compact_result == COMPACT_DEFERRED)\n goto nopage;\n compact_priority = INIT_COMPACT_PRIORITY;\n }\n }\n\nretry:\n /* Ensure kswapd doesn\'t accidentally go to sleep as long as we loop \n 确保交换线程没有意外睡去 */\n if (alloc_flags & ALLOC_KSWAPD)\n wake_all_kswapds(order, gfp_mask, ac);\n\n /* 对gfp_mask进行分析看是否可以不受水线限制进行内存分配 */\n reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);\n if (reserve_flags)\n alloc_flags = current_alloc_flags(gfp_mask, reserve_flags);\n\n /*\n * 如果可以忽略内存策略,则重置nodemask和zonelist。\n * 这些分配是高优先级的,针对系统而不是针对用户。\n */\n if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {\n ac->nodemask = NULL;\n ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,\n ac->highest_zoneidx, ac->nodemask);\n }\n\n /* 尝试使用可能已调整的zonelist和alloc_flags 在进行快速页申请 */\n page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);\n if (page)\n goto got_pg;\n\n /* 如果调用者不愿意回收,我们无法平衡任何东西。那到这里就没办法申请了 */\n if (!can_direct_reclaim)\n goto nopage;\n\n /* Avoid recursion of direct reclaim */\n /* 如果当前进程不能使用紧急内存,内存回收很可能会失败,容易造成递归调用 */\n if (current->flags & PF_MEMALLOC)\n goto nopage;\n\n /* 尝试直接回收,然后分配,主要是执行内存回收(先不具体分析),\n 然后执行get_page_from_freelist */\n page = __alloc_pages_direct_reclaim(gfp_mask, order, \n alloc_flags, ac, &did_some_progress);\n if (page)\n goto got_pg;\n\n /* 尝试直接压缩(物理内存页的再排序,以合并多个空闲页成为更大的连续块),然后分配内存 */\n page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, \n compact_priority, &compact_result);\n if (page)\n goto got_pg;\n\n /* Do not loop if specifically requested */\n if (gfp_mask & 
__GFP_NORETRY)\n goto nopage;\n\n /* 除非gfp_mask设置了__GFP_REPEAT标志,否则退出高阶的空闲内存的循环申请(costly)*/\n if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))\n goto nopage;\n\n /* 检查回收重试是否继续进行。当我们连续 MAX_RECLAIM_RETRIES \n 次尝试回收仍然无法回收到页面,或者即使回收LRU列表上其余所有页面仍然无法满足水位线要求时,\n 我们会放弃。如果重试是可行的,返回true,否则返回false以进入OOM路径(内存用尽) */\n if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,\n did_some_progress > 0, &no_progress_loops))\n goto retry;\n\n /* 如果零阶回收无法取得任何进展,重试压缩是没有意义的,\n 因为当前的压缩实现取决于足够的空闲内存(请参见__compaction_suitable的实现)*/\n if (did_some_progress > 0 &&\n should_compact_retry(ac, order, alloc_flags,\n compact_result, &compact_priority,\n &compaction_retries))\n goto retry;\n\n /* 在我们开始进行OOM杀进程之前,处理可能存在的cpuset更新竞争情况。 */\n if (check_retry_cpuset(cpuset_mems_cookie, ac))\n goto retry_cpuset;\n\n /* 内存压缩和回收都不行了,开始oom */\n page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);\n if (page)\n goto got_pg;\n\n /* 避免在没有任何水位的情况下进行无止境的分配,\n 意思是如果已经oom过了还没获取到有效大小的page,那就不搞了 */\n if (tsk_is_oom_victim(current) &&\n (alloc_flags & ALLOC_OOM ||\n (gfp_mask & __GFP_NOMEMALLOC)))\n goto nopage;\n\n /* 当直接从oom获取到的页面分配内存失败,且oom已经杀死了一些优先级低的进程,\n 此时将no_progress_loops赋值为0,并跳转到retry再一次进行内存分配操作 */\n if (did_some_progress) {\n no_progress_loops = 0;\n goto retry;\n }\n\nnopage:\n /* Deal with possible cpuset update races before we fail */\n if (check_retry_cpuset(cpuset_mems_cookie, ac))\n goto retry_cpuset;\n\n /* 删除部分 */\nfail:\n warn_alloc(gfp_mask, ac->nodemask,\n \"page allocation failure: order:%u\", order);\ngot_pg:\n return page;\n}\n```\n\n### 2.4.4 GFP的几种标志翻译(gpt)\n\n### 2.4.4.1 关于回收相关的标志\n\n\n\n```\n#define __GFP_IO ((__force gfp_t)___GFP_IO)\n#define __GFP_FS ((__force gfp_t)___GFP_FS)\n#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */\n#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */\n#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))\n#define 
__GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL)\n#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)\n#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)\n```\n\n### __GFP_IO\n\n%__GFP_IO标志表示在分配内存时可能会启动物理I/O操作,导致页面换入/换出。在使用%__GFP_IO标志进行内存分配时,可能会发生阻塞操作,因为内部算法可以通过调用页回写守护程序或者启动数据交换操作(swapping)来执行I/O操作。\n\n### __GFP_FS\n\n%__GFP_FS标志表示分配的内存可能会用于文件系统(FS)层,可以调用到底层文件系统操作。使用%__GFP_FS标志来进行内存分配时,可能会出现阻塞的情况。如果我们需要对于文件系统进行操作,则需要使用%__GFP_FS标志。但是,因为这会导致内存分配器递归进入文件系统,而文件系统可能已经持有锁,因此在某些情况下,需要清除该标志,以避免此类递归调用。\n\n### __GFP_DIRECT_RECLAIM\n\n%__GFP_DIRECT_RECLAIM标志表示调用者可能会进入直接回收(direct reclaim)的状态。进行内存分配时使用该标志,可能会导致阻塞,因为这会触发回收操作以释放更多的空闲内存。但是,如果我们已经有了备选项,则可以清除此标志,以避免不必要的延迟。这意味着如果可以从另一个位置获取可用的内存,则不需要进行回收操作。\n\n### __GFP_KSWAPD_RECLAIM\n\n%__GFP_KSWAPD_RECLAIM标志表示当内存低水位标记(low watermark)被触及时,调用者希望唤醒kswapd并进行内存回收,直到高水位标记(high watermark)被触及为止。当需要进行内存回收操作时,使用该标志进行内存分配可能会导致系统阻塞。如果有备选项,则调用者可能希望清除此标志,以避免系统中断事件的发生。其中一个常见的例子是THP(Transparent Huge Pages)的分配,其中回退选项很便宜,但回收/整理会导致系统发生间接停顿。\n\n### __GFP_RECLAIM\n\n%__GFP_RECLAIM标志是指同时允许/禁止直接回收(direct reclaim)和kswapd回收(kswapd reclaim),是这两个标志的简写。在进行内存分配时,使用它可以同时控制两个标志,有效地管理内存回收的行为。具体来说,如果我们想要在内存紧缩时通过kswapd回收内存并使用备选方案来避免直接回收,则可以使用该标志。\n\n### __GFP_NORETRY\n\n%__GFP_NORETRY:分配器将仅尝试轻量级内存直接回收,以在内存有压力的情况下获得一些内存(因此可能会休眠)。但是它将避免像OOM killer这样的不必要行为。在重度内存压力下,调用者必须处理失败的情况,这种情况很有可能发生。当失败可以很容易地以小成本进行处理时,比如降低吞吐量时,可以使用该标志。\n\n### __GFP_RETRY_MAYFAIL\n\n%__GFP_RETRY_MAYFAIL: 分配器将在先前失败的内存回收过程中重试,如果有一定迹象表明在其他地方已经取得了进展,则等待其他任务尝试释放内存的高级方法,例如收缩(移除碎片化)和页面回收。仍然存在确定性的重试次数限制,但是与%__GFP_NORETRY相比,限制更大。带有此标志的分配可能会失败,但只有可用内存是极少时才会失败。尽管这些分配不会直接触发OOM killer,但它们的失败表明系统很快可能需要使用OOM killer。调用者必须处理失败,但可以通过失败一个更高级别的请求或以更低效的方式完成来合理地处理失败。如果分配失败,并且调用者具备释放一些非必要内存的能力,那么这样做可能有利于整个系统。\n\n### __GFP_NOFAIL\n\n%__GFP_NOFAIL表示分配器必须无限重试:调用程序无法处理分配失败。分配可能会无限期阻塞,永远不会返回失败状态。测试失败是毫无意义的。新用户应该经过仔细评估(并且只有在没有合理失败策略的情况下才使用该标志),但绝对比使用开放式无限循环分配器来说更可取。强烈不建议将此标志用于昂贵的分配。\n\n### 2.4.4.2 一些常用的标志\n\n\n\n```\n#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)\n#define GFP_KERNEL 
(__GFP_RECLAIM | __GFP_IO | __GFP_FS)\n#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)\n#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)\n```\n\n### GFP_ATOMIC\n\n%GFP_ATOMIC用户不能睡眠,并需要分配成功。应用了较低的内存水印,以允许访问“原子保留”(atomic reserve)。但是当前实现不支持NMI和其他严格的不可抢占上下文(例如raw_spin_lock)。这同样适用于%GFP_NOWAIT。\n\n### GFP_KERNEL\n\n内核级别的普通分配,只在进程的内核部分中使用,调用者要求%ZONE_NORMAL或更低的区域进行直接访问,可以进行直接回收(direct reclaim)\n\n### GFP_NOWAIT\n\n%GFP_NOWAIT用于内核分配,不会因为直接回收(direct reclaim)而阻塞。即所分配的内存不会让系统进程睡眠和等待。\n\n### GFP_USER\n\n%GFP_USER用于用户空间的分配,并需要内核或硬件直接访问,典型的是文件系统中超级块的申请。\n\n### 2.4.5 图示\n\n
\n\n
\n\n
\n\n### 2.5 内存申请释放图示\n\n### 2.5.1 伙伴系统内存申请\n\n我们假设当前伙伴系统中只有 order = 3 的空闲链表 free_area[3],其余剩下的分配阶 order 对应的空闲链表中均是空的。 free_area[3] 中仅有一个空闲的内存块,其中包含了连续的 8 个 page。\n\n现在我们向伙伴系统申请一个 page 大小的内存(对应的分配阶 order = 0),那么内核会在伙伴系统中首先查看 order = 0 对应的空闲链表 free_area[0] 中是否有空闲内存块可供分配。\n\n随后内核会根据前边解析的内存分配逻辑,继续升级到 free_area[1] , free_area[2] 链表中寻找空闲内存块,直到查找到 free_area[3] 发现有一个可供分配的内存块。这个内存块中包含了 8 个 连续的空闲 page,但是我们只要一个 page 就够了,那该怎么办呢?\n\n于是内核先将 free_area[3] 中的这个空闲内存块从链表中摘下,然后减半分裂成两个内存块,分裂出来的这两个内存块分别包含 4 个 page(分配阶 order = 2)。\n\n
\n\n随后内核会将分裂出的后半部分(图中绿色部分,order = 2),插入到 free_area[2] 链表中。\n\n
\n\n
\n\n前半部分(图中黄色部分,order = 2)继续减半分裂,分裂出来的这两个内存块分别包含 2 个 page(分配阶 order = 1)。如下图中第 4 步所示,前半部分为黄色,后半部分为紫色。同理按照前边的分裂逻辑,内核会将后半部分内存块(紫色部分,分配阶 order = 1)插入到 free_area[1] 链表中。\n\n
\n\n前半部分(图中黄色部分,order = 1)在上图中的第 6 步继续减半分裂,分裂出来的这两个内存块分别包含 1 个 page(分配阶 order = 0),前半部分为青色,后半部分为黄色。\n\n后半部分插入到 free_area[0] 链表中,前半部分返回给进程,这时内存分配成功,流程结束。\n\n### 2.5.2 伙伴系统内存释放\n\n伙伴系统中的内存回收刚好和内存分配的过程相反,核心则是从低阶 free_list 中寻找释放内存块的伙伴,如果没有伙伴则将要释放的内存块插入到对应分配阶 order 的 free_list中。如果存在伙伴,则将释放内存块与它的伙伴合并,作为一个新的内存块继续到更高阶的 free_list 中循环重复上述过程,直到不能合并为止。\n\n下面是物理内存页在物理内存上的真实视图(page10将要被释放)\n\n
\n\n
\n\n假设当前伙伴系统的状态如上图所示,现在我们需要向伙伴系统释放一个内存页(order = 0),编号为10。\n\n由于我们要释放的内存块只包含了一个物理内存页 page10,所以它的分配阶 order = 0,首先内核需要在伙伴系统 free_area[0] 中查找与 page10 大小相等并且连续的内存块(伙伴)。\n\n而page11 是 page10 (10^(1<<0) = 11)的伙伴,于是将 page11 从 free_area[0] 上摘下并与 page10 合并组成一个新的内存块(分配阶 order = 1)。随后内核会在 free_area[1] 中查找新内存块的伙伴:\n\n
\n\n现在 free_area[1] 中 page8 和 page9 组成的内存块与 page10 和 page11 组成的内存块是伙伴(10^(1<<1) = 8),于是继续将这两个内存块(分配阶 order = 1)继续合并成一个新的内存块(分配阶 order = 2)。随后内核会在 free_area[2] 中查找新内存块的伙伴:\n\n
\n\n现在 free_area[2] 中 page12,page13,page14,page15 组成的内存块与 page8,page9,page10,page11 组成的新内存块是伙伴(8^(1<<2) = 12),于是将它们从 free_area[2] 上摘下继续合并成一个新的内存块(分配阶 order = 3),随后内核会在 free_area[3] 中查找新内存块的伙伴:\n\n
\n\nfree_area[3] 中的内存块(page20 到 page27)与新合并的内存块(page8 到 page15)虽然大小相同但是物理上并不连续,所以它们不是伙伴,不能再继续向上合并了。于是内核将 page8 到 page15 组成的内存块(分配阶 order = 3)插入到 free_area[3] 中,至此内存释放过程结束。\n\n
\n\n## 3. 内存回收\n\n### 3.1 内存回收的目标\n\n对于内核并不是所有的物理内存都可以参与回收,比如内核的代码段,如果被内核回收了,系统就无法正常运行了,所以一般内核代码段、数据段、内核申请的内存、内核线程占用的内存等都是不可以回收的,除此之外的内存都可以是我们要回收的目标。\n\n内核空间是所有进程公用的,内核中使用的页通常是伴随整个系统运行周期的,频繁的页换入和换出是非常影响性能的,所以内核中的页基本上不能回收,不是技术上实现不了而是这样做得不偿失。\n\n同时,另外一种是应用程序主动申请锁定的页,它的实时性要求比较高,频繁的换入换出和缺页异常处理无法满足它对于时间上的要求,所以这部分程序可能使用mlock api将页主动锁定,不允许它进行回收。\n\n那么我们就比较明确了,并非内存中的所有页面都是可以交换出去的。事实上,只有与用户空间建立了映射关系的物理页面才会被换出去,而内核空间中内核所占的页面则常驻内存。我们下面对用户空间中的页面和内核空间中的页面给出进一步的分类讨论。可以把用户空间中的页面按其内容和性质分为以下几种:\n\n除此之外,内核在执行过程中使用的页面要经过动态分配,但永驻内存,此类页面根据其内容和性质可以分为两类:\n\n在内核中还有一种页面,虽然使用完毕,但其内容仍有保存价值,因此,并不立即释放。这类页面“释放”之后进入一个LRU队列,经过一段时间的缓冲让其“老 化”。如果在此期间又要用到其内容了,就又将其投入使用,否则便继续让其老化,直到条件不再允许时才加以回收。这种用途的内核页面大致有以下这些:\n\n按照以上所述,对于内存回收,大致可以分为以下两类:\n\n### 3.2 内存回收机制\n\n内核之所以要进行内存回收,主要原因有两个:\n\n当真的有大于空闲内存的申请到来的时候,会触发强制内存回收。我们只讨论针对zone的内存回收,对于内存回收讨论以下三种方式\n\n### 3.2.1 水位线和min_free_kbytes\n\n
\n\n达到 low 水位的时候,kswapd 开始异步回收内存;达到 min 水位的时候,进程被堵住进行 direct reclamation 同步回收内存\n\n水线关系图如下:\n\n
\n\n\n\n```\n#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)\n#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)\n#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)\n#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)\n```\n\n因此以如下截图的实际水线值为:\n\n
\n\n
\n\n\n\n```\nstatic inline bool boost_watermark(struct zone *zone)\n{\n unsigned long max_boost;\n\n if (!watermark_boost_factor)\n return false;\n \n if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))\n return false;\n\n //默认的 boost_factor=15000 故max_boost 是water[high] 的 1.5倍\n max_boost = mult_frac(zone->_watermark[WMARK_HIGH],\n watermark_boost_factor, 10000);\n\n if (!max_boost)\n return false;\n\n#define MAX_ORDER 11\n#define pageblock_order (MAX_ORDER-1) //10\n#define pageblock_nr_pages (1UL << pageblock_order) //1024(单位page)\n\n max_boost = max(pageblock_nr_pages, max_boost);\n\n //取4M和max_boost的最小值\n zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, max_boost); \n\n return true;\n}\n```\n\n当然触发boost的必要条件是产生过内存碎片化。\n\n其中关于min_free_kbytes有推荐值如下\n\n
\n\n水位线的计算源码如下\n\n\n\n```\nstatic void __setup_per_zone_wmarks(void)\n{\n unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);\n unsigned long lowmem_pages = 0;\n struct zone *zone;\n unsigned long flags;\n\n /* Calculate total number of !ZONE_HIGHMEM pages */\n for_each_zone(zone) {\n if (!is_highmem(zone))\n lowmem_pages += zone_managed_pages(zone);\n }\n\n for_each_zone(zone) {\n u64 tmp;\n\n spin_lock_irqsave(&zone->lock, flags);\n tmp = (u64)pages_min * zone_managed_pages(zone);\n do_div(tmp, lowmem_pages);\n \n /* 删除部分 */\n // 对于非NUMA而言就是min_free_kbytes >> 2\n zone->_watermark[WMARK_MIN] = tmp; \n \n /*\n * Set the kswapd watermarks distance according to the\n * scale factor in proportion to available memory, but\n * ensure a minimum size on small systems.\n */\n tmp = max_t(u64, tmp >> 2,\n mult_frac(zone_managed_pages(zone),\n watermark_scale_factor, 10000));\n\n zone->watermark_boost = 0;\n zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;\n zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;\n\n spin_unlock_irqrestore(&zone->lock, flags);\n }\n\n /* update totalreserve_pages */\n calculate_totalreserve_pages();\n}\n```\n\n### 3.2.2 关键结构体\n\n内存回收过程中有一个扫描控制结构体,用于控制这个回收过程。既然是回收内存,就需要明确要回收多少内存,在哪里回收,以及回收时的操作权限等,我们看下这个控制结构struct scan_control主要的一些变量\n\n\n\n```\nstruct scan_control {\n /* How many pages shrink_list() should reclaim */\n unsigned long nr_to_reclaim; //shrink_list()需要回收的页面数量\n\n /*\n * Nodemask of nodes allowed by the caller. If NULL, all nodes\n * are scanned.\n */\n nodemask_t *nodemask; //指定可以在那个node回收内存\n\n /*\n * The memory cgroup that hit its limit and as a result is the\n * primary target of this reclaim invocation.\n */\n struct mem_cgroup *target_mem_cgroup; //是否针对某个cgroup扫描回收内存\n\n /*\n * Scan pressure balancing between anon and file LRUs\n */\n unsigned long anon_cost;\n unsigned long file_cost;\n\n /* Can active pages be deactivated as part of reclaim? 
*/\n#define DEACTIVATE_ANON 1\n#define DEACTIVATE_FILE 2\n unsigned int may_deactivate:2;\n unsigned int force_deactivate:1;\n unsigned int skipped_deactivate:1;\n\n /* Writepage batching in laptop mode; RECLAIM_WRITE */\n unsigned int may_writepage:1; //是否可以回写\n\n /* Can mapped pages be reclaimed? */\n unsigned int may_unmap:1; //是否可以执行unmap\n\n /* Can pages be swapped as part of reclaim? */\n unsigned int may_swap:1; //是否可以将页面交换\n\n /*\n * Cgroup memory below memory.low is protected as long as we\n * don\'t threaten to OOM. If any cgroup is reclaimed at\n * reduced force or passed over entirely due to its memory.low\n * setting (memcg_low_skipped), and nothing is reclaimed as a\n * result, then go back for one more cycle that reclaims the protected\n * memory (memcg_low_reclaim) to avert OOM.\n */\n unsigned int memcg_low_reclaim:1;\n unsigned int memcg_low_skipped:1;\n\n unsigned int hibernation_mode:1;\n\n /* One of the zones is ready for compaction */\n unsigned int compaction_ready:1; //是否可以进行内存压缩,即碎片整理\n\n /* There is easily reclaimable cold cache in the current node */\n unsigned int cache_trim_mode:1;\n\n /* The file pages on the current node are dangerously low */\n unsigned int file_is_tiny:1;\n\n /* Allocation order */\n s8 order; //进程内存分配页面数量,从分配器传递过来的参数\n\n /* Scan (total_size >> priority) pages at once */\n s8 priority; //控制每次扫描数量,默认是总页数的1/4096\n\n /* The highest zone to isolate pages for reclaim from */\n s8 reclaim_idx; //进行页面回收的最大zone id\n\n /* This context\'s GFP mask */\n gfp_t gfp_mask; //分配掩码\n\n /* Incremented by the number of inactive pages that were scanned */\n unsigned long nr_scanned; //已扫描的非活动页面数量\n\n /* Number of pages freed so far during a call to shrink_zones() */\n unsigned long nr_reclaimed; //shrink_zones()中已回收页面数量\n\n struct {\n unsigned int dirty;\n unsigned int unqueued_dirty;\n unsigned int congested;\n unsigned int writeback;\n unsigned int immediate;\n unsigned int file_taken;\n unsigned int taken;\n } nr;\n\n /* for 
recording the reclaimed slab by now */\n struct reclaim_state reclaim_state;\n};\n```\n\n### 3.2.3 快速内存回收\n\n快速回收的函数执行主体为:node_reclaim,该函数在 **非CONFIG_NUMA架构下不生效** 。zone_watermark_fast判断如果此次快速内存申请已经触及到水位线时,可以执行node_reclaim-> __node_reclaim\n\n快速内存回收,指定每轮进行回收的页面最大值为取需要回收的页面数和32的最大值,快速回收不能进行unmap,writeback操作,回收priority为4,即最多尝试调用shrink_node进行回收的次数为priority值,直到回收到的页数达到需要分配的内存页数或者完成4次循环为止,也就是最多能够回收128页\n\n\n\n```\n/*\n * Try to free up some pages from this node through reclaim.\n */\nstatic int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)\n{\n /* Minimum pages needed in order to stay on node */\n const unsigned long nr_pages = 1 << order; //需要释放的页数\n struct task_struct *p = current;\n unsigned int noreclaim_flag;\n\n#define node_reclaim_mode 0 \n\n struct scan_control sc = { //内存回收的条件\n .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), //最大回收32页\n .gfp_mask = current_gfp_context(gfp_mask),\n .order = order,\n .priority = NODE_RECLAIM_PRIORITY, //优先级为4\n .may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),\n .may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),\n .may_swap = 1,\n .reclaim_idx = gfp_zone(gfp_mask),\n };\n\n /* 删除部分代码 */\n\n if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {\n /*\n * Free memory by calling shrink node with increasing\n * priorities until we have enough memory freed.\n */\n do {\n shrink_node(pgdat, &sc); //内存回收的核心函数,后面重点分析\n } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);\n // 最多执行4次或者释放的页数已经满足条件\n }\n\n /* 删除部分代码 */\n\n return sc.nr_reclaimed >= nr_pages;\n}\n```\n\n### 3.2.4 直接内存回收\n\n函数入口为__alloc_pages_direct_reclaim,函数位于mm/page_alloc.c文件中\n\n\n\n```\n/* The really slow allocator path where we enter direct reclaim */\nstatic inline struct page *\n__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,\n unsigned int alloc_flags, const struct alloc_context *ac,\n unsigned long *did_some_progress)\n{\n struct page *page = NULL;\n bool drained = false;\n 
//直接同步页面回收的执行主体\n // __perform_reclaim -> \n // try_to_free_pages(回收最大32页) -> \n // do_try_to_free_pages\n *did_some_progress = __perform_reclaim(gfp_mask, order, ac);\n if (unlikely(!(*did_some_progress)))\n return NULL;\n\nretry:\n // 内存快速分配\n page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);\n\n /*\n * 如果在直接回收之后分配失败,可能是因为页面固定在每个\n * cpu列表上或处于高分配预留中\n * Shrink them and try again\n */\n if (!page && !drained) {\n unreserve_highatomic_pageblock(ac, false);\n // 释放固定在每个cpu列表上页面,这里最终调用flush_work即进行脏页的回写,\n // 当IO性能不是很好的时候会造成系统严重卡顿\n drain_all_pages(NULL); \n drained = true;\n goto retry;\n }\n\n return page;\n}\n\n/*\n这是一个直接页面回收的主入口点。如果非活动页面链表的完整扫描无法释放足够的内存,\n则表示“内存不足”,则需要杀死某些进程以释放内存。\n\n如果调用者是! __GFP_FS,则失败的概率相当高-\n区域可能充满了脏页面或正在写回的页面,而此调用方无能为力。\n我们唤醒写回线程并明确地休眠,希望一些这些页面可以写入。\n但是,如果分配任务持有防止写出的文件系统锁,则可能无法工作,分配尝试将失败。\n\n返回值:如果没有回收页面,则为0,否则为回收的页面数。\n */\nstatic unsigned long do_try_to_free_pages(struct zonelist *zonelist,\n struct scan_control *sc)\n{\n int initial_priority = sc->priority;\n pg_data_t *last_pgdat;\n struct zoneref *z;\n struct zone *zone;\nretry:\n /* 删除部分 */\n do {\n /* 删除部分 */\n sc->nr_scanned = 0;\n shrink_zones(zonelist, sc); //直接回收的核心,最终调用shrink_node\n\n //回收到了需要的page数\n if (sc->nr_reclaimed >= sc->nr_to_reclaim)\n break;\n //这个标志意味着一个可压缩的内存区域已经被识别出来了,\n //可以先执行内存压缩\n if (sc->compaction_ready)\n break;\n\n /*\n * If we\'re getting trouble reclaiming, start doing\n * writepage even in laptop mode.\n */\n //回收过程中遇到了麻烦,则需要回写。\n if (sc->priority < DEF_PRIORITY - 2)\n sc->may_writepage = 1;\n } while (--sc->priority >= 0);\n\n /* 删除部分 */\n\n if (sc->nr_reclaimed) //回收到了的page数\n return sc->nr_reclaimed;\n\n /* Aborted reclaim to try compaction? 
don\'t OOM, then */\n if (sc->compaction_ready)\n return 1;\n\n /* 删除部分 */\n return 0;\n}\n```\n\n### 3.2.5 kswapd\n\n为了避免总在CPU忙碌时也就是缺页异常发生时,临时再来搜寻空页面换出的页面进行换出,内核将定期检查并预先将若干页面换出以腾出空间,维持系统空闲内存的的保有量,以减轻系统在缺页异常发生时的负担。为此内核设置了一个专司页面换出的守护神kswapd进程。\n\nkswapd内核线程初始化时会为系统每个NUMA内存节点创建一个名为“kswapd%d”的内核线程,kswapd进程创建的代码如下:mm\\vmscan.c\n\n\n\n```\nstatic int __init kswapd_init(void)\n{\n int nid;\n\n // swap_setup函数根据物理内存大小设定全局变量page_cluster,\n // 当megs小于16时候,page_cluster为2,否则为3\n swap_setup(); \n for_each_node_state(nid, N_MEMORY)\n kswapd_run(nid);\n return 0;\n}\n\nvoid __init swap_setup(void)\n{\n unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);\n\n /* Use a smaller cluster for small-memory machines */\n // page_cluster为每次swap in或者swap out操作多少内存页 为2的指数,\n // 通过/proc/sys/vm/page-cluster 查看\n if (megs < 16)\n page_cluster = 2;\n else\n page_cluster = 3;\n /*\n * Right now other parts of the system means that we\n * _really_ don\'t want to cluster much more\n */\n}\n```\n\n\n\n\n\n\n```\n/*\n * The background pageout daemon, started as a kernel thread\n * from the init process.\n *\n * This basically trickles out pages so that we have _some_\n * free memory available even if there is no other activity\n * that frees anything up. 
This is needed for things like routing\n * etc, where we otherwise might have all activity going on in\n * asynchronous contexts that cannot page things out.\n *\n * If there are applications that are active memory-allocators\n * (most normal use), this basically shouldn\'t matter.\n */\nstatic int kswapd(void *p)\n{\n unsigned int alloc_order, reclaim_order;\n unsigned int highest_zoneidx = MAX_NR_ZONES - 1;\n pg_data_t *pgdat = (pg_data_t*)p;\n struct task_struct *tsk = current;\n const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);\n\n if (!cpumask_empty(cpumask))\n set_cpus_allowed_ptr(tsk, cpumask);\n\n /*\n 告诉内存管理我们是一个“内存分配器”,并且如果我们需要更多内存,\n 我们应该无论如何都能够访问它(请参阅“__alloc_pages()”)。\n “kswapd”不应该被正常的页面释放逻辑所捕获。\n (Kswapd通常不需要内存,但有时您需要一小部分内存,以便能够分页出其他内容,\n 并且这个标志本质上可以保护我们免受在尝试释放第一个内存块时递归地尝试释放\n 更多内存的限制的影响)。 */\n //标识自己是kswap进程,并允许回写脏页到swap分区\n tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;\n set_freezable();\n\n WRITE_ONCE(pgdat->kswapd_order, 0);\n WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);\n for ( ; ; ) {\n bool ret;\n\n alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);\n highest_zoneidx = kswapd_highest_zoneidx(pgdat,\n highest_zoneidx);\n\nkswapd_try_sleep: // kswap进程尝试睡眠\n kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,\n highest_zoneidx);\n // 核心处理1,实质就是判断各个zone是否为balanced,\n // 是否balanced即判断zone内可申请的mem数量是否在watermark[high] 之上;\n /* Read the new order and highest_zoneidx */\n alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);\n highest_zoneidx = kswapd_highest_zoneidx(pgdat,\n highest_zoneidx);\n WRITE_ONCE(pgdat->kswapd_order, 0);\n WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);\n\n ret = try_to_freeze();//判断下当前是否是休眠操作\n \n //是否有人调用thread_stop,正常情况下就是在module_exit时候调用;\n if (kthread_should_stop())\n break;\n\n /*\n * We can speed up thawing tasks if we don\'t call balance_pgdat\n * after returning from the refrigerator\n */\n if (ret) //如果是suspend状态的话,就啥也不干,继续循环\n continue;\n\n /* 
重新获取从请求的阶数开始,但如果高阶重新获取失败,\n 则kswapd会回退到为阶数0重新获取。如果发生这种情况,\n kswapd将考虑在完成重新获取时(重新获取顺序)睡眠,\n 但会唤醒kcompactd来紧缩原始请求(分配顺序)的空间。 */\n trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,\n alloc_order);\n 核心处理2,进行实质回收操作\n reclaim_order = balance_pgdat(pgdat, alloc_order,\n highest_zoneidx);\n if (reclaim_order < alloc_order) //回收数量不够,则再来一次;\n goto kswapd_try_sleep;\n }\n\n tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);\n\n return 0;\n}\n```\n\n这个主循环实际上干了两件事:\n\n
\n\n### 3.2.5.1 kswapd_try_to_sleep\n\n\n\n```\nstatic void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, \n int reclaim_order, unsigned int highest_zoneidx)\n{\n long remaining = 0;\n DEFINE_WAIT(wait);\n\n //如果需要退出,则直接返回\n if (freezing(current) || kthread_should_stop()) \n return;\n\n // wait加入kswap_wait queue中,即等待被唤醒,注意此时没有让出CPU\n prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);\n\n // 核心处理1:执行 pgdat_balanced(实际就是判断水位是否达到high) \n // 判断是否各个zone都是balanced\n if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {\n // 能平衡了\n /* 删除部分 */\n\n //唤醒compact线程处理,这个是压缩内存\n wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);\n\n remaining = schedule_timeout(HZ/10); //sleep 100ms\n\n /* 如果被提前唤醒,则重置kswapd_highest_zoneidx和order。\n 这些值将来自唤醒请求或先前因为提前唤醒而休眠的请求 */\n if (remaining) {//remaining > 0说明被唤醒而非100ms结束\n WRITE_ONCE(pgdat->kswapd_highest_zoneidx,\n kswapd_highest_zoneidx(pgdat,\n highest_zoneidx));\n\n if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)\n WRITE_ONCE(pgdat->kswapd_order, reclaim_order);\n }\n //将wait从kswapd_wait queue中移除,并将当前状态配置为running\n finish_wait(&pgdat->kswapd_wait, &wait);\n //将wait在加入kswapd wait queue,确保queue中只有一个等待事件;\n prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);\n }\n\n //到这里确实没有被唤醒的话\n if (!remaining &&\n prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {\n\n /* 删除部分 */\n //没有需要退出thread,则真正的进入睡眠,主动调用schedule调度\n if (!kthread_should_stop())\n schedule();\n\n /* 删除部分 */\n} \n/* 删除部分 */\n\n finish_wait(&pgdat->kswapd_wait, &wait); //唤醒\n}\n```\n\n其主要的流程为:\n\n
\n\n### 3.2.5.2 balance_pgdat\n\n从该函数的注释可以看出kswapd按高端内存->标准内存->DMA方向进行扫描,其次平衡的标准为free_pages > high_wmark_pages。\n\n\n\n```\n/*\n * For kswapd, balance_pgdat() will reclaim pages across a node from zones\n * that are eligible for use by the caller until at least one zone is\n * balanced.\n *\n * Returns the order kswapd finished reclaiming at.\n *\n * kswapd scans the zones in the highmem->normal->dma direction. It skips\n * zones which have free_pages > high_wmark_pages(zone), but once a zone is\n * found to have free_pages <= high_wmark_pages(zone), any page in that zone\n * or lower is eligible for reclaim until at least one usable zone is\n * balanced.\n */\nstatic int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)\n{\n int i;\n unsigned long nr_soft_reclaimed;\n unsigned long nr_soft_scanned;\n unsigned long pflags;\n unsigned long nr_boost_reclaim;\n unsigned long zone_boosts[MAX_NR_ZONES] = { 0, };\n bool boosted;\n struct zone *zone;\n struct scan_control sc = {\n .gfp_mask = GFP_KERNEL,\n .order = order,\n .may_unmap = 1,\n };\n\n/* 删除部分 */\n\n nr_boost_reclaim = 0;\n for (i = 0; i <= highest_zoneidx; i++) {\n zone = pgdat->node_zones + i;\n if (!managed_zone(zone))\n continue;\n\n nr_boost_reclaim += zone->watermark_boost;\n zone_boosts[i] = zone->watermark_boost; //记录该zone被抬升的水位\n }\n boosted = nr_boost_reclaim; //水位是否有被抬升\n\n#define DEF_PRIORITY 12\n\nrestart:\n sc.priority = DEF_PRIORITY;\n do {\n unsigned long nr_reclaimed = sc.nr_reclaimed;\n bool raise_priority = true;\n bool balanced;\n bool ret;\n\n sc.reclaim_idx = highest_zoneidx;\n\n if (buffer_heads_over_limit) {\n //如果 buffer_heads 超过限制,尝试释放buffer_heads 的 page\n for (i = MAX_NR_ZONES - 1; i >= 0; i--) {\n zone = pgdat->node_zones + i;\n if (!managed_zone(zone))\n continue;\n\n sc.reclaim_idx = i;\n break;\n }\n }\n\n // 如果当前order处于不平衡的状态,就忽略水线抬高,并重新开始\n balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);\n if (!balanced && nr_boost_reclaim) {\n nr_boost_reclaim = 0;\n 
goto restart;\n }\n\n // 如果是平衡状态并且非水线太高,直接out\n if (!nr_boost_reclaim && balanced)\n goto out;\n\n /* Limit the priority of boosting to avoid reclaim writeback */\n if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2)\n raise_priority = false;\n\n // 对于水线抬升的回收,不写回,也不swap\n sc.may_writepage = !laptop_mode && !nr_boost_reclaim;\n sc.may_swap = !nr_boost_reclaim;\n\n \n //走到这里说明将各个zone都判断过之后,回收内存仍不够用,所以对anon 进行老化处理\n age_active_anon(pgdat, &sc);\n\n /*\n * If we\'re getting trouble reclaiming, start doing writepage\n * even in laptop mode.\n */\n //优先级小于10还没有搞到足够内存的时候,需要打开writepage\n if (sc.priority < DEF_PRIORITY - 2)\n sc.may_writepage = 1;\n\n /* Call soft limit reclaim before calling shrink_node. */\n sc.nr_scanned = 0;\n nr_soft_scanned = 0;\n nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order, sc.gfp_mask, &nr_soft_scanned);\n sc.nr_reclaimed += nr_soft_reclaimed;\n\n /*\n * There should be no need to raise the scanning priority if\n * enough pages are already being scanned that that high\n * watermark would be met at 100% efficiency.\n */\n // 进行shrink_node回收\n if (kswapd_shrink_node(pgdat, &sc))\n raise_priority = false;\n\n /*\n * If the low watermark is met there is no need for processes\n * to be throttled on pfmemalloc_wait as they should not be\n * able to safely make forward progress. 
Wake them\n */\n if (waitqueue_active(&pgdat->pfmemalloc_wait) &&\n allow_direct_reclaim(pgdat))\n wake_up_all(&pgdat->pfmemalloc_wait);\n\n /* Check if kswapd should be suspending */\n __fs_reclaim_release();\n ret = try_to_freeze();\n __fs_reclaim_acquire();\n //suspend或者退出的话,这里直接跳出去;\n if (ret || kthread_should_stop()) \n break;\n\n /*\n * Raise priority if scanning rate is too low or there was no\n * progress in reclaiming pages\n */\n nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;\n nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed);\n\n /*\n * If reclaim made no progress for a boost, stop reclaim as\n * IO cannot be queued and it could be an infinite loop in\n * extreme circumstances.\n */\n if (nr_boost_reclaim && !nr_reclaimed)\n break;\n\n if (raise_priority || !nr_reclaimed)\n sc.priority--; //没回收够,则priority--\n } while (sc.priority >= 1);\n\n if (!sc.nr_reclaimed) //没回收完,失败次数++\n pgdat->kswapd_failures++;\n\nout:\n /* If reclaim was boosted, account for the reclaim done in this pass */\n if (boosted) {\n unsigned long flags;\n\n for (i = 0; i <= highest_zoneidx; i++) {\n if (!zone_boosts[i])\n continue;\n\n /* Increments are under the zone lock */\n zone = pgdat->node_zones + i;\n spin_lock_irqsave(&zone->lock, flags);\n //如果是被抬升过的,需要计算重新计算抬升水线,减回去\n zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);\n spin_unlock_irqrestore(&zone->lock, flags);\n }\n\n /*\n * As there is now likely space, wakeup kcompact to defragment\n * pageblocks.\n */\n wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);\n }\n\n snapshot_refaults(NULL, pgdat);\n __fs_reclaim_release();\n psi_memstall_leave(&pflags);\n set_task_reclaim_state(current, NULL);\n\n /*\n * Return the order kswapd stopped reclaiming at as\n * prepare_kswapd_sleep() takes it into account. If another caller\n * entered the allocator slow path while kswapd was awake, order will\n * remain at the higher level.\n */\n return sc.order;\n}\n```\n\n
\n\n### 3.2.6 shrink_node\n\nshrink_node是内存回收的核心函数,用于扫参数pgdat内存节点中所有的可回收页面,并进行回收处理。上述的三种回收方式,其核心实现都是shrink_node函数,不同的是准备动作和扫描控制器。下面将着重分析一下shrink_node的实现\n\n\n\n```\nstatic void shrink_node(pg_data_t *pgdat, struct scan_control *sc)\n{\n struct reclaim_state *reclaim_state = current->reclaim_state;\n unsigned long nr_reclaimed, nr_scanned;\n struct lruvec *target_lruvec;\n bool reclaimable = false;\n unsigned long file;\n\n // 获得目标lruvec,lruvec包含5个lru链表,分别是活跃/非活跃匿名页,\n // 活跃/非活跃文件页,不可回收链表\n target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);\n\nagain:\n // nr是记录扫描过程中,各类页框的数量\n memset(&sc->nr, 0, sizeof(sc->nr));\n\n // 获取已经扫描的可回收和可扫描页数\n nr_reclaimed = sc->nr_reclaimed;\n nr_scanned = sc->nr_scanned;\n\n /*\n * Determine the scan balance between anon and file LRUs.\n */\n spin_lock_irq(&pgdat->lru_lock);\n // 用于匿名页和文件页lru链表平衡\n sc->anon_cost = target_lruvec->anon_cost;\n sc->file_cost = target_lruvec->file_cost;\n spin_unlock_irq(&pgdat->lru_lock);\n\n /*\n * Target desirable inactive:active list ratios for the anon\n * and file LRU lists.\n */\n // 调整扫描匿名页和文件页的比率\n if (!sc->force_deactivate) {\n unsigned long refaults;\n\n refaults = lruvec_page_state(target_lruvec,\n WORKINGSET_ACTIVATE_ANON);\n // 如果工作匿名页相比上一次循环已经有了变化,\n // 或者当前非活跃匿名页链表数量过少,则需要扫描匿名页\n if (refaults != target_lruvec->refaults[0] ||\n inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))\n // 扫描匿名页\n sc->may_deactivate |= DEACTIVATE_ANON;\n else\n // 不扫描匿名页\n sc->may_deactivate &= ~DEACTIVATE_ANON;\n\n /*\n * When refaults are being observed, it means a new\n * workingset is being established. 
Deactivate to get\n * rid of any stale active pages quickly.\n */\n // 如果工作文件页相比上一次循环已经有了变化,\n // 或者当前非活跃文件页链表数量过少,则需要扫描文件页\n refaults = lruvec_page_state(target_lruvec,\n WORKINGSET_ACTIVATE_FILE);\n if (refaults != target_lruvec->refaults[1] ||\n inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))\n // 扫描文件页\n sc->may_deactivate |= DEACTIVATE_FILE;\n else\n // 不扫描文件页\n sc->may_deactivate &= ~DEACTIVATE_FILE;\n } else\n // 匿名和文件页都扫描\n sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;\n\n /*\n * If we have plenty of inactive file pages that aren\'t\n * thrashing, try to reclaim those first before touching\n * anonymous pages.\n */\n // 如果不活跃的文件页框数量很多并且本次不扫描文件页,则做扫描平衡时,优先扫描文件页\n file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);\n if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))\n sc->cache_trim_mode = 1;\n else\n sc->cache_trim_mode = 0;\n\n /*\n * Prevent the reclaimer from falling into the cache trap: as\n * cache pages start out inactive, every cache fault will tip\n * the scan balance towards the file LRU. And as the file LRU\n * shrinks, so does the window for rotation from references.\n * This means we have a runaway feedback loop where a tiny\n * thrashing file LRU becomes infinitely more attractive than\n * anon pages. Try to detect this based on file LRU size.\n */\n if (!cgroup_reclaim(sc)) {\n // 不支持mem_cgroup配置场景\n unsigned long total_high_wmark = 0;\n unsigned long free, anon;\n int z;\n // 计算node中所有zone的空闲页面数\n free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);\n // 计算node中所有文件页内存页数\n file = node_page_state(pgdat, NR_ACTIVE_FILE) +\n node_page_state(pgdat, NR_INACTIVE_FILE);\n\n for (z = 0; z < MAX_NR_ZONES; z++) {\n struct zone *zone = &pgdat->node_zones[z];\n if (!managed_zone(zone))\n continue;\n // 统计node中所有zone的高水位保留值\n total_high_wmark += high_wmark_pages(zone);\n }\n\n /*\n * Consider anon: if that\'s low too, this isn\'t a\n * runaway file reclaim problem, but rather just\n * extreme pressure. 
Reclaim as per usual then.\n */\n // node的匿名页数\n anon = node_page_state(pgdat, NR_INACTIVE_ANON);\n // 如果该node文件页框数量很少,则做扫描平衡时,选择匿名页\n sc->file_is_tiny =\n file + free <= total_high_wmark &&\n !(sc->may_deactivate & DEACTIVATE_ANON) &&\n anon >> sc->priority;\n }\n\n // 根据memcg配置进行页面回收,执行的主体为:\n // 对lru链表进行回收\n shrink_lruvec(lruvec, sc);\n // 对slab进行回收\n shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,\n sc->priority);\n shrink_node_memcgs(pgdat, sc);\n\n // slab的扫描到的页框数也计算上\n if (reclaim_state) {\n sc->nr_reclaimed += reclaim_state->reclaimed_slab;\n reclaim_state->reclaimed_slab = 0;\n }\n\n /* Record the subtree\'s reclaim efficiency */\n vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,\n sc->nr_scanned - nr_scanned,\n sc->nr_reclaimed - nr_reclaimed);\n\n // 如果本次内存回收扫描到页框,则重设kswapd失败计数器,避免kswapd任务运行过于频繁\n if (sc->nr_reclaimed - nr_reclaimed)\n reclaimable = true;\n\n if (current_is_kswapd()) {\n \n // 如果node中有很多页面正在被回写,则设置PGDAT_WRITEBACK标志\n if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken)\n set_bit(PGDAT_WRITEBACK, &pgdat->flags);\n\n /* Allow kswapd to start writing pages during reclaim.*/\n // 在lru的尾部发现很多脏页,则设置PGDAT_DIRTY标志\n if (sc->nr.unqueued_dirty == sc->nr.file_taken)\n set_bit(PGDAT_DIRTY, &pgdat->flags);\n\n /*\n * If kswapd scans pages marked for immediate\n * reclaim and under writeback (nr_immediate), it\n * implies that pages are cycling through the LRU\n * faster than they are written so also forcibly stall.\n */\n // 等待页面回写完成\n if (sc->nr.immediate)\n congestion_wait(BLK_RW_ASYNC, HZ/10);\n }\n\n // 如果lru中有很多脏页,需要置上LRUVEC_CONGESTED标记\n if ((current_is_kswapd() ||\n (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) &&\n sc->nr.dirty && sc->nr.dirty == sc->nr.congested)\n set_bit(LRUVEC_CONGESTED, &target_lruvec->flags);\n\n // 如果是直接回收,当node变得很拥挤(脏页过多),\n // 则阻塞等待一段时间,等一些页面回写完成后,才继续进行回收操作。但kswapd中此处不会阻塞\n if (!current_is_kswapd() && current_may_throttle() &&\n !sc->hibernation_mode &&\n test_bit(LRUVEC_CONGESTED, 
&target_lruvec->flags))\n wait_iff_congested(BLK_RW_ASYNC, HZ/10);\n\n // 判断是否需继续回收,继续回收的条件是该node中所有zone都不满足压缩条件\n if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,\n sc))\n goto again;\n\n // 如果回收通过,则重设kswapd失败计数器,该计数器可以延缓直接内存回收的启动时机\n if (reclaimable)\n pgdat->kswapd_failures = 0;\n}\n```\n\n下面简单总结下核心流程:\n\n### 3.2.6.1 shrink_lruvec\n\n\n\n```\nstatic void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)\n{\n unsigned long nr[NR_LRU_LISTS];\n unsigned long targets[NR_LRU_LISTS];\n unsigned long nr_to_scan;\n enum lru_list lru;\n unsigned long nr_reclaimed = 0;\n unsigned long nr_to_reclaim = sc->nr_to_reclaim;\n struct blk_plug plug;\n bool scan_adjusted;\n\n // 计算本次内存回收每个lru链表扫描的页面数,存放到nr数组中\n // 两个因素会影响扫描页面数:\n // 1、优先级,优先级越高扫描页面越少,优先级是0是,扫描全部链表\n // 2、swappiness,可以配置匿名页和文件页的扫描比率\n get_scan_count(lruvec, sc, nr);\n\n /* Record the original scan target for proportional adjustments later */\n // 将nr数组临时保存到targets数组中\n memcpy(targets, nr, sizeof(nr));\n\n scan_adjusted = (!cgroup_reclaim(sc) && !current_is_kswapd() &&\n sc->priority == DEF_PRIORITY);\n\n blk_start_plug(&plug);\n // 如果LRU_INACTIVE_ANON、LRU_ACTIVE_FILE、LRU_INACTIVE_FILE\n // 中任意一个没有回收完,都会继续回收\n while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||\n nr[LRU_INACTIVE_FILE]) {\n unsigned long nr_anon, nr_file, percentage;\n unsigned long nr_scanned;\n\n for_each_evictable_lru(lru) {\n if (nr[lru]) {\n // 一次最多扫描32个页面\n nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);\n nr[lru] -= nr_to_scan;\n // 从lru中回收nr_to_scan个页面,执行主体\n nr_reclaimed += shrink_list(lru, nr_to_scan,\n lruvec, sc);\n }\n }\n\n cond_resched();\n\n // 如果已经扫描到足够的空闲页,并且无需全部扫描nr中的页面,则停止扫描\n if (nr_reclaimed < nr_to_reclaim || scan_adjusted)\n continue;\n\n // 计算剩余的页面数\n nr_file = nr[LRU_INACTIVE_FILE] + nr[LRU_ACTIVE_FILE];\n nr_anon = nr[LRU_INACTIVE_ANON] + nr[LRU_ACTIVE_ANON];\n \n // 如果文件页或者匿名页已经回收完,则直接停止\n if (!nr_file || !nr_anon)\n break;\n\n // 计算哪种类型的页面还剩比较少,停止对剩余较少的lru进行扫描\n if (nr_file > nr_anon) {\n 
unsigned long scan_target = targets[LRU_INACTIVE_ANON] +\n targets[LRU_ACTIVE_ANON] + 1;\n lru = LRU_BASE;\n // 剩余需要扫描页面占比\n percentage = nr_anon * 100 / scan_target;\n } else {\n unsigned long scan_target = targets[LRU_INACTIVE_FILE] +\n targets[LRU_ACTIVE_FILE] + 1;\n lru = LRU_FILE;\n percentage = nr_file * 100 / scan_target;\n }\n\n /* Stop scanning the smaller of the LRU */\n nr[lru] = 0;\n nr[lru + LRU_ACTIVE] = 0;\n\n /*\n * Recalculate the other LRU scan count based on its original\n * scan target and the percentage scanning already complete\n */\n lru = (lru == LRU_FILE) ? LRU_BASE : LRU_FILE;\n nr_scanned = targets[lru] - nr[lru];\n nr[lru] = targets[lru] * (100 - percentage) / 100;\n nr[lru] -= min(nr[lru], nr_scanned);\n\n lru += LRU_ACTIVE;\n nr_scanned = targets[lru] - nr[lru];\n nr[lru] = targets[lru] * (100 - percentage) / 100;\n nr[lru] -= min(nr[lru], nr_scanned);\n\n scan_adjusted = true;\n }\n blk_finish_plug(&plug);\n \n // 累加总回收页面数\n sc->nr_reclaimed += nr_reclaimed;\n\n /*\n * Even if we did not try to evict anon pages at all, we want to\n * rebalance the anon lru active/inactive ratio.\n */\n // 非活动匿名页太少,从活动匿名页移动一部分到非活动匿名页中\n if (total_swap_pages && inactive_is_low(lruvec, LRU_INACTIVE_ANON))\n shrink_active_list(SWAP_CLUSTER_MAX, lruvec,\n sc, LRU_ACTIVE_ANON);\n}\n```\n\n在开启扫描之前,需要根据当前node的情况计算出匿名页和文件页的扫描比例。计算方式如下:\n\n### 3.2.6.2 shrink_list\n\n计算出各种类型的页框扫描的数量,每次去对应lru链表尾部取出一定数量的页框(一次最多32[nr_to_scan]个),但需要注意两点:1. 
只有非活跃链表中的页框才能被回收,活跃链表可能会被放入到非活跃链表中。2.就算是非活跃链表中的页框,也不是全部都能回收,还需要根据该页最近是否被访问过来确定。\n\n\n\n```\nstatic unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, struct lruvec *lruvec, struct scan_control *sc)\n{\n // 如果活动页不需要进行回收,但是需要考虑当非活动页数量不足时,从活动页移动到非活动页中\nif (is_active_lru(lru)) {\n // 如果是本次允许处理类型,才考虑放入到非活跃链表中,否则标记跳过\n if (sc->may_deactivate & (1 << is_file_lru(lru)))\n // 将页框从活动lru中隔离出来,然后加入到非活动lru中\n shrink_active_list(nr_to_scan, lruvec, sc, lru);\n else\n sc->skipped_deactivate = 1;\n return 0;\n }\n\n // 对非活动页进行回收\n return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);\n}\n```\n\n### 3.2.6.3 shrink_inactive_list\n\nshrink_list已经确定要进行内存回收的lru链表(只能是非活跃链表)以及需处理的page个数(nr_to_scan),接下来需要做以下几点事情:\n\n\n\n```\nstatic noinline_for_stack unsigned long\nshrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,\n struct scan_control *sc, enum lru_list lru)\n{\n LIST_HEAD(page_list);\n unsigned long nr_scanned;\n unsigned int nr_reclaimed = 0;\n unsigned long nr_taken;\n struct reclaim_stat stat;\n bool file = is_file_lru(lru);\n enum vm_event_item item;\n struct pglist_data *pgdat = lruvec_pgdat(lruvec);\n bool stalled = false;\n\n /* 删除部分 */\n\n // 将lru缓存刷入到lru链表中\n lru_add_drain();\n\n spin_lock_irq(&pgdat->lru_lock);\n\n // 从lru(非活动链表)尾部开始隔离一些page到page_list链表中,\n // 最终成功隔离的真实页框数量是nr_taken\n nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,\n &nr_scanned, sc, lru);\n\n __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);\n /* 删除部分 */\n\n spin_unlock_irq(&pgdat->lru_lock);\n\n // 没有隔离到页面,则返回0\n if (nr_taken == 0)\n return 0;\n\n // page_list中已经隔离出可回收的页,在这里进行回收,但不是所有都能回收掉\n nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);\n\n spin_lock_irq(&pgdat->lru_lock);\n\n // page_list可能还有本次无法最终完成回收的page,需要重新放回到lru链表中\n move_pages_to_lru(lruvec, &page_list);\n\n __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);\n lru_note_cost(lruvec, file, stat.nr_pageout);\n /* 删除部分 */\n\n 
spin_unlock_irq(&pgdat->lru_lock);\n\n // 最终将剩下的隔离出来页面都释放掉\n mem_cgroup_uncharge_list(&page_list);\n free_unref_page_list(&page_list);\n\n // 如果扫描到的脏页没有在IO队列中等待,则唤醒flusher任务进行回写\n if (stat.nr_unqueued_dirty == nr_taken)\n wakeup_flusher_threads(WB_REASON_VMSCAN);\n\n // 记录页面回收的情况\n sc->nr.dirty += stat.nr_dirty;\n sc->nr.congested += stat.nr_congested;\n sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;\n sc->nr.writeback += stat.nr_writeback;\n sc->nr.immediate += stat.nr_immediate;\n sc->nr.taken += nr_taken;\n if (file)\n sc->nr.file_taken += nr_taken;\n\n trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,\n nr_scanned, nr_reclaimed, &stat, sc->priority, file);\n return nr_reclaimed;\n}\n```\n\n### 3.2.6.4 shrink_page_list\n\n\n\n```\n/*\n * shrink_page_list() returns the number of reclaimed pages\n */\nstatic unsigned int shrink_page_list(struct list_head *page_list,\n struct pglist_data *pgdat,\n struct scan_control *sc,\n struct reclaim_stat *stat,\n bool ignore_references)\n{\n LIST_HEAD(ret_pages);\n LIST_HEAD(free_pages);\n unsigned int nr_reclaimed = 0;\n unsigned int pgactivate = 0;\n\n memset(stat, 0, sizeof(*stat));\n cond_resched();\n\n // 遍历所有page_list中的page,但并非所有都可以顺利释放掉\n while (!list_empty(page_list)) {\n struct address_space *mapping;\n struct page *page;\n enum page_references references = PAGEREF_RECLAIM;\n bool dirty, writeback, may_enter_fs;\n unsigned int nr_pages;\n\n cond_resched();\n\n // 从lru链表(非活跃链表)中获取一个page\n page = lru_to_page(page_list);\n // 从lru链表中删除该page节点\n list_del(&page->lru);\n\n // 尝试对page进行上锁操作,如果上锁失败,表示页面还在被占用,\n // 则将page添加到ret_pages中,后续统一处理\n if (!trylock_page(page))\n goto keep;\n\n VM_BUG_ON_PAGE(PageActive(page), page);\n\n // 获取page真实对应的页框数\n nr_pages = compound_nr(page);\n\n /* Account the number of base pages even though THP */\n sc->nr_scanned += nr_pages;\n\n // 如果page是不可回收页,则放置到ret_pages中统一处理\n if (unlikely(!page_evictable(page)))\n goto activate_locked;\n\n // 若当前回收不允许回收被映射了的页,剔除被映射了的页面(跳转到keep_locked)\n // 
1.sc->may_unmap==1 表示允许回收映射的页面\n \n // 2.page_mapped(page)使用于判断page->_mapcount是否大于0.\n // 大于0表示有一个或多个用户PTE映射到该页\n \n // 3.此处if表明当不允许回收映射的页面,且此时有用户PTE映射到该页面,\n // 则直接跳转到keep_locked.\n if (!sc->may_unmap && page_mapped(page))\n goto keep_locked;\n\n may_enter_fs = (sc->gfp_mask & __GFP_FS) ||\n (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));\n\n // 检查page是否是脏页或者正在回写的页\n page_check_dirty_writeback(page, &dirty, &writeback);\n if (dirty || writeback)\n stat->nr_dirty++;\n\n // 如果是脏页但是不是正在回写的页,相应的没有回写的标记加1\n if (dirty && !writeback)\n stat->nr_unqueued_dirty++;\n\n /* 删除部分 */\n\n //若该页正处于回写状态,需要考虑下面3种场景:\n \n /*(1) 如果当前页面正处于回写状态且属于可回收页类型,\n 那么当当前页面回收者是kswapd线程且此时当前页对应的内存节点中有大量正在回写的页面时,\n linux os此时会增加nr_immediate计数,然后跳转到activate_locked标签处执行完当前页的\n 后续安置操作,接着继续扫描page_list链表,而不是睡眠等待当前页的回写完成\n (若等待回写完成,可能kswapd线程会导致无限期等待,\n 因为在页面回写时可能会出现磁盘I/O错误或者磁盘连接错误)*/\n \n /*(2) 若当前页面不是可回收页(无回收标记,通过PageReclaim判断)\n 或者是当前页面分配器的调用者并未使用__GFP_FS或__GFP_IO分配标志。\n 那么给当前页设置PG_reclaim标志位,并增加nr_writeback计数,\n 然后跳转到activate_locked标签处,继续对page_list链表进行扫描*/\n \n //(3) 除了上面两种情况外,若当前页正在回写,那么当前进程会睡眠等待当前页的回写完成 \n if (PageWriteback(page)) {\n /* Case 1 above */\n if (current_is_kswapd() &&\n PageReclaim(page) &&\n test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {\n stat->nr_immediate++;\n goto activate_locked;\n\n /* Case 2 above */\n } else if (writeback_throttling_sane(sc) ||\n !PageReclaim(page) || !may_enter_fs) {\n // 将page标记为需要回收,并且放置到lru的尾部,尽快回收\n SetPageReclaim(page);\n stat->nr_writeback++;\n goto activate_locked;\n\n /* Case 3 above */\n } else {\n unlock_page(page);\n //等待当前页回写完成\n wait_on_page_writeback(page);\n /* then go back and try same page again */\n // 当前页回写完成后,将当前页加入page_list链表尾,\n // continue后while循环又会扫到刚回写完成的当前页进行回收处理\n list_add_tail(&page->lru, page_list);\n continue;\n }\n }\n\n /* 删除部分 */\n\n /*\n * Anonymous process memory has backing store?\n * Try to allocate it some swap space here.\n * Lazyfree page could be freed directly\n */\n // 如果page是匿名页\n if (PageAnon(page) && 
PageSwapBacked(page)) {\n \n // 没有加入到swapcache中,则加入到swapcache。\n // 匿名页在没有最终写入到swap区时,页面都会\n // 一直停留在swapcache中,只有最终写入到swap区\n // 后,才最终从swapcache中删除。\n \n if (!PageSwapCache(page)) {\n // 如果本次内存申请不支持IO操作,\n // 则继续将page放置在原lru链表中\n if (!(sc->gfp_mask & __GFP_IO))\n goto keep_locked;\n if (page_maybe_dma_pinned(page))\n goto keep_locked;\n if (PageTransHuge(page)) {\n /* cannot split THP, skip it */\n if (!can_split_huge_page(page, NULL))\n goto activate_locked;\n /*\n * Split pages without a PMD map right\n * away. Chances are some or all of the\n * tail pages can be freed without IO.\n */\n if (!compound_mapcount(page) &&\n split_huge_page_to_list(page,\n page_list))\n goto activate_locked;\n }\n \n // 将page加入到swap分区,此时可以理解为在swap分区(实质是磁盘)中\n // 创建一个“文件”;并且将page标记为dirty,让page被回写到“文件”中。\n // 但由于回写是一个IO过程,此时对该page的访问其实是访问到page cache。\n if (!add_to_swap(page)) {\n if (!PageTransHuge(page))\n goto activate_locked_split;\n /* Fallback to swap normal pages */\n if (split_huge_page_to_list(page,\n page_list))\n goto activate_locked;\n#ifdef CONFIG_TRANSPARENT_HUGEPAGE\n count_vm_event(THP_SWPOUT_FALLBACK);\n#endif\n if (!add_to_swap(page))\n goto activate_locked_split;\n }\n // 后续可能会发生文件系统的相关操作\n may_enter_fs = true;\n\n /* Adding to swap updated mapping */\n // 获取此匿名页在swap cache中的address space\n mapping = page_mapping(page);\n }\n } else if (unlikely(PageTransHuge(page))) {\n /* Split file THP */\n if (split_huge_page_to_list(page, page_list))\n goto keep_locked;\n }\n\n /*\n * THP may get split above, need minus tail pages and update\n * nr_pages to avoid accounting tail pages twice.\n *\n * The tail pages that are added into swap cache successfully\n * reach here.\n */\n if ((nr_pages > 1) && !PageTransHuge(page)) {\n sc->nr_scanned -= (nr_pages - 1);\n nr_pages = 1;\n }\n\n /*\n * The page is mapped into the page tables of one or more\n * processes. 
Try to unmap it here.\n */\n // 由于该page要被回收,所以所有映射了该页的进程都需要做unmap操作\n if (page_mapped(page)) {\n enum ttu_flags flags = TTU_BATCH_FLUSH;\n bool was_swapbacked = PageSwapBacked(page);\n\n if (unlikely(PageTransHuge(page)))\n flags |= TTU_SPLIT_HUGE_PMD;\n // 对所有映射了该page的进程做unmap操作\n \n /*1、如果page是匿名页,则将进程页表项改成swap cache中的偏移量swp_entry_t\n ,这时候如果有进程继续访问页表,则访问的是swap cache,\n 并且停止将该page继续写入swap中,因为该页还在被使用。*/\n \n // 2、如果是文件页,清空对应的页表项即可。\n if (!try_to_unmap(page, flags)) {\n stat->nr_unmap_fail += nr_pages;\n if (!was_swapbacked && PageSwapBacked(page))\n stat->nr_lazyfree_fail += nr_pages;\n goto activate_locked;\n }\n }\n\n // 如果当前页是脏页(脏文件页或者上面添加到swap的匿名页)\n if (PageDirty(page)) {\n // 如果是脏文件页,并且不是在kswap(非kswap进程不能对文件页回写)\n // 中和不在内存回收流程中,则将page设置PG_reclaim标记,\n // 并放置到活动链表中,待回写到磁盘后,将其回收。\n if (page_is_file_lru(page) &&\n (!current_is_kswapd() || !PageReclaim(page) ||\n !test_bit(PGDAT_DIRTY, &pgdat->flags))) {\n /*\n * Immediately reclaim when written back.\n * Similar in principal to deactivate_page()\n * except we already have the page isolated\n * and know it\'s dirty\n */\n inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);\n SetPageReclaim(page);\n\n goto activate_locked;\n }\n\n if (references == PAGEREF_RECLAIM_CLEAN)\n goto keep_locked;\n if (!may_enter_fs)\n goto keep_locked;\n if (!sc->may_writepage)\n goto keep_locked;\n\n /*\n * Page is dirty. 
Flush the TLB if a writable entry\n * potentially exists to avoid CPU writes after IO\n * starts and then write it out here.\n */\n try_to_unmap_flush_dirty();\n // 运行到这里,说明的确是在回收流程中出现的脏页,尝试将脏页换出\n // 这里也只是将页写入到块中,并不是真正的写入到文件系统\n // pageout执行成功会标记PG_reclaim和PG_writeback,并且清空PG_dirty标志\n // 当页回写完成,PG_reclaim和PG_writeback标记都会被清除\n switch (pageout(page, mapping)) {\n case PAGE_KEEP:\n goto keep_locked;\n case PAGE_ACTIVATE:\n goto activate_locked;\n case PAGE_SUCCESS:\n stat->nr_pageout += thp_nr_pages(page);\n // 如果页还没有回写完成,则将该page继续放在该lru中(异步场景)\n if (PageWriteback(page))\n goto keep;\n // 正常来说pageout成功后,应该清除PG_dirty属性,\n // 如果此处还是有PG_dirty属性即保留该page\n if (PageDirty(page))\n goto keep;\n\n /*\n * A synchronous write - probably a ramdisk. Go\n * ahead and try to reclaim the page.\n */\n if (!trylock_page(page))\n goto keep;\n if (PageDirty(page) || PageWriteback(page))\n goto keep_locked;\n mapping = page_mapping(page);\n case PAGE_CLEAN:\n ; /* try to free the page below */\n }\n }\n\n // 运行到这里说明页已经回写完成(异步场景,如果没有回写完成,则上面已经调到keep流程了)\n // 如果是文件页(如果private域设置了PAGE_FLAGS_PRIVATE),还需要考虑释放buffer_head\n if (page_has_private(page)) {\n // 释放该page\n if (!try_to_release_page(page, sc->gfp_mask))\n goto activate_locked;\n if (!mapping && page_count(page) == 1) {\n unlock_page(page);\n if (put_page_testzero(page))\n goto free_it;\n else {\n /*\n * rare race with speculative reference.\n * the speculative reference will free\n * this page shortly, so we may\n * increment nr_reclaimed here (and\n * leave it off the LRU).\n */\n nr_reclaimed++;\n continue;\n }\n }\n }\n\n // 如果是匿名页并且没有被交换到swap区中,page的refcount应该是1\n if (PageAnon(page) && !PageSwapBacked(page)) {\n /* follow __remove_mapping for reference */\n if (!page_ref_freeze(page, 1))\n goto keep_locked;\n if (PageDirty(page)) {\n page_ref_unfreeze(page, 1);\n goto keep_locked;\n }\n\n count_vm_event(PGLAZYFREED);\n count_memcg_page_event(page, PGLAZYFREED);\n } else if (!mapping || !__remove_mapping(mapping, page, true,\n 
sc->target_mem_cgroup))\n \n // __remove_mapping负责将page从基树中移除,并且将page的refcount减2,如果\n // refcount最终是0,则表示该page可以回收了;但如果不是0,表示还有其他地方\n // 引用,则继续保留在lru中,不能回收。还有一种情况也不能释放,就是虽然refcount\n // 是0了,但是页面还是脏页,表示又有进程对页面进行了访问并且unmap操作,如果\n // 这种场景页释放了,那么数据就会丢失\n goto keep_locked;\n\n unlock_page(page);\nfree_it:\n /*\n * THP may get swapped out in a whole, need account\n * all base pages.\n */\n nr_reclaimed += nr_pages;\n\n /*\n * Is there need to periodically free_page_list? It would\n * appear not as the counts should be low\n */\n // 到这里的page都是可以被回收的,有三种情况:\n // 1、无需回写的页(refcount为0的非脏页)\n // 2、本次同步回写的页\n // 3、本次或者上次异步回写完成的页(上一次异步写的页可能在本次回收才最终完成)\n if (unlikely(PageTransHuge(page)))\n destroy_compound_page(page);\n else\n // 将page添加到free_pages链表等待回收\n list_add(&page->lru, &free_pages);\n continue;\n\nactivate_locked_split:\n /*\n * The tail pages that are failed to add into swap cache\n * reach here. Fixup nr_scanned and nr_pages.\n */\n if (nr_pages > 1) {\n sc->nr_scanned -= (nr_pages - 1);\n nr_pages = 1;\n }\nactivate_locked:\n // page被“锁”在内存中,不允许回收。\n /* Not a candidate for swapping, so reclaim swap space. 
*/\n // 如果page已经被放置到swap区中,则尝试在swapcache中释放本页\n if (PageSwapCache(page) && (mem_cgroup_swap_full(page) ||\n PageMlocked(page)))\n try_to_free_swap(page);\n VM_BUG_ON_PAGE(PageActive(page), page);\n // 如果该page最近被访问过,则将其设置成active,后续会放到活动链表中\n if (!PageMlocked(page)) {\n int type = page_is_file_lru(page);\n SetPageActive(page);\n stat->nr_activate[type] += nr_pages;\n count_memcg_page_event(page, PGACTIVATE);\n }\nkeep_locked:\n unlock_page(page);\nkeep:\n // 将page添加到ret_pages链表中\n list_add(&page->lru, &ret_pages);\n VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);\n }\n\n pgactivate = stat->nr_activate[0] + stat->nr_activate[1];\n\n mem_cgroup_uncharge_list(&free_pages);\n try_to_unmap_flush();\n // 将所有page释放给伙伴系统的pcplist(order为0)中\n free_unref_page_list(&free_pages);\n\n // 将ret_pages和page_list链表合并,也就是本次无法回收的页面,需要继续\n // 放回到lru链表中。\n list_splice(&ret_pages, page_list);\n count_vm_events(PGACTIVATE, pgactivate);\n\n // 返回最终成功回收的页面数\n return nr_reclaimed;\n}\n```\n\nlru页框回收的核心函数,流程非常复杂,下面总结下关键流程:\n\n
\n\n\n\n
\n\n[https://zhuanlan.zhihu.com/p/696381207](https://zhuanlan.zhihu.com/p/696381207)
\n -->