How to swap out the anonymous page?


 Anonymous pages are swapped out to increase the amount of free memory. Before a page can be swapped out, it must first be in the inactive state. When a page on the inactive list is selected for swap-out, the core of the logic is in the shrink_page_list() function:
 762 /*
 763  * shrink_page_list() returns the number of reclaimed pages
 764  */
 765 static unsigned long shrink_page_list(struct list_head *page_list,
 766                                       struct mem_cgroup_zone *mz,
 767                                       struct scan_control *sc,
 768                                       int priority,
 769                                       unsigned long *ret_nr_dirty,
 770                                       unsigned long *ret_nr_writeback)
 771 {
 772         LIST_HEAD(ret_pages);
 773         LIST_HEAD(free_pages);
 774         int pgactivate = 0;
 775         unsigned long nr_dirty = 0;
 776         unsigned long nr_congested = 0;
 777         unsigned long nr_reclaimed = 0;
 778         unsigned long nr_writeback = 0;
 779
 780         cond_resched();
 781
 782         while (!list_empty(page_list)) {
 783                 enum page_references references;
 784                 struct address_space *mapping;
 785                 struct page *page;
 786                 int may_enter_fs;
 787
 788                 cond_resched();
 789
 790                 page = lru_to_page(page_list);
 791                 list_del(&page->lru);
 792
 793                 if (!trylock_page(page))
 794                         goto keep;
 795
 796                 VM_BUG_ON(PageActive(page));
 797                 VM_BUG_ON(page_zone(page) != mz->zone);
 798
 799                 sc->nr_scanned++;
 800
 801                 if (unlikely(!page_evictable(page, NULL)))
 802                         goto cull_mlocked;
 803
 804                 if (!sc->may_unmap && page_mapped(page))
 805                         goto keep_locked;
 806
 807                 /* Double the slab pressure for mapped and swapcache pages */
 808                 if (page_mapped(page) || PageSwapCache(page))
 809                         sc->nr_scanned++;
 810
 811                 may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
 812                         (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 813
 814                 if (PageWriteback(page)) {
 815                         nr_writeback++;
 816                         /*
 817                          * Synchronous reclaim cannot queue pages for
 818                          * writeback due to the possibility of stack overflow
 819                          * but if it encounters a page under writeback, wait
 820                          * for the IO to complete.
 821                          */
 822                         if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
 823                             may_enter_fs)
 824                                 wait_on_page_writeback(page);
 825                         else {
 826                                 unlock_page(page);
 827                                 goto keep_lumpy;
 828                         }
 829                 }
 830
 831                 references = page_check_references(page, mz, sc);
 832                 switch (references) {
 833                 case PAGEREF_ACTIVATE:
 834                         goto activate_locked;
 835                 case PAGEREF_KEEP:
 836                         goto keep_locked;
 837                 case PAGEREF_RECLAIM:
 838                 case PAGEREF_RECLAIM_CLEAN:
 839                         ; /* try to reclaim the page below */
 840                 }
 841
 842                 /*
 843                  * Anonymous process memory has backing store?
 844                  * Try to allocate it some swap space here.
 845                  */
 846                 if (PageAnon(page) && !PageSwapCache(page)) {
 847                         if (!(sc->gfp_mask & __GFP_IO))
 848                                 goto keep_locked;
 849                         if (!add_to_swap(page))
 850                                 goto activate_locked;
 851                         may_enter_fs = 1;
 852                 }
 853
 854                 mapping = page_mapping(page);
 855
 856                 /*
 857                  * The page is mapped into the page tables of one or more
 858                  * processes. Try to unmap it here.
 859                  */
 860                 if (page_mapped(page) && mapping) {
 861                         switch (try_to_unmap(page, TTU_UNMAP)) {
 862                         case SWAP_FAIL:
 863                                 goto activate_locked;
 864                         case SWAP_AGAIN:
 865                                 goto keep_locked;
 866                         case SWAP_MLOCK:
 867                                 goto cull_mlocked;
 868                         case SWAP_SUCCESS:
 869                                 ; /* try to free the page below */
 870                         }
 871                 }
 872
 873                 if (PageDirty(page)) {
 874                         nr_dirty++;
 875
 876                         /*
 877                          * Only kswapd can writeback filesystem pages to
 878                          * avoid risk of stack overflow but do not writeback
 879                          * unless under significant pressure.
 880                          */
 881                         if (page_is_file_cache(page) &&
 882                                         (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
 883                                 /*
 884                                  * Immediately reclaim when written back.
 885                                  * Similar in principal to deactivate_page()
 886                                  * except we already have the page isolated
 887                                  * and know it's dirty
 888                                  */
 889                                 inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
 890                                 SetPageReclaim(page);
 891
 892                                 goto keep_locked;
 893                         }
 894
 895                         if (references == PAGEREF_RECLAIM_CLEAN)
 896                                 goto keep_locked;
 897                         if (!may_enter_fs)
 898                                 goto keep_locked;
 899                         if (!sc->may_writepage)
 900                                 goto keep_locked;
 901
 902                         /* Page is dirty, try to write it out here */
 903                         switch (pageout(page, mapping, sc)) {
 904                         case PAGE_KEEP:
 905                                 nr_congested++;
 906                                 goto keep_locked;
 907                         case PAGE_ACTIVATE:
 908                                 goto activate_locked;
 909                         case PAGE_SUCCESS:
 910                                 if (PageWriteback(page))
 911                                         goto keep_lumpy;
 912                                 if (PageDirty(page))
 913                                         goto keep;
 914
 915                                 /*
 916                                  * A synchronous write - probably a ramdisk.  Go
 917                                  * ahead and try to reclaim the page.
 918                                  */
 919                                 if (!trylock_page(page))
 920                                         goto keep;
 921                                 if (PageDirty(page) || PageWriteback(page))
 922                                         goto keep_locked;
 923                                 mapping = page_mapping(page);
 924                         case PAGE_CLEAN:
 925                                 ; /* try to free the page below */
 926                         }
 927                 }
 928
 929                 /*
 930                  * If the page has buffers, try to free the buffer mappings
 931                  * associated with this page. If we succeed we try to free
 932                  * the page as well.
 933                  *
 934                  * We do this even if the page is PageDirty().
 935                  * try_to_release_page() does not perform I/O, but it is
 936                  * possible for a page to have PageDirty set, but it is actually
 937                  * clean (all its buffers are clean).  This happens if the
 938                  * buffers were written out directly, with submit_bh(). ext3
 939                  * will do this, as well as the blockdev mapping.
 940                  * try_to_release_page() will discover that cleanness and will
 941                  * drop the buffers and mark the page clean - it can be freed.
 942                  *
 943                  * Rarely, pages can have buffers and no ->mapping.  These are
 944                  * the pages which were not successfully invalidated in
 945                  * truncate_complete_page().  We try to drop those buffers here
 946                  * and if that worked, and the page is no longer mapped into
 947                  * process address space (page_count == 1) it can be freed.
 948                  * Otherwise, leave the page on the LRU so it is swappable.
 949                  */
 950                 if (page_has_private(page)) {
 951                         if (!try_to_release_page(page, sc->gfp_mask))
 952                                 goto activate_locked;
 953                         if (!mapping && page_count(page) == 1) {
 954                                 unlock_page(page);
 955                                 if (put_page_testzero(page))
 956                                         goto free_it;
 957                                 else {
 958                                         /*
 959                                          * rare race with speculative reference.
 960                                          * the speculative reference will free
 961                                          * this page shortly, so we may
 962                                          * increment nr_reclaimed here (and
 963                                          * leave it off the LRU).
 964                                          */
 965                                         nr_reclaimed++;
 966                                         continue;
 967                                 }
 968                         }
 969                 }
 970
 971                 if (!mapping || !__remove_mapping(mapping, page))
 972                         goto keep_locked;
 973
 974                 /*
 975                  * At this point, we have no other references and there is
 976                  * no way to pick any more up (removed from LRU, removed
 977                  * from pagecache). Can use non-atomic bitops now (and
 978                  * we obviously don't have to worry about waking up a process
 979                  * waiting on the page lock, because there are no references.
 980                  */
 981                 __clear_page_locked(page);
 982 free_it:
 983                 nr_reclaimed++;
 984
 985                 /*
 986                  * Is there need to periodically free_page_list? It would
 987                  * appear not as the counts should be low
 988                  */
 989                 list_add(&page->lru, &free_pages);
 990                 continue;
 991
 992 cull_mlocked:
 993                 if (PageSwapCache(page))
 994                         try_to_free_swap(page);
 995                 unlock_page(page);
 996                 putback_lru_page(page);
 997                 reset_reclaim_mode(sc);
 998                 continue;
 999
1000 activate_locked:
1001                 /* Not a candidate for swapping, so reclaim swap space. */
1002                 if (PageSwapCache(page) && vm_swap_full())
1003                         try_to_free_swap(page);
1004                 VM_BUG_ON(PageActive(page));
1005                 SetPageActive(page);
1006                 pgactivate++;
1007 keep_locked:
1008                 unlock_page(page);
1009 keep:
1010                 reset_reclaim_mode(sc);
1011 keep_lumpy:
1012                 list_add(&page->lru, &ret_pages);
1013                 VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
1014         }
1015
1016         /*
1017          * Tag a zone as congested if all the dirty pages encountered were
1018          * backed by a congested BDI. In this case, reclaimers should just
1019          * back off and wait for congestion to clear because further reclaim
1020          * will encounter the same problem
1021          */
1022         if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
1023                 zone_set_flag(mz->zone, ZONE_CONGESTED);
1024
1025         free_hot_cold_page_list(&free_pages, 1);
1026
1027         list_splice(&ret_pages, page_list);
1028         count_vm_events(PGACTIVATE, pgactivate);
1029         *ret_nr_dirty += nr_dirty;
1030         *ret_nr_writeback += nr_writeback;
1031         return nr_reclaimed;
1032 }

The steps for swapping out an anonymous page are:
1.  Check whether the anonymous page is already in the swap cache; if not, add it (Lines 842~852). Adding the page to the swap cache means allocating a swap slot in a swap area, inserting the page into the swap cache, and setting the PG_dirty bit in page->flags (see the add_to_swap() sketch after this list). If this fails, the page is moved back onto the active list instead (Line 850).
2.  If the anonymous page is still mapped by one or more processes (Line 860), the mappings are torn down via try_to_unmap() (Line 861). The function does exactly what its comment says: "try_to_unmap - try to remove all page table mappings to a page".
3.  If PG_dirty is set on the anonymous page (Line 873), its contents still have to be written to the block device, so pageout() is called to write the page out to the swap area (Line 903; see the pageout() sketch after this list).
4.  Once the anonymous page has been written out to the external block device, it can be reclaimed. Before that, however, it must be removed from the swap cache. Why? A page sitting in the swap cache tells other processes: "if you need to swap this page in from the swap area, it has already been brought in; just use this copy and save yourself the work." So when the page is about to be reclaimed and reused for some other purpose, it must first be deleted from the swap cache, which is done by __remove_mapping() (Line 971; see the sketch after this list).
5.  With everything done, the anonymous page is added to the free_pages list (Line 989) and is eventually handed back to the buddy allocator via free_hot_cold_page_list() (Line 1025).
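
Step 1's add_to_swap() lives in mm/swap_state.c. Below is a simplified sketch of what it does in kernels of this era (the transparent-huge-page handling and some comments are trimmed; treat it as a paraphrase, not the verbatim source):

int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(!PageUptodate(page));

        /* allocate a swap slot in one of the active swap areas */
        entry = get_swap_page();
        if (!entry.val)
                return 0;

        /* insert the page into the swap cache, keyed by the swap entry */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
        if (err) {
                /* radix-tree allocation failed: give the swap slot back */
                swapcache_free(entry, NULL);
                return 0;
        }

        /*
         * Mark the page dirty so that shrink_page_list() knows its
         * contents still have to be written to the swap slot.
         */
        SetPageDirty(page);
        return 1;
}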
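For step 3, note that an anonymous page in the swap cache has swapper_space as its mapping, so mapping->a_ops->writepage is swap_writepage(), which submits the I/O to the swap slot allocated in step 1. The core of pageout() then looks roughly like this (a condensed sketch of the vmscan.c function from the same era; the congestion, error-reporting and tracing paths are omitted):

static pageout_t pageout(struct page *page, struct address_space *mapping,
                         struct scan_control *sc)
{
        if (!is_page_cache_freeable(page))
                return PAGE_KEEP;
        if (mapping->a_ops->writepage == NULL)
                return PAGE_ACTIVATE;

        if (clear_page_dirty_for_io(page)) {
                int res;
                struct writeback_control wbc = {
                        .sync_mode   = WB_SYNC_NONE,
                        .nr_to_write = SWAP_CLUSTER_MAX,
                        .for_reclaim = 1,
                };

                /* reclaim the page as soon as its writeback completes */
                SetPageReclaim(page);
                /* for an anonymous page this ends up in swap_writepage() */
                res = mapping->a_ops->writepage(page, &wbc);
                if (res == AOP_WRITEPAGE_ACTIVATE) {
                        ClearPageReclaim(page);
                        return PAGE_ACTIVATE;
                }
                if (!PageWriteback(page))
                        /* synchronous write, e.g. a ramdisk */
                        ClearPageReclaim(page);
                return PAGE_SUCCESS;
        }

        /* someone else cleaned the page already */
        return PAGE_CLEAN;
}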
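Step 4's removal from the swap cache happens in the swap-cache branch of __remove_mapping(). A simplified sketch (the page-cache branch is elided): with the mapping's tree_lock held, it freezes the page's reference count at 2 (one reference from reclaim's isolation, one from the swap cache itself) to make sure nobody else is using the page, deletes the page from the swap cache radix tree, and drops the swap cache's reference on the swap slot:

static int __remove_mapping(struct address_space *mapping, struct page *page)
{
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));

        spin_lock_irq(&mapping->tree_lock);

        /* fail if anyone besides reclaim and the cache still holds a reference */
        if (!page_freeze_refs(page, 2))
                goto cannot_free;
        if (unlikely(PageDirty(page))) {
                /* redirtied after pageout(): restore the references and keep it */
                page_unfreeze_refs(page, 2);
                goto cannot_free;
        }

        if (PageSwapCache(page)) {
                /* while in the swap cache, the swap entry is kept in page->private */
                swp_entry_t swap = { .val = page_private(page) };

                __delete_from_swap_cache(page);
                spin_unlock_irq(&mapping->tree_lock);
                /* release the swap cache's reference on the swap slot */
                swapcache_free(swap, page);
                return 1;
        }

        /* ... file-backed pages are removed via __delete_from_page_cache() ... */
        spin_unlock_irq(&mapping->tree_lock);
        return 1;

cannot_free:
        spin_unlock_irq(&mapping->tree_lock);
        return 0;
}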

For related discussion, see:
1.  http://www.spinics.net/lists/kernel/msg1485370.html
