About __free_pages()

__free_pages() first drops a reference on the page; only when the reference count falls to zero does it check whether a single page or a larger contiguous block is to be freed.

If a single page is freed, it is not returned to the buddy system but placed in the per-CPU cache: pages that are highly likely to still reside in the CPU cache go to the head of the list as hot pages, cold pages to the tail. free_hot_cold_page() does this work.

If more than one page is freed, __free_pages() delegates the work to __free_pages_ok() and finally to free_one_page().
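
For orientation, this is how a caller typically pairs allocation and release; a hypothetical snippet using the standard alloc_pages()/__free_pages() API, where the order at free time must match the order used at allocation time:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int example_alloc_free(void)
{
    /* Allocate 2^2 = 4 contiguous pages from the buddy allocator. */
    struct page *pages = alloc_pages(GFP_KERNEL, 2);

    if (!pages)
        return -ENOMEM;

    /* ... use the pages ... */

    /* Free them with the same order that was used at allocation. */
    __free_pages(pages, 2);
    return 0;
}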


void __free_pages(struct page *page, unsigned int order)
{
    /* Drop a reference; free only if it was the last one. */
    if (put_page_testzero(page)) {
        if (order == 0)
            /* Single page: stash it in the per-CPU cache. */
            free_hot_cold_page(page, 0);
        else
            /* Larger block: hand it back to the buddy system. */
            __free_pages_ok(page, order);
    }
}
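
The put_page_testzero() call ties freeing to reference counting: the page is actually released only when the last user drops its reference. In kernels of this vintage it is defined in include/linux/mm.h roughly as follows (a sketch; VM_BUG_ON compiles away unless CONFIG_DEBUG_VM is set):

/*
 * Drop a reference and report whether the count fell to zero,
 * i.e. whether the page has no users left.
 */
static inline int put_page_testzero(struct page *page)
{
    VM_BUG_ON(atomic_read(&page->_count) == 0);
    return atomic_dec_and_test(&page->_count);
}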



/*
 * Free a 0-order page
 * cold == 1 ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, int cold)
{
    struct zone *zone = page_zone(page);
    struct per_cpu_pages *pcp;
    unsigned long flags;
    int migratetype;
    int wasMlocked = __TestClearPageMlocked(page);

    if (!free_pages_prepare(page, 0))
        return;

    migratetype = get_pageblock_migratetype(page);
    set_page_private(page, migratetype);
    local_irq_save(flags);
    if (unlikely(wasMlocked))
        free_page_mlock(page);
    __count_vm_event(PGFREE);

    /*
     * We only track unmovable, reclaimable and movable on pcp lists.
     * Free ISOLATE pages back to the allocator because they are being
     * offlined but treat RESERVE as movable pages so we can get those
     * areas back if necessary. Otherwise, we may have to free
     * excessively into the page allocator
     */
    if (migratetype >= MIGRATE_PCPTYPES) {
        if (unlikely(migratetype == MIGRATE_ISOLATE)) {
            free_one_page(zone, page, 0, migratetype);
            goto out;
        }
        migratetype = MIGRATE_MOVABLE;
    }

    pcp = &this_cpu_ptr(zone->pageset)->pcp;
    if (cold)
        list_add_tail(&page->lru, &pcp->lists[migratetype]);
    else
        list_add(&page->lru, &pcp->lists[migratetype]);
    pcp->count++;
    if (pcp->count >= pcp->high) {
        free_pcppages_bulk(zone, pcp->batch, pcp);
        pcp->count -= pcp->batch;
    }

out:
    local_irq_restore(flags);
}
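
The pcp pointer used above refers to the per-CPU cache itself, described by struct per_cpu_pages (shown roughly as in include/linux/mmzone.h of kernels from this period):

struct per_cpu_pages {
    int count;              /* number of pages on the lists */
    int high;               /* high watermark; emptying needed */
    int batch;              /* chunk size for buddy add/remove */

    /* One list per migrate type kept on the pcp lists. */
    struct list_head lists[MIGRATE_PCPTYPES];
};

Once count climbs above high, free_pcppages_bulk() returns batch pages to the buddy system in one go, so the cache is drained in chunks rather than page by page.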



__free_pages_ok() is the cornerstone of memory freeing.

static void __free_pages_ok(struct page *page, unsigned int order)
{
    unsigned long flags;
    int wasMlocked = __TestClearPageMlocked(page);

    if (!free_pages_prepare(page, order))
        return;

    local_irq_save(flags);
    if (unlikely(wasMlocked))
        free_page_mlock(page);
    __count_vm_events(PGFREE, 1 << order);
    free_one_page(page_zone(page), page, order,
                    get_pageblock_migratetype(page));
    local_irq_restore(flags);
}
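
free_one_page() takes the zone lock and ends up in __free_one_page(), where the actual buddy coalescing happens. The core idea, shown in the simplified sketch below (locking, debug checks and zone statistics omitted), is that the buddy of an order-n block is found by flipping bit n of the page index; merging continues upward as long as that buddy is itself free and of the same order:

/* Simplified sketch of the merge loop in __free_one_page() (mm/page_alloc.c). */
page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

while (order < MAX_ORDER - 1) {
    /* The buddy's index differs from ours only in bit 'order'. */
    buddy = page + ((page_idx ^ (1 << order)) - page_idx);
    if (!page_is_buddy(page, buddy, order))
        break;                  /* buddy in use or split further: stop */

    /* The buddy is free: take it off its free list and merge. */
    list_del(&buddy->lru);
    zone->free_area[order].nr_free--;
    rmv_page_order(buddy);

    /* The combined block starts at the lower of the two indices. */
    combined_idx = page_idx & ~(1 << order);
    page = page + (combined_idx - page_idx);
    page_idx = combined_idx;
    order++;
}

/* Park the (possibly merged) block on the matching free list. */
set_page_order(page, order);
list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
zone->free_area[order].nr_free++;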


Author: letmego163   Posted: 2010-10-19