diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 1e6d34bfa094..d754b2dfbf2d 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -104,5 +104,40 @@ static inline void page_cgroup_init(void)
 {
 }
 
+#endif
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+#include <linux/swap.h>
+extern struct mem_cgroup *
+swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem);
+extern struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent);
+extern int swap_cgroup_swapon(int type, unsigned long max_pages);
+extern void swap_cgroup_swapoff(int type);
+#else
+#include <linux/swap.h>
+
+static inline
+struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
+{
+	return NULL;
+}
+
+static inline
+struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
+{
+	return NULL;
+}
+
+static inline int
+swap_cgroup_swapon(int type, unsigned long max_pages)
+{
+	return 0;
+}
+
+static inline void swap_cgroup_swapoff(int type)
+{
+	return;
+}
+
 #endif
 #endif
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index df1e54a5ed19..685e7c8e1fd6 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -8,6 +8,7 @@
 #include <linux/memory.h>
 #include <linux/vmalloc.h>
 #include <linux/cgroup.h>
+#include <linux/swapops.h>
 
 static void __meminit
 __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
@@ -270,3 +271,199 @@ void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
 }
 
 #endif
+
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+
+static DEFINE_MUTEX(swap_cgroup_mutex);
+struct swap_cgroup_ctrl {
+	struct page **map;
+	unsigned long length;
+};
+
+struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
+
+/*
+ * This 8-byte entry seems big.. maybe we can shrink it once we can use an
+ * "id" for the cgroup rather than a pointer.
+ */
+struct swap_cgroup {
+	struct mem_cgroup *val;
+};
+#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
+#define SC_POS_MASK	(SC_PER_PAGE - 1)
+
+/*
+ * SwapCgroup implements "lookup" and "exchange" operations.
+ * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
+ * against SwapCache. At swap_free(), it is accessed directly from swap.
+ *
+ * This means:
+ *   - there is no race on "exchange" when we are reached via SwapCache,
+ *     because the SwapCache (and its swp_entry) is locked.
+ *   - when called via swap_free(), there is no remaining user of the entry
+ *     and hence no race, so no lock is needed around "exchange".
+ *
+ * TODO: we could push these buffers out to HIGHMEM.
+ */
+
+/*
+ * allocate buffer pages for swap_cgroup.
+ */
+static int swap_cgroup_prepare(int type)
+{
+	struct page *page;
+	struct swap_cgroup_ctrl *ctrl;
+	unsigned long idx, max;
+
+	if (!do_swap_account)
+		return 0;
+	ctrl = &swap_cgroup_ctrl[type];
+
+	for (idx = 0; idx < ctrl->length; idx++) {
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!page)
+			goto not_enough_page;
+		ctrl->map[idx] = page;
+	}
+	return 0;
+not_enough_page:
+	max = idx;
+	for (idx = 0; idx < max; idx++)
+		__free_page(ctrl->map[idx]);
+
+	return -ENOMEM;
+}
+
+/**
+ * swap_cgroup_record - record mem_cgroup for this swp_entry.
+ * @ent: swap entry to be recorded into
+ * @mem: mem_cgroup to be recorded
+ *
+ * Returns the old value on success, NULL on failure.
+ * (Of course, the old value can itself be NULL.)
+ */
+struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
+{
+	int type = swp_type(ent);
+	unsigned long offset = swp_offset(ent);
+	unsigned long idx = offset / SC_PER_PAGE;
+	unsigned long pos = offset & SC_POS_MASK;
+	struct swap_cgroup_ctrl *ctrl;
+	struct page *mappage;
+	struct swap_cgroup *sc;
+	struct mem_cgroup *old;
+
+	if (!do_swap_account)
+		return NULL;
+
+	ctrl = &swap_cgroup_ctrl[type];
+
+	mappage = ctrl->map[idx];
+	sc = page_address(mappage);
+	sc += pos;
+	old = sc->val;
+	sc->val = mem;
+
+	return old;
+}
+
+/**
+ * lookup_swap_cgroup - look up the mem_cgroup tied to a swap entry
+ * @ent: swap entry to be looked up.
+ *
+ * Returns a pointer to the mem_cgroup on success, NULL on failure.
+ */
+struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
+{
+	int type = swp_type(ent);
+	unsigned long offset = swp_offset(ent);
+	unsigned long idx = offset / SC_PER_PAGE;
+	unsigned long pos = offset & SC_POS_MASK;
+	struct swap_cgroup_ctrl *ctrl;
+	struct page *mappage;
+	struct swap_cgroup *sc;
+	struct mem_cgroup *ret;
+
+	if (!do_swap_account)
+		return NULL;
+
+	ctrl = &swap_cgroup_ctrl[type];
+	mappage = ctrl->map[idx];
+	sc = page_address(mappage);
+	sc += pos;
+	ret = sc->val;
+	return ret;
+}
+
+int swap_cgroup_swapon(int type, unsigned long max_pages)
+{
+	void *array;
+	unsigned long array_size;
+	unsigned long length;
+	struct swap_cgroup_ctrl *ctrl;
+
+	if (!do_swap_account)
+		return 0;
+
+	length = ((max_pages/SC_PER_PAGE) + 1);
+	array_size = length * sizeof(void *);
+
+	array = vmalloc(array_size);
+	if (!array)
+		goto nomem;
+
+	memset(array, 0, array_size);
+	ctrl = &swap_cgroup_ctrl[type];
+	mutex_lock(&swap_cgroup_mutex);
+	ctrl->length = length;
+	ctrl->map = array;
+	if (swap_cgroup_prepare(type)) {
+		/* memory shortage */
+		ctrl->map = NULL;
+		ctrl->length = 0;
+		vfree(array);
+		mutex_unlock(&swap_cgroup_mutex);
+		goto nomem;
+	}
+	mutex_unlock(&swap_cgroup_mutex);
+
+	printk(KERN_INFO
+		"swap_cgroup: uses %ld bytes of vmalloc for pointer array space"
+		" and %ld bytes to hold mem_cgroup pointers on swap\n",
+		array_size, length * PAGE_SIZE);
+	printk(KERN_INFO
+		"swap_cgroup can be disabled by the noswapaccount boot option.\n");
+
+	return 0;
+nomem:
+	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
+	printk(KERN_INFO
+		"swap_cgroup can be disabled by the noswapaccount boot option.\n");
+	return -ENOMEM;
+}
+
+void swap_cgroup_swapoff(int type)
+{
+	int i;
+	struct swap_cgroup_ctrl *ctrl;
+
+	if (!do_swap_account)
+		return;
+
+	mutex_lock(&swap_cgroup_mutex);
+	ctrl = &swap_cgroup_ctrl[type];
+	if (ctrl->map) {
+		for (i = 0; i < ctrl->length; i++) {
+			struct page *page = ctrl->map[i];
+			if (page)
+				__free_page(page);
+		}
+		vfree(ctrl->map);
+		ctrl->map = NULL;
+		ctrl->length = 0;
+	}
+	mutex_unlock(&swap_cgroup_mutex);
+}
+
+#endif
diff --git a/mm/swapfile.c b/mm/swapfile.c
index ddc6d92be2cb..1e7a715a3866 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -33,6 +33,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <linux/swapops.h>
+#include <linux/page_cgroup.h>
 
 static DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
@@ -1494,6 +1495,9 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
 	spin_unlock(&swap_lock);
 	mutex_unlock(&swapon_mutex);
 	vfree(swap_map);
+	/* Destroy swap account information */
+	swap_cgroup_swapoff(type);
+
 	inode = mapping->host;
 	if (S_ISBLK(inode->i_mode)) {
 		struct block_device *bdev = I_BDEV(inode);
@@ -1811,6 +1815,11 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		}
 		swap_map[page_nr] = SWAP_MAP_BAD;
 	}
+
+	error = swap_cgroup_swapon(type, maxpages);
+	if (error)
+		goto bad_swap;
+
 	nr_good_pages = swap_header->info.last_page -
 			swap_header->info.nr_badpages -
 			1 /* header page */;
@@ -1882,6 +1891,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		bd_release(bdev);
 	}
 	destroy_swap_extents(p);
+	swap_cgroup_swapoff(type);
 bad_swap_2:
 	spin_lock(&swap_lock);
 	p->swap_file = NULL;
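
For reviewers who want to see how the new hooks are meant to be driven from the
memcg side, here is a rough usage sketch. It is illustrative only and not part
of the patch: the example_* callers below are made-up names standing in for the
charge/uncharge paths that a later memcg patch would wire up.

/*
 * Illustrative sketch (not part of this patch): how a memcg swap-out and
 * swap-free path could use the swap_cgroup helpers added above.
 * The example_* functions are hypothetical placeholders.
 */

/* Swap-out side, reached with the SwapCache page (and its swp_entry) locked. */
static void example_record_swap_owner(swp_entry_t ent, struct mem_cgroup *memcg)
{
	/* Remember which cgroup owns this swap slot; returns the old owner. */
	swap_cgroup_record(ent, memcg);
}

/* swap_free() side: no user of the entry remains, so no extra locking. */
static void example_uncharge_swap_owner(swp_entry_t ent)
{
	struct mem_cgroup *memcg = lookup_swap_cgroup(ent);

	if (memcg) {
		/* ... uncharge mem+swap against memcg here ... */
		swap_cgroup_record(ent, NULL);	/* drop the stale pointer */
	}
}

The point of preallocating the per-swapfile page array at swapon time is that
both helpers reduce to indexing into those pages plus a plain pointer
read/write, so they can be called from the paths above without introducing a
new lock, exactly as the comment block in mm/page_cgroup.c argues.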