zsmalloc: use class->pages_per_zspage

There is no need to recalculate pages_per_zspage at runtime.  Just use
class->pages_per_zspage to avoid unnecessary runtime overhead.

Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Minchan Kim 2015-09-08 15:04:49 -07:00 committed by Linus Torvalds
parent ad9d5e175a
commit 6cbf16b3b6
1 changed file with 2 additions and 3 deletions

View File

@@ -1723,7 +1723,7 @@ static unsigned long zs_can_compact(struct size_class *class)
 	obj_wasted /= get_maxobj_per_zspage(class->size,
 			class->pages_per_zspage);
 
-	return obj_wasted * get_pages_per_zspage(class->size);
+	return obj_wasted * class->pages_per_zspage;
 }
 
 static void __zs_compact(struct zs_pool *pool, struct size_class *class)
@@ -1761,8 +1761,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
 		putback_zspage(pool, class, dst_page);
 		if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
-			pool->stats.pages_compacted +=
-				get_pages_per_zspage(class->size);
+			pool->stats.pages_compacted += class->pages_per_zspage;
 		spin_unlock(&class->lock);
 		cond_resched();
 		spin_lock(&class->lock);