slub: Use page variable instead of c->page.
Cache the value of c->page in a local variable to avoid additional
fetches from per-cpu data.
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
diff --git a/mm/slub.c b/mm/slub.c
index 2389a01..6b60fc9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2208,6 +2208,7 @@
unsigned long addr, struct kmem_cache_cpu *c)
{
void *freelist;
+ struct page *page;
unsigned long flags;
local_irq_save(flags);
@@ -2220,13 +2221,14 @@
c = this_cpu_ptr(s->cpu_slab);
#endif
- if (!c->page)
+ page = c->page;
+ if (!page)
goto new_slab;
redo:
if (unlikely(!node_match(c, node))) {
stat(s, ALLOC_NODE_MISMATCH);
- deactivate_slab(s, c->page, c->freelist);
+ deactivate_slab(s, page, c->freelist);
c->page = NULL;
c->freelist = NULL;
goto new_slab;
@@ -2239,7 +2241,7 @@
stat(s, ALLOC_SLOWPATH);
- freelist = get_freelist(s, c->page);
+ freelist = get_freelist(s, page);
if (!freelist) {
c->page = NULL;
@@ -2264,8 +2266,8 @@
new_slab:
if (c->partial) {
- c->page = c->partial;
- c->partial = c->page->next;
+ page = c->page = c->partial;
+ c->partial = page->next;
stat(s, CPU_PARTIAL_ALLOC);
c->freelist = NULL;
goto redo;
@@ -2281,14 +2283,15 @@
return NULL;
}
+ page = c->page;
if (likely(!kmem_cache_debug(s)))
goto load_freelist;
/* Only entered in the debug case */
- if (!alloc_debug_processing(s, c->page, freelist, addr))
+ if (!alloc_debug_processing(s, page, freelist, addr))
goto new_slab; /* Slab failed checks. Next slab needed */
- deactivate_slab(s, c->page, get_freepointer(s, freelist));
+ deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
local_irq_restore(flags);